自从我升级 EKS Terraform 脚本的版本以来,我就不断遇到错误。
目前我被这个错误卡住了:
错误:获取http://localhost/api/v1/namespaces/kube-system/serviceaccounts/tiller:拨号tcp 127.0.0.1:80:connect:连接被拒绝
错误:获取http://localhost/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/tiller:拨号tcp 127.0.0.1:80:connect:连接被拒绝
该脚本在旧版本上仍然可以正常运行,但我正在尝试升级集群版本。
provider.tf
# AWS provider: assumes the cross-account "terraform" role in the target
# account so all AWS API calls run with that role's permissions.
provider "aws" {
  # Terraform 0.12: first-class expressions — quoting a lone "${var.x}"
  # interpolation is deprecated.
  region  = var.region
  version = "~> 2.0"

  assume_role {
    role_arn = "arn:aws:iam::${var.target_account_id}:role/terraform"
  }
}
# Kubernetes provider, driven by the kubeconfig written for the EKS cluster.
#
# NOTE(review): the "connection refused on 127.0.0.1:80" error happens when
# the provider cannot load a usable kubeconfig and silently falls back to the
# client default of http://localhost. Being explicit about load_config_file
# makes the provider fail loudly instead of falling back; also confirm that
# .kube_config.yaml is regenerated after the cluster version upgrade — a
# stale file pointing at the old endpoint produces the same symptom.
provider "kubernetes" {
  version          = "~> 1.9"
  load_config_file = true
  config_path      = ".kube_config.yaml"
}
# Helm (v2) provider: installs Tiller using the service account created in
# kubernetes.tf, so Tiller gets cluster-admin via the RBAC binding.
provider "helm" {
  # 0.12 first-class references instead of "${...}" interpolation.
  service_account = kubernetes_service_account.tiller.metadata.0.name
  namespace       = kubernetes_service_account.tiller.metadata.0.namespace

  kubernetes {
    # Must point at the same (fresh) kubeconfig as the kubernetes provider,
    # otherwise Helm falls back to http://localhost as well.
    config_path = ".kube_config.yaml"
  }
}
# State is stored in S3. The block is intentionally empty ("partial backend
# configuration"): bucket/key/region are supplied at init time, e.g.
#   terraform init -backend-config=...
terraform {
backend "s3" {
}
}
# Reads another workspace's state from S3, assuming a role for access.
data "terraform_remote_state" "state" {
  backend = "s3"

  config = {
    # 0.12 first-class expressions — no "${...}" wrapper needed for bare refs.
    bucket                 = var.backend_config_bucket
    region                 = var.backend_config_bucket_region
    key                    = "${var.name}/${var.backend_config_tfstate_file_key}" # var.name == CLIENT
    role_arn               = var.backend_config_role_arn
    skip_region_validation = true
    dynamodb_table         = "terraform_locks"
    encrypt                = true # bool, not the string "true"
  }
}
kubernetes.tf
# Service account for Tiller (Helm v2 server side) in kube-system.
resource "kubernetes_service_account" "tiller" {
  # Ensure the EKS cluster (and its API endpoint) exists before this resource
  # is created — consistent with kubernetes_cluster_role_binding.tiller, which
  # already declares this dependency. Without it, the provider can attempt the
  # API call before the cluster/kubeconfig is ready.
  depends_on = [module.eks]

  metadata {
    name      = "tiller"
    namespace = "kube-system"
  }

  automount_service_account_token = true # bool, not the string "true"
}
# Grants the Tiller service account cluster-admin so Helm v2 can manage
# resources across the cluster.
resource "kubernetes_cluster_role_binding" "tiller" {
  # 0.12: depends_on takes direct references, not quoted strings.
  depends_on = [module.eks]

  metadata {
    name = "tiller"
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "cluster-admin"
  }

  subject {
    kind      = "ServiceAccount"
    name      = "tiller"
    api_group = "" # core API group for ServiceAccount subjects
    namespace = "kube-system"
  }
}
Terraform 版本:0.12.12,EKS 模块版本:6.0.2
这意味着 .kube_config.yaml 中的 server: 条目指向了错误的端口(甚至可能是错误的协议——正常的 Kubernetes 通信通过 https 传输,并由双向 TLS 身份验证保护),或者不再有监听 localhost:80 的代理,或者 --insecure-port 过去是 80 而现在是 0(强烈推荐设为 0)。
遗憾的是,如果没有更多细节,没有人能猜出正确的值是什么,或者应该改成什么。
我确信需要在您的 Terraform 配置中设置 Kubernetes provider,类似这样:
# Point the provider at the kubeconfig emitted by the EKS module so it never
# falls back to the default http://localhost endpoint.
# NOTE(review): "EKS_cluster" is a placeholder — substitute the actual module
# name from this configuration (e.g. module.eks.kubeconfig_filename).
provider "kubernetes" {
config_path = module.EKS_cluster.kubeconfig_filename
}
当我忘记为 Terraform 配置访问集群的凭据、因而无法连接集群时,就会遇到这种情况。如果您已经为身份验证配置好了 kubectl 或等效方式,这样做应该能解决问题。