GCE Metadata not defined

I'm trying to set up a PostgreSQL Prometheus exporter on my GKE cluster with a Cloud SQL Auth Proxy sidecar, but the auth proxy fails with the error below. So far I've tried granting the Workload Identity broader access (I previously used the cloudsql.client role), but I doubt that's the problem. Maybe my proxy connection command is wrong? I'm new to GCP. Thanks.

Get "https://sqladmin.googleapis.com/sql/v1beta4/projects/PROJECT/instances/REGION~CLOUDSQLINSTANCE?alt=json&prettyPrint=false": metadata: GCE metadata "instance/service-accounts/default/token?scopes=https%!A(MISSING)%!F(MISSING)%!F(MISSING)www.googleapis.com%!F(MISSING)auth%!F(MISSING)sqlservice.admin" not defined

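In case the cluster side matters: the token URL in the error suggests the proxy is asking the in-pod metadata server for a default service-account token and getting nothing back, which as I understand it points at the Workload Identity wiring rather than at missing roles. The cluster-side setup looks roughly like this (a trimmed sketch with placeholder names, not my exact config):

```
# Trimmed sketch of the cluster-side Workload Identity setup
# (placeholder names; only the attributes relevant here are shown).
resource "google_container_cluster" "primary" {
  name     = "CLUSTER_NAME" # placeholder
  location = "REGION"       # placeholder

  workload_identity_config {
    workload_pool = "PROJECT.svc.id.goog" # placeholder project
  }
}

resource "google_container_node_pool" "primary_nodes" {
  name     = "NODE_POOL_NAME" # placeholder
  location = "REGION"         # placeholder
  cluster  = google_container_cluster.primary.name

  node_config {
    # Pods only get Workload Identity tokens if the node pool runs the
    # GKE metadata server; without this, the token endpoint in the
    # error above is "not defined".
    workload_metadata_config {
      mode = "GKE_METADATA"
    }
  }
}
```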
My cluster is deployed with Terraform, so my resources are as follows:

resource "kubernetes_deployment" "postgres_exporter" {
metadata {
name      = format("%s-%s", var.cluster_name, var.app_name)
namespace = var.namespace
labels = {
app        = var.app_name
maintainer = "redacted"
cluster    = var.cluster_name
}
}
spec {
replicas = var.replicas
selector {
match_labels = {
app     = var.app_name
cluster = var.cluster_name
}
}
template {
metadata {
labels = {
app     = var.app_name
cluster = var.cluster_name
}
}
spec {
service_account_name = KSA_NAME
container {
name  = var.postgres-exporter.name 
image = var.postgres-exporter.image
command = ["postgres_exporter"]
port {
name           = "http"
container_port = 9187
protocol       = "TCP"
}
resources {
limits = {
cpu    = var.postgres-exporter.resources.limits.cpu 
memory = var.postgres-exporter.resources.limits.memory
}
requests = {
cpu    = var.postgres-exporter.resources.requests.cpu
memory = var.postgres-exporter.resources.requests.memory
}
}
liveness_probe {
http_get {
path   = "/healthz"
port   = "http"
scheme = "HTTP"
}
initial_delay_seconds = 5
timeout_seconds       = 5
period_seconds        = 5
success_threshold     = 1
failure_threshold     = 3
}
readiness_probe {
http_get {
path   = "/healthz"
port   = "http"
scheme = "HTTP"
}
initial_delay_seconds = 1
timeout_seconds       = 5
period_seconds        = 5
success_threshold     = 1
failure_threshold     = 3
}
lifecycle {
pre_stop {
exec {
command = ["/bin/bash", "-c", "sleep 20"]
}
}
}
image_pull_policy = "IfNotPresent"
security_context {
capabilities {
drop = ["SETPCAP", "MKNOD", "AUDIT_WRITE", "CHOWN", "NET_RAW", "DAC_OVERRIDE", "FOWNER", "FSETID", "KILL", "SETGID", "SETUID", "NET_BIND_SERVICE", "SYS_CHROOT", "SETFCAP"]
}
read_only_root_filesystem = true
}
}
container {
name = "gce-proxy-pg"
image = var.gce-proxy.image
resources {
limits = {
cpu    = var.gce-proxy.resources.limits.cpu
memory = var.gce-proxy.resources.limits.memory
}
requests = {
cpu    = var.gce-proxy.resources.requests.cpu
memory = var.gce-proxy.resources.requests.memory
}
}
port {
container_port = 5432
name = "proxy-pg"
protocol = "TCP"
}
command = [ "/cloud_sql_proxy", "-instances=PROJECT:REGION:INSTANCE=tcp:<PRIVATE_IP>:5432", "-enable-iam-login=true", "-ip_address_types=PRIVATE"]
security_context {
capabilities {
drop = ["SETPCAP", "MKNOD", "AUDIT_WRITE", "CHOWN", "NET_RAW", "DAC_OVERRIDE", "FOWNER", "FSETID", "KILL", "SETGID", "SETUID", "NET_BIND_SERVICE", "SYS_CHROOT", "SETFCAP"]
}
read_only_root_filesystem = true
}
}
restart_policy = "Always"
security_context {
run_as_user     = 1000
run_as_non_root = true
}
image_pull_secrets {
name = "redacted"
}
topology_spread_constraint {
max_skew           = 1
topology_key       = "topology.kubernetes.io/zone"
when_unsatisfiable = "DoNotSchedule"
label_selector {
match_labels = {
app = var.app_name
}
}
}
topology_spread_constraint {
max_skew           = 1
topology_key       = "kubernetes.io/hostname"
when_unsatisfiable = "ScheduleAnyway"
label_selector {
match_labels = {
app = var.app_name
}
}
}
}
}
strategy {
type = "RollingUpdate"
rolling_update {
max_surge = "1"
}
}
}
}
resource "kubernetes_service" "postgres_exporter" {
metadata {
name      = format("%s-%s", var.cluster_name, var.app_name)
namespace = var.namespace
labels = {
app        = var.app_name
maintainer = ""
cluster    = var.cluster_name
}
}
spec {
port {
name        = "http"
protocol    = "TCP"
port        = 9187
target_port = "9187"
}
port {
name = "proxy"
protocol = "TCP"
port = 5432
target_port = "5432"
}
selector = {
app     = var.app_name
cluster = var.cluster_name
}
type = "ClusterIP"
}
}
resource "kubernetes_service_account" "exporter-ksa" {
metadata {
name        = "postgres-exporter"
namespace   = var.namespace
annotations = {
"iam.gke.io/gcp-service-account" = "mail@PROJECT.iam.gserviceaccount.com"
}
}
}
### Workload Identity
resource "google_service_account" "sql-monitoring" {
account_id   = "sql-monitoring" 
display_name = "sql-monitoring"
description  = ""
}
resource "google_service_account_iam_binding" "sql-monitoring-binding" {
service_account_id = google_service_account.sql-monitoring.id
role               = "roles/iam.workloadIdentityUser"
members = [
"serviceAccount:${data.google_project.current.project_id}.svc.id.goog[NAMESPACE/KSA-NAME]"
]
}
resource "google_project_iam_member" "sql-monitoring-iam-member" {
for_each = toset([
"roles/cloudsql.editor"
])
project = data.google_project.current.project_id
role    = each.key
member  = "serviceAccount:${google_service_account.sql-monitoring.email}"

I was using the wrong email here: the annotation must contain the GSA's email, but I had put in the name of the kubernetes_service_account instead. The corrected resource:

resource "kubernetes_service_account" "exporter-ksa" {
metadata {
name        = "postgres-exporter"
namespace   = var.namespace
annotations = {
"iam.gke.io/gcp-service-account" = "mail@PROJECT.iam.gserviceaccount.com"
}
}
}```
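With that fixed, it may also help to build these strings from resource references instead of typing them by hand, so the GSA email and the NAMESPACE/KSA-NAME member can't drift apart. A minimal sketch, using the resource names from the config above:

```
# Sketch (same resource names as above): derive the annotation and the
# Workload Identity member from the resources themselves.
resource "kubernetes_service_account" "exporter-ksa" {
  metadata {
    name      = "postgres-exporter"
    namespace = var.namespace
    annotations = {
      # GSA email taken from the google_service_account resource.
      "iam.gke.io/gcp-service-account" = google_service_account.sql-monitoring.email
    }
  }
}

resource "google_service_account_iam_binding" "sql-monitoring-binding" {
  service_account_id = google_service_account.sql-monitoring.id
  role               = "roles/iam.workloadIdentityUser"
  members = [
    # Namespace and KSA name interpolated from the KSA resource, so the
    # member string always matches the actual service account.
    "serviceAccount:${data.google_project.current.project_id}.svc.id.goog[${kubernetes_service_account.exporter-ksa.metadata[0].namespace}/${kubernetes_service_account.exporter-ksa.metadata[0].name}]"
  ]
}
```

The deployment's KSA_NAME placeholder can then point at the same resource via kubernetes_service_account.exporter-ksa.metadata[0].name.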
