Do I need two aws_iam_policy_document data sources in this ECS Terraform script?

Here is my Terraform script:

variable "aws_region" { }
variable "flavor" { }  # test or prod
variable "task_worker_service_name" { }
variable "task_cpu" {}
variable "task_memory" {}
variable "az_count" {}
terraform {
required_version = "= 0.12.6"
}
provider "aws" {
version = "~> 2.21.1"
region = "${var.aws_region}"
}
data "aws_availability_zones" "available" {}
data "aws_iam_policy_document" "ecs_service_policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = [ "ecs.amazonaws.com" ]
}
}
}
data "aws_iam_policy_document" "task_worker_iam_role_policy" {
statement {
actions   = [ "sts:AssumeRole" ]
principals {
type = "Service"
identifiers = [
"ecs-tasks.amazonaws.com"
]
}
}
}
data "aws_iam_policy_document" "assume_role_policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = [ "ecs-tasks.amazonaws.com" ]
}
}
}
resource "aws_iam_role" "ecs_service_role" {
name = "${var.flavor}-task-ecs-service-role"
path = "/"
assume_role_policy = "${data.aws_iam_policy_document.ecs_service_policy.json}"
}
resource "aws_iam_role_policy_attachment" "ecs_service_role_attachment" {
role = "${aws_iam_role.ecs_service_role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole"
}
resource "aws_vpc" "ecs" {
cidr_block  = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
instance_tenancy = "default"
tags = {
Name = "ecs"
}
}
resource "aws_security_group" "vpc_ecs_task_worker" {
name        = "${var.flavor}-vpc_ecs_task_worker"
description = "ECS Allowed Ports"
ingress {
from_port       = 32768
to_port         = 65535
protocol        = "tcp"
cidr_blocks     = ["0.0.0.0/0"]
}

egress {
from_port       = 0
to_port         = 0
protocol        = "-1"
cidr_blocks     = ["0.0.0.0/0"]
}
}
resource "aws_iam_role" "ecs_task_execution_role" {
name = "${var.flavor}-ecs-task-worker-task-execution-role"
assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json
}
resource "aws_iam_role" "task_worker_iam_role" {
name = "${var.flavor}-task-worker-role"
path = "/"
assume_role_policy = data.aws_iam_policy_document.task_worker_iam_role_policy.json
}
# Create var.az_count private subnets, each in a different AZ
resource "aws_subnet" "private" {
count             = "${var.az_count}"
cidr_block        = "${cidrsubnet(aws_vpc.ecs.cidr_block, 8, count.index)}"
availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
vpc_id            = "${aws_vpc.ecs.id}"
}
resource "aws_ecs_task_definition" "task_worker" {
family = "${var.flavor}-${var.task_worker_service_name}"
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
cpu = var.task_cpu
memory = var.task_memory
execution_role_arn = aws_iam_role.ecs_task_execution_role.arn
task_role_arn = aws_iam_role.task_worker_iam_role.arn
container_definitions = <<JSON
[
{
"dnsSearchDomains": null,
"logConfiguration": null,
"entryPoint": null,
"portMappings": [],
"command": null,
"linuxParameters": null,
"cpu": ${var.task_cpu},
"environment": [],
"resourceRequirements": null,
"ulimits": null,
"dnsServers": null,
"mountPoints": [],
"workingDirectory": null,
"secrets": null,
"dockerSecurityOptions": null,
"memory": null,
"memoryReservation": ${var.task_memory},
"volumesFrom": [],
"stopTimeout": null,
"image": "us-west-2.amazonaws.com/task:4383669",
"startTimeout": null,
"dependsOn": null,
"disableNetworking": null,
"interactive": null,
"healthCheck": null,
"essential": true,
"links": null,
"hostname": null,
"extraHosts": null,
"pseudoTerminal": null,
"user": null,
"readonlyRootFilesystem": null,
"dockerLabels": null,
"systemControls": null,
"privileged": null,
"name": "task-worker"
}
]
JSON
}
resource "aws_ecs_cluster" "task_pool" {
name = "${var.flavor}-task-pool"
}
resource "aws_ecs_service" "task_service" {
name = "${var.flavor}-task-worker-service"
cluster = "${aws_ecs_cluster.task_pool.id}"
task_definition = "${aws_ecs_task_definition.task_worker.arn}"
launch_type = "FARGATE"
desired_count = 2
network_configuration {
subnets = "${aws_subnet.private[*].id}"
security_groups = ["${aws_security_group.vpc_ecs_task_worker.id}" ]
assign_public_ip = "true"
}
}

In this script I have added two "aws_iam_policy_document" data sources:

data "aws_iam_policy_document" "pdf_conversion_iam_role_policy"和 分别data "aws_iam_policy_document" "assume_role_policy"

They are essentially identical, i.e.:

data "aws_iam_policy_document" "assume_role_policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = [ "ecs-tasks.amazonaws.com" ]
}
}
}

I use assume_role_policy in aws_iam_role.ecs_task_execution_role and task_worker_iam_role_policy in aws_iam_role.task_worker_iam_role.

My question is: is it necessary to have two aws_iam_policy_document data sources in this case? Also, do you have any suggestions for a good naming convention for these Terraform resource names?

If you are using the latest version of Terraform, you can write this using the resource for_each feature to generate multiple aws_iam_policy_document instances from a single data block:

data "aws_iam_policy_document" "assume_role" {
for_each = toset([
"ecs-tasks.amazonaws.com",
"ecs.amazonaws.com",
])
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = [each.value]
}
}
}
resource "aws_iam_role" "service" {
for_each = data.aws_iam_policy_document.assume_role
name               = "${var.flavor}-${each.key}-service-role"
assume_role_policy = each.value.json
}
resource "aws_iam_role_policy_attachment" "ecs_service" {
role = aws_iam_role.service["ecs.amazonaws.com"].name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole"
}

Using for_each inside the data and resource blocks means that references to those resources elsewhere produce a map of resource objects. In this case the map keys are the service identifiers, which gives us resources with the following addresses:

  • data.aws_iam_policy_document.assume_role["ecs.amazonaws.com"]
  • data.aws_iam_policy_document.assume_role["ecs-tasks.amazonaws.com"]
  • aws_iam_role.service["ecs.amazonaws.com"]
  • aws_iam_role.service["ecs-tasks.amazonaws.com"]

The aws_iam_role_policy_attachment.ecs_service resource then refers directly to the specific role object for ecs.amazonaws.com, allowing it to represent a policy attachment that applies only to that role, even though we generalized the creation of the roles themselves.
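The same map-key lookup works for any other per-role reference. For example, here is a minimal sketch, assuming you also want the standard AWS-managed execution-role permissions attached to the generated ecs-tasks.amazonaws.com role (this attachment is not part of the original script):

resource "aws_iam_role_policy_attachment" "task_execution" {
  # Hypothetical attachment: index the generated roles by service identifier,
  # just as aws_iam_role_policy_attachment.ecs_service does above.
  role       = aws_iam_role.service["ecs-tasks.amazonaws.com"].name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
}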

We could add more service identifiers to the for_each in data "aws_iam_policy_document" "assume_role", or even factor this pattern out into a module that takes a set of service identifiers as an input variable and exports the role names via output values, if you find yourself creating many of these roles; a sketch of such a module follows.
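A minimal sketch of that module, assuming a local directory such as modules/service-roles (the module path, variable names, and outputs here are illustrative, not part of the original answer):

# modules/service-roles/main.tf (hypothetical layout)
variable "name_prefix" {
  type        = string
  description = "Prefix for the generated role names, e.g. the flavor."
}

variable "service_identifiers" {
  type        = set(string)
  description = "Service principals allowed to assume the generated roles."
}

data "aws_iam_policy_document" "assume_role" {
  for_each = var.service_identifiers

  statement {
    actions = ["sts:AssumeRole"]
    principals {
      type        = "Service"
      identifiers = [each.value]
    }
  }
}

resource "aws_iam_role" "service" {
  for_each = data.aws_iam_policy_document.assume_role

  name               = "${var.name_prefix}-${each.key}-service-role"
  assume_role_policy = each.value.json
}

# Export the generated role names and ARNs, keyed by service identifier.
output "role_names" {
  value = { for key, role in aws_iam_role.service : key => role.name }
}

output "role_arns" {
  value = { for key, role in aws_iam_role.service : key => role.arn }
}

The root module would then need only one call per set of roles:

module "ecs_roles" {
  source              = "./modules/service-roles"
  name_prefix         = var.flavor
  service_identifiers = ["ecs.amazonaws.com", "ecs-tasks.amazonaws.com"]
}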
