Terraform RabbitMQ

Problem description

I have this RabbitMQ module, which works fine when deployed (terraform apply):

locals {
  facts = var.tags
}

# cluster access definition
data "tls_certificate" "eks_oidc_thumbprint" {
  url = var.eks_cluster_data.oidc_issuer_url
}


#########################################
### Policy and role setup

# Create OIDC provider
resource "aws_iam_openid_connect_provider" "oidc_provider" {
  url = data.tls_certificate.eks_oidc_thumbprint.url
  client_id_list = ["sts.amazonaws.com"]
  thumbprint_list = [data.tls_certificate.eks_oidc_thumbprint.certificates[0].sha1_fingerprint]

  tags = merge({ component = "oidc-provider" }, local.facts)
}


# IAM Policy for EBS CSI Driver
resource "aws_iam_policy" "ebs_csi_driver_policy" {
  name        = "AmazonEKS_EBS_CSI_Driver_Policy_${local.facts.env}"
  description = "Policy for EKS EBS CSI Driver"
  policy      = file("${path.module}/ebs_csi/ebs-csi-policy.json")  

  tags = merge({ component = "ebs-csi-driver-policy" }, local.facts)
}

# IAM Role for EBS CSI Driver
resource "aws_iam_role" "ebs_csi_driver_role" {
    name                = "AmazonEKS_EBS_CSI_DriverRole_${local.facts.env}"
    assume_role_policy  = jsonencode({
        Version = "2012-10-17"
        Statement = [
         {
            Effect = "Allow"
            Principal = {
                Federated = aws_iam_openid_connect_provider.oidc_provider.arn
            }
            Action = "sts:AssumeRoleWithWebIdentity"
            Condition = {
                StringEquals = {
                    "${replace(data.tls_certificate.eks_oidc_thumbprint.url, "https://", "")}:sub" = "system:serviceaccount:kube-system:ebs-csi-${local.facts.env}-controller-sa"
                }
            }
         }
        ]
    })

    tags = merge({ component = "ebs-csi-driver-role" }, local.facts)
}

# Attach Policy to Role
resource "aws_iam_role_policy_attachment" "attach_ebs_csi_driver_policy" {
  role       = aws_iam_role.ebs_csi_driver_role.name
  policy_arn = aws_iam_policy.ebs_csi_driver_policy.arn
}
# Kubernetes Service Account for EBS CSI Driver with IAM Role annotation
resource "kubernetes_service_account" "ebs_csi_controller_sa" {
  metadata {
    name      = "ebs-csi-${local.facts.env}-controller-sa"
    namespace = "kube-system"
    annotations = {
      "eks.amazonaws.com/role-arn" = aws_iam_role.ebs_csi_driver_role.arn
    }
    labels = merge(local.facts, {
      component = "ebs-csi-controller-sa"
    })
  }
}



#########################################
### RabbitMQ Volumes prep

# Define AWS EBS Volumes
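# Note: an EBS volume is pinned to a single AZ at creation time, so one
# volume is created per entry in var.azs; a pod can only attach a volume
# that lives in the same AZ as its node.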
resource "aws_ebs_volume" "rabbitmq" {
  count             = length(var.azs)
  availability_zone = element(var.azs, count.index)
  size              = 8
  type              = "gp2"

  tags = merge(local.facts, {
    component = "rabbitmq-ebs",
    volume_id = "volume-${count.index}"
  })
}

# Define Kubernetes Storage Class
resource "kubernetes_storage_class" "rabbitmq" {
  count = length(var.azs)

  metadata {
    name = "rabbitmq-${local.facts.env}-storage-class-${count.index}"
    labels = merge(local.facts, {
      component = "rabbitmq-storage-class"
    })
  }

  storage_provisioner = "ebs.csi.aws.com"

  parameters = {
    type = "gp2"
  }
}

# Define Kubernetes Persistent Volumes
resource "kubernetes_persistent_volume" "rabbitmq" {
  count = length(var.azs)

  metadata {
    name = "rabbitmq-${local.facts.env}-pv-${count.index}"
    labels = merge(local.facts, {
      component = "rabbitmq-persistent-volume"
    })
  }

  spec {
    capacity = {
      storage = "8Gi"
    }

    access_modes = ["ReadWriteOnce"]

    persistent_volume_reclaim_policy = "Delete"

    storage_class_name = kubernetes_storage_class.rabbitmq[count.index].metadata[0].name

    persistent_volume_source {
      aws_elastic_block_store {
        volume_id = aws_ebs_volume.rabbitmq[count.index].id
        fs_type   = "ext4"
      }
    }
  }

  # Ensure Helm release for EBS CSI driver is destroyed after PV
  # depends_on = [
  #   aws_ebs_volume.rabbitmq,
  #   # kubernetes_persistent_volume_claim.rabbitmq,
  # ]
}

# Define Kubernetes Persistent Volume Claims
resource "kubernetes_persistent_volume_claim" "rabbitmq" {
  count = length(var.azs)

  metadata {
    name      = "rabbitmq-${local.facts.env}-pvc-${count.index}"
    namespace = "infra"
    labels = merge(local.facts, {
      component = "rabbitmq-persistent-volume-claim"
    })
  }

  spec {
    access_modes = ["ReadWriteOnce"]

    resources {
      requests = {
        storage = "8Gi"
      }
    }

    storage_class_name = kubernetes_storage_class.rabbitmq[count.index].metadata[0].name
  }

  depends_on = [
    kubernetes_persistent_volume.rabbitmq,
  ]
}


#########################################
### Enable AWS EBS CSI driver
resource "helm_release" "aws_ebs_csi_driver" {
  name       = "aws-ebs-csi-driver"
  repository = "https://kubernetes-sigs.github.io/aws-ebs-csi-driver"
  chart      = "aws-ebs-csi-driver"
  namespace  = "kube-system"
  version    = "2.20.0"  

  set {
    name  = "controller.replicas"
    value = "2"
  }

  set {
    name  = "controller.serviceAccount.create"
    value = "false" # use existing service account
  }

  set {
    name  = "controller.serviceAccount.name"
    value = "ebs-csi-${local.facts.env}-controller-sa"
  }

  set {
    name  = "node.serviceAccount.create"
    value = "true"
  }

  set {
    name  = "node.serviceAccount.name"
    value = "ebs-csi-node-sa"
  }

  set {
    name  = "region"
    value = var.aws_region
  }

  set {
    name  = "enableVolumeScheduling"
    value = "true"
  }

  set {
    name  = "enableVolumeResizing"
    value = "true"
  }

  set {
    name  = "enableVolumeSnapshot"
    value = "true"
  }

  depends_on = [
    kubernetes_persistent_volume.rabbitmq, 
    kubernetes_persistent_volume_claim.rabbitmq,
  ]

}

#########################################
### Prepare Queues
resource "kubernetes_secret" "load-definition" {
  metadata {
    name      = "load-definition"
    namespace = "infra"
    labels = merge(local.facts, {
      component = "rabbitmq-load-definition"
    })
  }

  data = {
    "load_definitions.json" = file("${path.module}/rabbit_queues/definitions.json")
  }

}


#########################################
### Deploy RabbitMQ
resource "helm_release" "rabbitmq" {
  name             = "rabbit-rabbitmq"
  repository       = "https://charts.bitnami.com/bitnami"
  chart            = "rabbitmq"
  namespace        = "infra"
  create_namespace = true
  version          = "14.4.0"

  set {
    name  = "auth.username"
    value = "fbmquser"
  }
  set_sensitive {
    name  = "auth.password"
    value = var.rabbitmq_password
  }
  set_sensitive {
    name  = "auth.erlangCookie"
    value = var.rabbitmq_erlang_cookie
  }
  set {
    name  = "service.type"
    value = "ClusterIP"
  }
  set {
    name  = "replicaCount"
    value = "1"
  }
  set {
    name  = "persistence.enabled"
    value = "true"
  }

  set {
    name  = "persistence.existingClaim"
    value = "rabbitmq-${local.facts.env}-pvc-1" # simplifies the setup; deploying replicas to all AZs may be needed later
  }
  set {
    name  = "persistence.storageClass"
    value = "rabbitmq-${local.facts.env}-storage-class-1" # simplifies the setup; deploying replicas to all AZs may be needed later
  }
  
  # Set extra configuration for default vhost and permissions
  set {
    name  = "extraConfiguration"
    value = <<-EOF
      default_vhost = /
      default_permissions.configure = .*
      default_permissions.read = .*
      default_permissions.write = .*
    EOF
  }

  # load definitions / create queues
  set {
    name  = "loadDefinition.enabled"
    value = "true"
  }
  set {
    name  = "loadDefinition.existingSecret"
    value = "load-definition"
  }
  set {
    name  = "loadDefinition.file"
    value = "/app/load_definitions.json"
  }

  # Set graceful termination period for RabbitMQ pods
  set {
    name  = "terminationGracePeriodSeconds"
    value = "60"
  }
  
  depends_on = [
     kubernetes_persistent_volume_claim.rabbitmq
  ]
}

But when I destroy the module, it gets stuck on pvc-1, pv-1 and the volume (it cannot detach)...

The problem is that I create two of each, one per availability zone... but only attach one volume...

Hence:

  set {
    name  = "persistence.existingClaim"
    value = "rabbitmq-${local.facts.env}-pvc-1" # simplifies the setup; deploying replicas to all AZs may be needed later
  }
  set {
    name  = "persistence.storageClass"
    value = "rabbitmq-${local.facts.env}-storage-class-1" # simplifies the setup; deploying replicas to all AZs may be needed later
  }

I tried rewriting the module so it only uses one... but then its availability zone didn't match the EC2 instance's (EC2 vs. EBS: their AZs don't match)... and hardcoding it doesn't sound like a good idea...

The best solution would probably be to replicate across all availability zones... but I'm not sure how...
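For context, one direction I have been considering, though I have not verified it against this module: a storage class with WaitForFirstConsumer volume binding delays provisioning until a pod is scheduled, so the CSI driver creates each volume in the AZ where the pod actually lands, and no per-AZ volumes or claims need to be pre-created. A sketch, with a made-up class name:

# A single storage class with delayed binding: each EBS volume is
# provisioned in whatever AZ the scheduler places the RabbitMQ pod,
# so node and volume AZs always match.
resource "kubernetes_storage_class" "rabbitmq_wffc" {
  metadata {
    name = "rabbitmq-${local.facts.env}-wffc" # hypothetical name
  }
  storage_provisioner = "ebs.csi.aws.com"
  volume_binding_mode = "WaitForFirstConsumer" # provision only once a pod is scheduled
  reclaim_policy      = "Delete"
  parameters = {
    type = "gp2"
  }
}

# ... and in helm_release.rabbitmq, let the StatefulSet's
# volumeClaimTemplates create one PVC per replica instead of pinning
# an existingClaim:
#
#   set {
#     name  = "replicaCount"
#     value = length(var.azs)
#   }
#   set {
#     name  = "persistence.storageClass"
#     value = kubernetes_storage_class.rabbitmq_wffc.metadata[0].name
#   }

Spreading the replicas across AZs would still be up to the scheduler (e.g. via topology spread constraints), so this is only a starting point.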

I tried:

  set {
    name  = "replicaCount"
    value = length(var.azs)
  }
  set {
    name  = "persistence.enabled"
    value = "true"
  }

  dynamic "set" {
    for_each = var.azs
    content {
      # set.key is the index of the current element of var.azs
      # (count.index is not available inside a dynamic block)
      name  = "persistence.existingClaim[${set.key}]"
      value = "rabbitmq-${local.facts.env}-pvc-${set.key}"
    }
  }

  dynamic "set" {
    for_each = var.azs
    content {
      name  = "persistence.storageClass[${set.key}]"
      value = "rabbitmq-${local.facts.env}-storage-class-${set.key}"
    }
  }
  

But that doesn't work either :(

Any idea what I'm missing? Many thanks.

kubernetes terraform rabbitmq kubernetes-helm
1 Answer

when I destroy the module... it gets stuck on pvc-1, pv-1 and the volume (cannot detach)

That is because those volumes are still in use. You can declare the dependency between these resources with depends_on; that way, when you run terraform destroy, the volume claims are only destroyed once the rabbitmq release has been uninstalled, which detaches the volumes.

resource "helm_release" "rabbitmq" {
  name             = "rabbit-rabbitmq"
  ...
  depends_on = [kubernetes_persistent_volume_claim.rabbitmq]
}
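Note that the implicit references in the question's module already create the rest of the chain: kubernetes_persistent_volume_claim.rabbitmq has an explicit depends_on on the PVs, and each PV references aws_ebs_volume.rabbitmq[count.index].id, which gives Terraform the PV-to-EBS edge. A sketch of the effective destroy order once the Helm-release-to-PVC edge is in place:

# terraform destroy unwinds the graph in reverse dependency order:
#   1. helm_release.rabbitmq                       (chart uninstalled, pods stop, volumes detach)
#   2. kubernetes_persistent_volume_claim.rabbitmq
#   3. kubernetes_persistent_volume.rabbitmq
#   4. aws_ebs_volume.rabbitmq                     (now unattached, so deletion succeeds)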