From 49b9506659c7ebb44b478a06cd003c6376b83e0a Mon Sep 17 00:00:00 2001 From: Martin Odstrcilik Date: Fri, 24 Nov 2023 14:17:38 +0100 Subject: [PATCH] feat: update karpenter to 0.32 --- .github/workflows/pre-commit.yml | 2 +- .pre-commit-config.yaml | 10 +- .tflint.hcl | 5 +- .tool-versions | 6 + README.md | 93 ++++++--- argo-crds.tf | 62 ++++++ argo-helm-crds.tf | 162 +++++++++++++++ argo-helm.tf | 25 ++- argo.tf | 9 +- examples/basic/main.tf | 10 +- helm.tf | 76 ++++++- iam-0.29.x.tf | 113 +++++++++++ iam.tf | 330 ++++++++++++++++++++++++++----- interruption.tf | 9 +- ll-logo.png | Bin 12095 -> 0 bytes migrations.tf | 9 + outputs.tf | 6 +- requirements.txt | 2 +- values.tf | 35 ++-- variables-crds.tf | 227 +++++++++++++++++++++ variables.tf | 14 +- versions.tf | 6 +- 22 files changed, 1101 insertions(+), 110 deletions(-) create mode 100644 .tool-versions create mode 100644 argo-crds.tf create mode 100644 argo-helm-crds.tf create mode 100644 iam-0.29.x.tf delete mode 100644 ll-logo.png create mode 100644 migrations.tf create mode 100644 variables-crds.tf diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index bbbd33f..cb202ac 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -13,7 +13,7 @@ on: env: PYTHON_VERSION: "3.10" TERRAFORM_DOCS_VERSION: "v0.16.0" - TFLINT_VERSION: "v0.40.1" + TFLINT_VERSION: "v0.48.0" jobs: pre-commit: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2f4c9dd..18025ae 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: - id: trailing-whitespace - id: check-merge-conflict @@ -10,18 +10,22 @@ repos: - id: end-of-file-fixer - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.75.0 + rev: v1.83.1 hooks: - id: terraform_fmt - id: terraform_tflint + args: + - --args=--config=__GIT_WORKING_DIR__/.tflint.hcl - id: terraform_validate - id: terraform_checkov + args: + - '--args=--skip-check CKV_TF_1' #CKV_TF_1: "Ensure Terraform module sources use a commit hash" - id: terraform_docs args: - '--args=--config=.terraform-docs.yml' - repo: https://github.com/Yelp/detect-secrets - rev: v1.3.0 + rev: v1.4.0 hooks: - id: detect-secrets args: ['--baseline', '.secrets.baseline'] diff --git a/.tflint.hcl b/.tflint.hcl index 372282e..543ca2b 100644 --- a/.tflint.hcl +++ b/.tflint.hcl @@ -1,11 +1,12 @@ plugin "terraform" { enabled = true - version = "0.1.1" + version = "0.4.0" source = "github.com/terraform-linters/tflint-ruleset-terraform" preset = "recommended" } + plugin "aws" { enabled = true - version = "0.17.0" + version = "0.26.0" source = "github.com/terraform-linters/tflint-ruleset-aws" } diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 0000000..81b8328 --- /dev/null +++ b/.tool-versions @@ -0,0 +1,6 @@ +terraform 1.1.7 +terraform-docs 0.16.0 +tflint 0.48.0 +checkov 2.4.25 +awscli 2.13.15 +pre-commit 3.4.0 diff --git a/README.md b/README.md index b4e0ea9..b60fa1b 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # AWS EKS Karpenter Terraform module -[![labyrinth labs logo](ll-logo.png)](https://lablabs.io/) +[](https://lablabs.io/) -We help companies build, run, deploy and scale software and infrastructure by embracing the right technologies and principles. Check out our website at +We help companies build, run, deploy and scale software and infrastructure by embracing the right technologies and principles. 
Check out our website at https://lablabs.io/ --- @@ -20,29 +20,26 @@ Check out other [terraform kubernetes addons](https://github.com/orgs/lablabs/re ## Deployment methods ### Helm +Deploy Helm chart via Helm resource (default method, set `enabled = true`) -Deploy helm chart by helm (default method, set `enabled = true`) +### Argo Kubernetes +Deploy Helm chart as ArgoCD Application via Kubernetes manifest resource (set `enabled = true` and `argo_enabled = true`) -### Argo kubernetes +> **Warning** +> +> When deploying with ArgoCD application, Kubernetes terraform provider requires access to Kubernetes cluster API during plan time. This introduces potential issue when you want to deploy the cluster with this addon at the same time, during the same Terraform run. +> +> To overcome this issue, the module deploys the ArgoCD application object using the Helm provider, which does not require API access during plan. If you want to deploy the application using this workaround, you can set the `argo_helm_enabled` variable to `true`. -Deploy helm chart as argo application by kubernetes manifest (set `enabled = true` and `argo_enabled = true`) +### Argo Helm +Deploy Helm chart as ArgoCD Application via Helm resource (set `enabled = true`, `argo_enabled = true` and `argo_helm_enabled = true`) -### Argo helm - -When deploying with ArgoCD application, Kubernetes terraform provider requires access to Kubernetes cluster API during plan time. This introduces potential issue when you want to deploy the cluster with this addon at the same time, during the same Terraform run. - -To overcome this issue, the module deploys the ArgoCD application object using the Helm provider, which does not require API access during plan. If you want to deploy the application using this workaround, you can set the `argo_helm_enabled` variable to `true`. - -Create helm release resource and deploy it as argo application (set `enabled = true`, `argo_enabled = true` and `argo_helm_enabled = true`) - - +To disable of creation IRSA role and IRSA policy, set `irsa_role_create = false` and `irsa_policy_enabled = false`, respectively - +To assume role set `irsa_assume_role_enabled = true` and specify `irsa_assume_role_arn` variable ## Repository configuration @@ -86,10 +83,11 @@ See [Basic example](examples/basic/README.md) for further information. | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | +| [terraform](#requirement\_terraform) | >= 1.1 | | [aws](#requirement\_aws) | >= 4.19.0 | | [helm](#requirement\_helm) | >= 2.6.0 | | [kubernetes](#requirement\_kubernetes) | >= 2.11.0 | +| [time](#requirement\_time) | >= 0.9.0 | | [utils](#requirement\_utils) | >= 0.17.0 | ## Modules @@ -103,27 +101,39 @@ No modules. 
| [aws_cloudwatch_event_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | | [aws_cloudwatch_event_target.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | | [aws_iam_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.this_0_29_x](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.this_0_29_x](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_sqs_queue.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | | [aws_sqs_queue_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource | | [helm_release.argo_application](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | -| [helm_release.this](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.controller](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.crds](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.crds_argo_application](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [kubernetes_job.crds_helm_argo_application_wait](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/job) | resource | | [kubernetes_job.helm_argo_application_wait](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/job) | resource | -| [kubernetes_manifest.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [kubernetes_manifest.controller](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [kubernetes_manifest.crds](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [kubernetes_role.crds_helm_argo_application_wait](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/role) | resource | | [kubernetes_role.helm_argo_application_wait](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/role) | resource | +| [kubernetes_role_binding.crds_helm_argo_application_wait](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/role_binding) | resource | | [kubernetes_role_binding.helm_argo_application_wait](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/role_binding) | resource | +| 
[kubernetes_service_account.crds_helm_argo_application_wait](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | | [kubernetes_service_account.helm_argo_application_wait](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | | [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | | [aws_iam_policy_document.queue](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.this_0_29_x](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.this_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.this_irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | | [aws_region.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | | [utils_deep_merge_yaml.argo_helm_values](https://registry.terraform.io/providers/cloudposse/utils/latest/docs/data-sources/deep_merge_yaml) | data source | +| [utils_deep_merge_yaml.crds_argo_helm_values](https://registry.terraform.io/providers/cloudposse/utils/latest/docs/data-sources/deep_merge_yaml) | data source | +| [utils_deep_merge_yaml.crds_values](https://registry.terraform.io/providers/cloudposse/utils/latest/docs/data-sources/deep_merge_yaml) | data source | | [utils_deep_merge_yaml.values](https://registry.terraform.io/providers/cloudposse/utils/latest/docs/data-sources/deep_merge_yaml) | data source | ## Inputs @@ -139,7 +149,9 @@ No modules. | [argo\_helm\_enabled](#input\_argo\_helm\_enabled) | If set to true, the ArgoCD Application manifest will be deployed using Kubernetes provider as a Helm release. Otherwise it'll be deployed as a Kubernetes manifest. See Readme for more info | `bool` | `false` | no | | [argo\_helm\_values](#input\_argo\_helm\_values) | Value overrides to use when deploying argo application object with helm | `string` | `""` | no | | [argo\_helm\_wait\_backoff\_limit](#input\_argo\_helm\_wait\_backoff\_limit) | Backoff limit for ArgoCD Application Helm release wait job | `number` | `6` | no | +| [argo\_helm\_wait\_node\_selector](#input\_argo\_helm\_wait\_node\_selector) | Node selector for ArgoCD Application Helm release wait job | `map(string)` | `{}` | no | | [argo\_helm\_wait\_timeout](#input\_argo\_helm\_wait\_timeout) | Timeout for ArgoCD Application Helm release wait job | `string` | `"10m"` | no | +| [argo\_helm\_wait\_tolerations](#input\_argo\_helm\_wait\_tolerations) | Tolerations for ArgoCD Application Helm release wait job | `list(any)` | `[]` | no | | [argo\_info](#input\_argo\_info) | ArgoCD info manifest parameter |
<pre>list(object({<br>  name  = string<br>  value = string<br>}))</pre> | <pre>[<br>  {<br>    "name": "terraform",<br>    "value": "true"<br>  }<br>]</pre>
| no | | [argo\_kubernetes\_manifest\_computed\_fields](#input\_argo\_kubernetes\_manifest\_computed\_fields) | List of paths of fields to be handled as "computed". The user-configured value for the field will be overridden by any different value returned by the API after apply. | `list(string)` |
<pre>[<br>  "metadata.labels",<br>  "metadata.annotations",<br>  "metadata.finalizers"<br>]</pre>
| no | | [argo\_kubernetes\_manifest\_field\_manager\_force\_conflicts](#input\_argo\_kubernetes\_manifest\_field\_manager\_force\_conflicts) | Forcibly override any field manager conflicts when applying the kubernetes manifest resource | `bool` | `false` | no | @@ -151,10 +163,47 @@ No modules. | [argo\_spec](#input\_argo\_spec) | ArgoCD Application spec configuration. Override or create additional spec parameters | `any` | `{}` | no | | [argo\_sync\_policy](#input\_argo\_sync\_policy) | ArgoCD syncPolicy manifest parameter | `any` | `{}` | no | | [aws\_partition](#input\_aws\_partition) | AWS partition in which the resources are located. Avaliable values are `aws`, `aws-cn`, `aws-us-gov` | `string` | `"aws"` | no | +| [crds\_argo\_helm\_values](#input\_crds\_argo\_helm\_values) | Value overrides to use when deploying argo application object with helm | `string` | `""` | no | +| [crds\_argo\_kubernetes\_manifest\_computed\_fields](#input\_crds\_argo\_kubernetes\_manifest\_computed\_fields) | List of paths of fields to be handled as "computed". The user-configured value for the field will be overridden by any different value returned by the API after apply. | `list(string)` |
<pre>[<br>  "metadata.labels",<br>  "metadata.annotations",<br>  "metadata.finalizers"<br>]</pre>
| no | +| [crds\_argo\_kubernetes\_manifest\_field\_manager\_force\_conflicts](#input\_crds\_argo\_kubernetes\_manifest\_field\_manager\_force\_conflicts) | Forcibly override any field manager conflicts when applying the kubernetes manifest resource | `bool` | `false` | no | +| [crds\_argo\_kubernetes\_manifest\_field\_manager\_name](#input\_crds\_argo\_kubernetes\_manifest\_field\_manager\_name) | The name of the field manager to use when applying the kubernetes manifest resource. Defaults to Terraform | `string` | `"Terraform"` | no | +| [crds\_argo\_kubernetes\_manifest\_wait\_fields](#input\_crds\_argo\_kubernetes\_manifest\_wait\_fields) | A map of fields and a corresponding regular expression with a pattern to wait for. The provider will wait until the field matches the regular expression. Use * for any value. | `map(string)` | `{}` | no | +| [crds\_argo\_metadata](#input\_crds\_argo\_metadata) | ArgoCD Application metadata configuration. Override or create additional metadata parameters | `any` |
<pre>{<br>  "finalizers": [<br>    "resources-finalizer.argocd.argoproj.io"<br>  ]<br>}</pre>
| no | +| [crds\_argo\_spec](#input\_crds\_argo\_spec) | ArgoCD Application spec configuration. Override or create additional spec parameters | `any` | `{}` | no | +| [crds\_argo\_sync\_policy](#input\_crds\_argo\_sync\_policy) | ArgoCD syncPolicy manifest parameter | `any` | `{}` | no | +| [crds\_helm\_atomic](#input\_crds\_helm\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used | `bool` | `false` | no | +| [crds\_helm\_chart\_name](#input\_crds\_helm\_chart\_name) | Helm chart name to be installed | `string` | `"karpenter-crd"` | no | +| [crds\_helm\_chart\_version](#input\_crds\_helm\_chart\_version) | Version of the Helm chart | `string` | `"v0.33.1"` | no | +| [crds\_helm\_cleanup\_on\_fail](#input\_crds\_helm\_cleanup\_on\_fail) | Allow deletion of new resources created in this helm upgrade when upgrade fails | `bool` | `false` | no | +| [crds\_helm\_dependency\_update](#input\_crds\_helm\_dependency\_update) | Runs helm dependency update before installing the chart | `bool` | `false` | no | +| [crds\_helm\_description](#input\_crds\_helm\_description) | Set helm release description attribute (visible in the history) | `string` | `""` | no | +| [crds\_helm\_devel](#input\_crds\_helm\_devel) | Use helm chart development versions, too. Equivalent to version '>0.0.0-0'. If version is set, this is ignored | `bool` | `false` | no | +| [crds\_helm\_disable\_openapi\_validation](#input\_crds\_helm\_disable\_openapi\_validation) | If set, the installation process will not validate rendered helm templates against the Kubernetes OpenAPI Schema | `bool` | `false` | no | +| [crds\_helm\_disable\_webhooks](#input\_crds\_helm\_disable\_webhooks) | Prevent helm chart hooks from running | `bool` | `false` | no | +| [crds\_helm\_force\_update](#input\_crds\_helm\_force\_update) | Force helm resource update through delete/recreate if needed | `bool` | `false` | no | +| [crds\_helm\_keyring](#input\_crds\_helm\_keyring) | Location of public keys used for verification. Used only if helm\_package\_verify is true | `string` | `"~/.gnupg/pubring.gpg"` | no | +| [crds\_helm\_lint](#input\_crds\_helm\_lint) | Run the helm chart linter during the plan | `bool` | `false` | no | +| [crds\_helm\_package\_verify](#input\_crds\_helm\_package\_verify) | Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart | `bool` | `false` | no | +| [crds\_helm\_postrender](#input\_crds\_helm\_postrender) | Value block with a path to a binary file to run after helm renders the manifest which can alter the manifest contents | `map(any)` | `{}` | no | +| [crds\_helm\_recreate\_pods](#input\_crds\_helm\_recreate\_pods) | Perform pods restart during helm upgrade/rollback | `bool` | `false` | no | +| [crds\_helm\_release\_max\_history](#input\_crds\_helm\_release\_max\_history) | Maximum number of release versions stored per release | `number` | `0` | no | +| [crds\_helm\_release\_name](#input\_crds\_helm\_release\_name) | Helm release name | `string` | `"karpenter-crds"` | no | +| [crds\_helm\_render\_subchart\_notes](#input\_crds\_helm\_render\_subchart\_notes) | If set, render helm subchart notes along with the parent | `bool` | `true` | no | +| [crds\_helm\_replace](#input\_crds\_helm\_replace) | Re-use the given name of helm release, only if that name is a deleted release which remains in the history. 
This is unsafe in production | `bool` | `false` | no | +| [crds\_helm\_reset\_values](#input\_crds\_helm\_reset\_values) | When upgrading, reset the values to the ones built into the helm chart | `bool` | `false` | no | +| [crds\_helm\_reuse\_values](#input\_crds\_helm\_reuse\_values) | When upgrading, reuse the last helm release's values and merge in any overrides. If 'helm\_reset\_values' is specified, this is ignored | `bool` | `false` | no | +| [crds\_helm\_set\_sensitive](#input\_crds\_helm\_set\_sensitive) | Value block with custom sensitive values to be merged with the values yaml that won't be exposed in the plan's diff | `map(any)` | `{}` | no | +| [crds\_helm\_skip\_crds](#input\_crds\_helm\_skip\_crds) | If set, no CRDs will be installed before helm release | `bool` | `false` | no | +| [crds\_helm\_timeout](#input\_crds\_helm\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks) | `number` | `300` | no | +| [crds\_helm\_wait](#input\_crds\_helm\_wait) | Will wait until all helm release resources are in a ready state before marking the release as successful. It will wait for as long as timeout | `bool` | `true` | no | +| [crds\_helm\_wait\_for\_jobs](#input\_crds\_helm\_wait\_for\_jobs) | If wait is enabled, will wait until all helm Jobs have been completed before marking the release as successful. It will wait for as long as timeout | `bool` | `false` | no | +| [crds\_settings](#input\_crds\_settings) | Additional helm sets which will be passed to the Helm chart values, see https://github.com/aws/karpenter/tree/main/charts/karpenter-crd | `map(any)` | `{}` | no | +| [crds\_values](#input\_crds\_values) | Additional yaml encoded values which will be passed to the Helm chart, see https://github.com/aws/karpenter/tree/main/charts/karpenter-crd | `string` | `""` | no | +| [enable\_0\_29\_x\_support](#input\_enable\_0\_29\_x\_support) | Whether to enable 0.29.x support | `bool` | `false` | no | | [enabled](#input\_enabled) | Variable indicating whether deployment is enabled | `bool` | `true` | no | | [helm\_atomic](#input\_helm\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used | `bool` | `false` | no | | [helm\_chart\_name](#input\_helm\_chart\_name) | Helm chart name to be installed | `string` | `"karpenter"` | no | -| [helm\_chart\_version](#input\_helm\_chart\_version) | Version of the Helm chart | `string` | `"0.28.0"` | no | +| [helm\_chart\_version](#input\_helm\_chart\_version) | Version of the Helm chart | `string` | `"v0.32.5"` | no | | [helm\_cleanup\_on\_fail](#input\_helm\_cleanup\_on\_fail) | Allow deletion of new resources created in this helm upgrade when upgrade fails | `bool` | `false` | no | | [helm\_create\_namespace](#input\_helm\_create\_namespace) | Create the namespace if it does not yet exist | `bool` | `true` | no | | [helm\_dependency\_update](#input\_helm\_dependency\_update) | Runs helm dependency update before installing the chart | `bool` | `false` | no | @@ -209,7 +258,7 @@ No modules. 
|------|-------------| | [helm\_release\_application\_metadata](#output\_helm\_release\_application\_metadata) | Argo application helm release attributes | | [helm\_release\_metadata](#output\_helm\_release\_metadata) | Helm release attributes | -| [iam\_irsa\_role\_attributes](#output\_iam\_irsa\_role\_attributes) | karpenter IAM role atributes | +| [iam\_irsa\_role\_attributes](#output\_iam\_irsa\_role\_attributes) | Karpenter IAM role attributes | | [kubernetes\_application\_attributes](#output\_kubernetes\_application\_attributes) | Argo kubernetes manifest attributes | diff --git a/argo-crds.tf b/argo-crds.tf new file mode 100644 index 0000000..0dfe663 --- /dev/null +++ b/argo-crds.tf @@ -0,0 +1,62 @@ +locals { + crds_argo_application_enabled = var.enabled && var.argo_enabled && !var.argo_helm_enabled && !var.helm_skip_crds + crds_argo_application_metadata = { + "labels" : try(var.crds_argo_metadata.labels, {}), + "annotations" : try(var.crds_argo_metadata.annotations, {}), + "finalizers" : try(var.crds_argo_metadata.finalizers, []) + } + crds_argo_application_values = { + "project" : var.argo_project + "source" : { + "repoURL" : var.helm_repo_oci ? local.helm_repo_url : "https://${local.helm_repo_url}" + "chart" : var.crds_helm_chart_name + "targetRevision" : var.crds_helm_chart_version + "helm" : { + "releaseName" : var.crds_helm_release_name + "parameters" : length(var.crds_settings) == 0 ? null : [for k, v in var.crds_settings : tomap({ "forceString" : true, "name" : k, "value" : v })] + "values" : var.enabled ? data.utils_deep_merge_yaml.crds_values[0].output : "" + } + } + "destination" : { + "server" : var.argo_destination_server + "namespace" : var.namespace + } + "syncPolicy" : var.crds_argo_sync_policy + "info" : var.argo_info + } + crds_argo_kubernetes_manifest_wait_fields = merge( + { + "status.sync.status" = "Synced" + "status.health.status" = "Healthy" + "status.operationState.phase" = "Succeeded" + }, + var.crds_argo_kubernetes_manifest_wait_fields + ) +} + +resource "kubernetes_manifest" "crds" { + count = local.crds_argo_application_enabled ? 1 : 0 + manifest = { + "apiVersion" = var.argo_apiversion + "kind" = "Application" + "metadata" = merge( + local.crds_argo_application_metadata, + { "name" = var.crds_helm_release_name }, + { "namespace" = var.argo_namespace }, + ) + "spec" = merge( + local.crds_argo_application_values, + var.crds_argo_spec + ) + } + computed_fields = var.crds_argo_kubernetes_manifest_computed_fields + + field_manager { + name = var.crds_argo_kubernetes_manifest_field_manager_name + force_conflicts = var.crds_argo_kubernetes_manifest_field_manager_force_conflicts + } + + wait { + fields = local.crds_argo_kubernetes_manifest_wait_fields + } +} diff --git a/argo-helm-crds.tf b/argo-helm-crds.tf new file mode 100644 index 0000000..ca56588 --- /dev/null +++ b/argo-helm-crds.tf @@ -0,0 +1,162 @@ +locals { + crds_helm_argo_application_enabled = var.enabled && var.argo_enabled && var.argo_helm_enabled && !var.helm_skip_crds + crds_helm_argo_application_wait_enabled = local.crds_helm_argo_application_enabled && length(keys(local.crds_argo_kubernetes_manifest_wait_fields)) > 0 + crds_helm_argo_application_values = [ + one(data.utils_deep_merge_yaml.crds_argo_helm_values[*].output), + var.crds_argo_helm_values + ] +} + +data "utils_deep_merge_yaml" "crds_argo_helm_values" { + count = local.crds_helm_argo_application_enabled ? 
1 : 0 + input = compact([ + yamlencode({ + "apiVersion" : var.argo_apiversion + }), + yamlencode({ + "spec" : local.crds_argo_application_values + }), + yamlencode({ + "spec" : var.crds_argo_spec + }), + yamlencode( + local.crds_argo_application_metadata + ) + ]) +} + +resource "helm_release" "crds_argo_application" { + count = local.crds_helm_argo_application_enabled ? 1 : 0 + + chart = "${path.module}/helm/argocd-application" + name = var.crds_helm_release_name + namespace = var.argo_namespace + + values = local.crds_helm_argo_application_values +} + +resource "kubernetes_role" "crds_helm_argo_application_wait" { + count = local.crds_helm_argo_application_wait_enabled ? 1 : 0 + + metadata { + name = "${var.crds_helm_release_name}-argo-application-wait" + namespace = var.argo_namespace + labels = local.crds_argo_application_metadata.labels + annotations = local.crds_argo_application_metadata.annotations + } + + rule { + api_groups = ["argoproj.io"] + resources = ["applications"] + verbs = ["get", "list", "watch"] + } +} + +resource "kubernetes_role_binding" "crds_helm_argo_application_wait" { + count = local.crds_helm_argo_application_wait_enabled ? 1 : 0 + + metadata { + name = "${var.crds_helm_release_name}-argo-application-wait" + namespace = var.argo_namespace + labels = local.crds_argo_application_metadata.labels + annotations = local.crds_argo_application_metadata.annotations + } + + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "Role" + name = one(kubernetes_role.crds_helm_argo_application_wait[*].metadata[0].name) + } + + subject { + kind = "ServiceAccount" + name = one(kubernetes_service_account.crds_helm_argo_application_wait[*].metadata[0].name) + namespace = one(kubernetes_service_account.crds_helm_argo_application_wait[*].metadata[0].namespace) + } +} + +resource "kubernetes_service_account" "crds_helm_argo_application_wait" { + count = local.crds_helm_argo_application_wait_enabled ? 1 : 0 + + metadata { + name = "${var.crds_helm_release_name}-argo-application-wait" + namespace = var.argo_namespace + labels = local.crds_argo_application_metadata.labels + annotations = local.crds_argo_application_metadata.annotations + } +} + +resource "kubernetes_job" "crds_helm_argo_application_wait" { + count = local.crds_helm_argo_application_wait_enabled ? 1 : 0 + + metadata { + generate_name = "${var.crds_helm_release_name}-argo-application-wait-" + namespace = var.argo_namespace + labels = local.crds_argo_application_metadata.labels + annotations = local.crds_argo_application_metadata.annotations + } + + spec { + template { + metadata { + name = "${var.crds_helm_release_name}-argo-application-wait" + labels = local.crds_argo_application_metadata.labels + annotations = local.crds_argo_application_metadata.annotations + } + + spec { + service_account_name = one(kubernetes_service_account.crds_helm_argo_application_wait[*].metadata[0].name) + + dynamic "container" { + for_each = local.crds_argo_kubernetes_manifest_wait_fields + + content { + name = "${lower(replace(container.key, ".", "-"))}-${md5(jsonencode(local.crds_helm_argo_application_values))}" # md5 suffix is a workaround for https://github.com/hashicorp/terraform-provider-kubernetes/issues/1325 + image = "bitnami/kubectl:latest" + command = ["/bin/bash", "-ecx"] + # Waits for ArgoCD Application to be "Healthy", see https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#wait + # i.e. 
kubectl wait --for=jsonpath='{.status.sync.status}'=Healthy application.argoproj.io <$addon-name> + args = [ + <<-EOT + kubectl wait \ + --namespace ${var.argo_namespace} \ + --for=jsonpath='{.${container.key}}'=${container.value} \ + --timeout=${var.argo_helm_wait_timeout} \ + application.argoproj.io ${var.crds_helm_release_name} + EOT + ] + } + } + + node_selector = var.argo_helm_wait_node_selector + + dynamic "toleration" { + for_each = var.argo_helm_wait_tolerations + + content { + key = try(toleration.value.key, null) + operator = try(toleration.value.operator, null) + value = try(toleration.value.value, null) + effect = try(toleration.value.effect, null) + } + } + + # ArgoCD Application status fields might not be available immediately after creation + restart_policy = "OnFailure" + } + } + + backoff_limit = var.argo_helm_wait_backoff_limit + } + + wait_for_completion = true + + timeouts { + create = var.argo_helm_wait_timeout + update = var.argo_helm_wait_timeout + } + + depends_on = [ + helm_release.crds_argo_application + ] +} diff --git a/argo-helm.tf b/argo-helm.tf index cd57ab5..e075921 100644 --- a/argo-helm.tf +++ b/argo-helm.tf @@ -34,6 +34,10 @@ resource "helm_release" "argo_application" { namespace = var.argo_namespace values = local.helm_argo_application_values + + depends_on = [ + kubernetes_job.crds_helm_argo_application_wait + ] } resource "kubernetes_role" "helm_argo_application_wait" { @@ -80,10 +84,10 @@ resource "kubernetes_service_account" "helm_argo_application_wait" { count = local.helm_argo_application_wait_enabled ? 1 : 0 metadata { - name = "${var.helm_release_name}-argo-application-wait" - namespace = var.argo_namespace - labels = local.argo_application_metadata.labels - annotations = local.argo_application_metadata.annotations + generate_name = "${var.helm_release_name}-argo-application-wait-" + namespace = var.argo_namespace + labels = local.argo_application_metadata.labels + annotations = local.argo_application_metadata.annotations } } @@ -129,6 +133,19 @@ resource "kubernetes_job" "helm_argo_application_wait" { } } + node_selector = var.argo_helm_wait_node_selector + + dynamic "toleration" { + for_each = var.argo_helm_wait_tolerations + + content { + key = try(toleration.value.key, null) + operator = try(toleration.value.operator, null) + value = try(toleration.value.value, null) + effect = try(toleration.value.effect, null) + } + } + # ArgoCD Application status fields might not be available immediately after creation restart_policy = "OnFailure" } diff --git a/argo.tf b/argo.tf index 0dfb673..0351591 100644 --- a/argo.tf +++ b/argo.tf @@ -12,8 +12,9 @@ locals { "targetRevision" : var.helm_chart_version "helm" : { "releaseName" : var.helm_release_name - "parameters" : [for k, v in var.settings : tomap({ "forceString" : true, "name" : k, "value" : v })] + "parameters" : length(var.settings) == 0 ? null : [for k, v in var.settings : tomap({ "forceString" : true, "name" : k, "value" : v })] "values" : var.enabled ? data.utils_deep_merge_yaml.values[0].output : "" + "skipCrds" : true # CRDs are installed in a separate ArgoCD Application } } "destination" : { @@ -25,7 +26,7 @@ locals { } } -resource "kubernetes_manifest" "this" { +resource "kubernetes_manifest" "controller" { count = var.enabled && var.argo_enabled && !var.argo_helm_enabled ? 
1 : 0 manifest = { "apiVersion" = var.argo_apiversion @@ -50,4 +51,8 @@ resource "kubernetes_manifest" "this" { wait { fields = var.argo_kubernetes_manifest_wait_fields } + + depends_on = [ + kubernetes_manifest.crds + ] } diff --git a/examples/basic/main.tf b/examples/basic/main.tf index 18d0643..952d06a 100644 --- a/examples/basic/main.tf +++ b/examples/basic/main.tf @@ -1,8 +1,3 @@ -resource "aws_iam_role" "this" { - name = "karpenter-node-role" - assume_role_policy = data.aws_iam_policy_document.karpenter_node_assume_policy.json -} - data "aws_iam_policy_document" "karpenter_node_assume_policy" { statement { actions = ["sts:AssumeRole"] @@ -15,6 +10,11 @@ data "aws_iam_policy_document" "karpenter_node_assume_policy" { } } +resource "aws_iam_role" "this" { + name = "karpenter-node-role" + assume_role_policy = data.aws_iam_policy_document.karpenter_node_assume_policy.json +} + module "addon_installation_disabled" { source = "../../" diff --git a/helm.tf b/helm.tf index 19d47d3..6723bd9 100644 --- a/helm.tf +++ b/helm.tf @@ -1,4 +1,8 @@ -resource "helm_release" "this" { +locals { + helm_repo_url = trimprefix(var.helm_repo_url, "https://") +} + +resource "helm_release" "controller" { count = var.enabled && !var.argo_enabled ? 1 : 0 chart = var.helm_chart_name create_namespace = var.helm_create_namespace @@ -26,7 +30,7 @@ resource "helm_release" "this" { atomic = var.helm_atomic wait = var.helm_wait wait_for_jobs = var.helm_wait_for_jobs - skip_crds = var.helm_skip_crds + skip_crds = true # CRDs are installed in a separate Helm release render_subchart_notes = var.helm_render_subchart_notes disable_openapi_validation = var.helm_disable_openapi_validation dependency_update = var.helm_dependency_update @@ -60,4 +64,72 @@ resource "helm_release" "this" { binary_path = postrender.value } } + + depends_on = [ + helm_release.crds + ] +} + +resource "helm_release" "crds" { + count = var.enabled && !var.helm_skip_crds && !var.argo_enabled ? 1 : 0 + chart = var.crds_helm_chart_name + create_namespace = var.helm_create_namespace + namespace = var.namespace + name = var.crds_helm_release_name + version = var.crds_helm_chart_version + repository = var.helm_repo_oci ? 
"oci://${local.helm_repo_url}" : "https://${local.helm_repo_url}" + + repository_key_file = var.helm_repo_key_file + repository_cert_file = var.helm_repo_cert_file + repository_ca_file = var.helm_repo_ca_file + repository_username = var.helm_repo_username + repository_password = var.helm_repo_password + devel = var.crds_helm_devel + verify = var.crds_helm_package_verify + keyring = var.crds_helm_keyring + timeout = var.crds_helm_timeout + disable_webhooks = var.crds_helm_disable_webhooks + reset_values = var.crds_helm_reset_values + reuse_values = var.crds_helm_reuse_values + force_update = var.crds_helm_force_update + recreate_pods = var.crds_helm_recreate_pods + cleanup_on_fail = var.crds_helm_cleanup_on_fail + max_history = var.crds_helm_release_max_history + atomic = var.crds_helm_atomic + wait = var.crds_helm_wait + wait_for_jobs = var.crds_helm_wait_for_jobs + skip_crds = var.crds_helm_skip_crds + render_subchart_notes = var.crds_helm_render_subchart_notes + disable_openapi_validation = var.crds_helm_disable_openapi_validation + dependency_update = var.crds_helm_dependency_update + replace = var.crds_helm_replace + description = var.crds_helm_description + lint = var.crds_helm_lint + + values = [ + data.utils_deep_merge_yaml.crds_values[0].output + ] + + dynamic "set" { + for_each = var.crds_settings + content { + name = set.key + value = set.value + } + } + + dynamic "set_sensitive" { + for_each = var.crds_helm_set_sensitive + content { + name = set_sensitive.key + value = set_sensitive.value + } + } + + dynamic "postrender" { + for_each = var.crds_helm_postrender + content { + binary_path = postrender.value + } + } } diff --git a/iam-0.29.x.tf b/iam-0.29.x.tf new file mode 100644 index 0000000..eef2759 --- /dev/null +++ b/iam-0.29.x.tf @@ -0,0 +1,113 @@ +variable "enable_0_29_x_support" { + type = bool + default = false + description = "Whether to enable 0.29.x support" +} + +data "aws_iam_policy_document" "this_0_29_x" { + count = local.irsa_role_create && var.irsa_policy_enabled && !var.irsa_assume_role_enabled && var.enable_0_29_x_support ? 1 : 0 + + #checkov:skip=CKV_AWS_111:In the future, we may further lock down ec2:RunInstances by using tags in related resources. 
+ #checkov:skip=CKV_AWS_356: Describe need to be allowed on all resources + statement { + sid = "NodeResourceCreation" + actions = [ + "ec2:CreateLaunchTemplate", + "ec2:CreateFleet", + "ec2:CreateTags", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSpotPriceHistory", + "pricing:GetProducts", + "ec2:RunInstances" + ] + resources = ["*"] + } + + statement { + sid = "NodeResourceDeletion" + actions = [ + "ec2:TerminateInstances", + ] + + resources = ["*"] + + condition { + test = "StringEquals" + variable = "ec2:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + } + + statement { + sid = "KarpenterResourceDeletion" + actions = [ + "ec2:DeleteLaunchTemplate", + ] + + resources = ["*"] + condition { + test = "StringEquals" + variable = "ec2:ResourceTag/karpenter.k8s.aws/cluster" + values = [var.cluster_name] + } + } + + statement { + sid = "GetParameters" + actions = ["ssm:GetParameter"] + resources = ["arn:aws:ssm:*:*:parameter/aws/service/*"] + } + + statement { + sid = "PassRole" + actions = [ + "iam:PassRole" + ] + resources = var.karpenter_node_role_arns + effect = "Allow" + } + + statement { + sid = "EKSClusterEndpointLookup" + actions = [ + "eks:DescribeCluster" + ] + resources = ["arn:${var.aws_partition}:eks:${data.aws_region.this[0].name}:${data.aws_caller_identity.this[0].id}:cluster/${var.cluster_name}"] + effect = "Allow" + } + + statement { + sid = "HandleInterruptionsQueueMessages" + actions = [ + "sqs:DeleteMessage", + "sqs:GetQueueUrl", + "sqs:GetQueueAttributes", + "sqs:ReceiveMessage", + ] + resources = [aws_sqs_queue.this[0].arn] + } +} + +resource "aws_iam_policy" "this_0_29_x" { + count = local.irsa_role_create && (var.irsa_policy_enabled || var.irsa_assume_role_enabled) && var.enable_0_29_x_support ? 1 : 0 + + name = "${var.irsa_role_name_prefix}-${var.helm_chart_name}-0-29-x" + path = "/" + description = "Policy for Karpenter 0.29.x service" + policy = var.irsa_assume_role_enabled ? data.aws_iam_policy_document.this_assume[0].json : data.aws_iam_policy_document.this_0_29_x[0].json + + tags = var.irsa_tags +} + +resource "aws_iam_role_policy_attachment" "this_0_29_x" { + count = local.irsa_role_create && var.irsa_policy_enabled && var.enable_0_29_x_support ? 1 : 0 + role = aws_iam_role.this[0].name + policy_arn = aws_iam_policy.this_0_29_x[0].arn +} diff --git a/iam.tf b/iam.tf index d798ecd..4c96af4 100644 --- a/iam.tf +++ b/iam.tf @@ -2,100 +2,338 @@ locals { irsa_role_create = var.enabled && var.rbac_create && var.service_account_create && var.irsa_role_create } -data "aws_region" "this" {} +data "aws_region" "this" { + count = var.enabled ? 1 : 0 +} -data "aws_caller_identity" "this" {} +data "aws_caller_identity" "this" { + count = var.enabled ? 1 : 0 +} data "aws_iam_policy_document" "this" { + #checkov:skip=CKV_AWS_111: In the future, we may further lock down ec2:RunInstances by using tags in related resources. + #checkov:skip=CKV_AWS_356: Describe need to be allowed on all resources count = local.irsa_role_create && var.irsa_policy_enabled && !var.irsa_assume_role_enabled ? 1 : 0 - #checkov:skip=CKV_AWS_111:In the future, we may further lock down ec2:RunInstances by using tags in related resources. 
- #checkov:skip=CKV_AWS_356: Describe need to be allowed on all resources + # Aligned with https://github.com/aws/karpenter-provider-aws/blob/v0.32.4/website/content/en/v0.32/getting-started/getting-started-with-karpenter/cloudformation.yaml statement { - sid = "NodeResourceCreation" + sid = "AllowScopedEC2InstanceActions" + effect = "Allow" + + resources = [ + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}::image/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}::snapshot/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:spot-instances-request/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:security-group/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:subnet/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:launch-template/*", + ] + actions = [ - "ec2:CreateLaunchTemplate", + "ec2:RunInstances", "ec2:CreateFleet", - "ec2:CreateTags", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeImages", - "ec2:DescribeInstances", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstanceTypeOfferings", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeSpotPriceHistory", - "pricing:GetProducts", - "ec2:RunInstances" ] - resources = ["*"] } statement { - sid = "NodeResourceDeletion" + sid = "AllowScopedEC2InstanceActionsWithTags" + effect = "Allow" + + resources = [ + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:fleet/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:instance/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:volume/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:network-interface/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:launch-template/*", + ] + actions = [ - "ec2:TerminateInstances", + "ec2:RunInstances", + "ec2:CreateFleet", + "ec2:CreateLaunchTemplate", ] - resources = ["*"] + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.sh/nodepool" + values = ["*"] + } + } + + statement { + sid = "AllowScopedResourceCreationTagging" + effect = "Allow" + + resources = [ + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:fleet/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:instance/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:volume/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:network-interface/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:launch-template/*", + ] + + actions = ["ec2:CreateTags"] condition { test = "StringEquals" - variable = "ec2:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" values = ["owned"] } + + condition { + test = "StringEquals" + variable = "ec2:CreateAction" + + values = [ + "RunInstances", + "CreateFleet", + "CreateLaunchTemplate", + ] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.sh/nodepool" + values = ["*"] + } } + statement { - sid = "KarpenterResourceDeletion" + sid = "AllowScopedResourceTagging" + effect = "Allow" + resources = ["arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:instance/*"] + actions = ["ec2:CreateTags"] + + condition { + test = "StringEquals" + variable = 
"aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.sh/nodepool" + values = ["*"] + } + + condition { + test = "ForAllValues:StringEquals" + variable = "aws:TagKeys" + + values = [ + "karpenter.sh/nodeclaim", + "Name", + ] + } + } + + statement { + sid = "AllowScopedDeletion" + effect = "Allow" + + resources = [ + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:instance/*", + "arn:${var.aws_partition}:ec2:${data.aws_region.this[0].name}:*:launch-template/*", + ] + actions = [ + "ec2:TerminateInstances", "ec2:DeleteLaunchTemplate", ] + condition { + test = "StringEquals" + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.sh/nodepool" + values = ["*"] + } + } + + statement { + sid = "AllowRegionalReadActions" + effect = "Allow" resources = ["*"] + + actions = [ + "ec2:DescribeAvailabilityZones", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + ] + condition { test = "StringEquals" - variable = "ec2:ResourceTag/karpenter.k8s.aws/cluster" - values = [var.cluster_name] + variable = "aws:RequestedRegion" + values = [data.aws_region.this[0].name] } } + statement { - sid = "GetParameters" + sid = "AllowSSMReadActions" + effect = "Allow" + resources = ["arn:${var.aws_partition}:ssm:${data.aws_region.this[0].name}::parameter/aws/service/*"] actions = ["ssm:GetParameter"] - resources = ["arn:aws:ssm:*:*:parameter/aws/service/*"] } statement { - sid = "PassRole" + sid = "AllowPricingReadActions" + effect = "Allow" + resources = ["*"] + actions = ["pricing:GetProducts"] + } + + statement { + sid = "AllowInterruptionQueueActions" + effect = "Allow" + resources = [aws_sqs_queue.this[0].arn] + actions = [ - "iam:PassRole" + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage", ] + } + + statement { + sid = "AllowPassingInstanceRole" + effect = "Allow" resources = var.karpenter_node_role_arns + actions = ["iam:PassRole"] + + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = ["ec2.amazonaws.com"] + } + } + + statement { + sid = "AllowScopedInstanceProfileCreationActions" effect = "Allow" + resources = ["*"] + actions = ["iam:CreateInstanceProfile"] + + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:RequestTag/topology.kubernetes.io/region" + values = [data.aws_region.this[0].name] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } } statement { - sid = "EKSClusterEndpointLookup" - actions = [ - "eks:DescribeCluster" - ] - resources = ["arn:${var.aws_partition}:eks:${data.aws_region.this.name}:${data.aws_caller_identity.this.id}:cluster/${var.cluster_name}"] + sid = "AllowScopedInstanceProfileTagActions" effect = "Allow" + resources = ["*"] + actions = ["iam:TagInstanceProfile"] + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = 
"aws:ResourceTag/topology.kubernetes.io/region" + values = [data.aws_region.this[0].name] + } + + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:RequestTag/topology.kubernetes.io/region" + values = [data.aws_region.this[0].name] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } } - dynamic "statement" { - for_each = var.enabled ? [0] : [] + statement { + sid = "AllowScopedInstanceProfileActions" + effect = "Allow" + resources = ["*"] - content { - sid = "HandleInteruptionsQueueMessages" - actions = [ - "sqs:DeleteMessage", - "sqs:GetQueueUrl", - "sqs:GetQueueAttributes", - "sqs:ReceiveMessage", - ] - resources = [aws_sqs_queue.this[0].arn] + actions = [ + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile", + ] + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/topology.kubernetes.io/region" + values = [data.aws_region.this[0].name] } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } + } + + statement { + sid = "AllowInstanceProfileReadActions" + effect = "Allow" + resources = ["*"] + actions = ["iam:GetInstanceProfile"] + } + + statement { + sid = "AllowAPIServerEndpointDiscovery" + effect = "Allow" + resources = ["arn:${var.aws_partition}:eks:${data.aws_region.this[0].name}:${data.aws_caller_identity.this[0].account_id}:cluster/${var.cluster_name}"] + actions = ["eks:DescribeCluster"] } } diff --git a/interruption.tf b/interruption.tf index 2a731a2..2e0b7ce 100644 --- a/interruption.tf +++ b/interruption.tf @@ -1,8 +1,10 @@ locals { - aws_partition_dns_suffix = data.aws_partition.current.dns_suffix + aws_partition_dns_suffix = data.aws_partition.current[0].dns_suffix } -data "aws_partition" "current" {} +data "aws_partition" "current" { + count = var.enabled ? 1 : 0 +} resource "aws_sqs_queue" "this" { count = var.enabled ? 1 : 0 @@ -38,10 +40,7 @@ resource "aws_sqs_queue_policy" "this" { policy = data.aws_iam_policy_document.queue[0].json } - - # Node Termination Event Rules - locals { events = { health_event = { diff --git a/ll-logo.png b/ll-logo.png deleted file mode 100644 index 8aa2c6a4997fbdcf7009e089b2d07ad4a89218a6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12095 zcmbVxWl$YF(C)##xVsmJ;!xbRc#FHcyL)l>!ol6$DeiFK;O_3Ox9^Yp^WOR9o6KgC zooqIf+0E?pBvMIH3I&k>5dZ+7$ViK;000nr|He`9kpKGPNi$Ob04>c+?U##+p*xv_ zlf9XxjVYOnr-Lb(sfVQ*0N}9#&d_eAEOQFm#19PzK%=n|kr^;e_x3;HN~@QgxU~3s zE&Dl?H5cZ{QYSb*vz$46>~7jWaet)D%YIe&CajY9NKuyj(|LS-Yu|Z@HSX>AdGg#X z*F%cx>)VseOVv4c_%kPWk%L$7^E#B&y72z>dZs6L@1twxF_;t=OP}|4$PXoP_u2V- z%4O)#bEjp&X$`RY&2xPdQ~&E`ZpUqh-up((CyAecQZl2aDf#HLbmG&WS6ayDH+etA zX&KnV>tlY3K-`3fn2?Y+H*X<5<2O!`9_r`ej;rZq`}^sn+`~uY=MtS}`&f_L64Jt# z!%=^_4^zWueC3aKsB-5|2Y#VMpQ=r7L3eIB%7XJ%Wwy7Gsqqg^klvne)wPlz{pGy? 
diff --git a/migrations.tf b/migrations.tf
new file mode 100644
index 0000000..9df4192
--- /dev/null
+++ b/migrations.tf
@@ -0,0 +1,9 @@
+moved {
+  from = helm_release.this
+  to   = helm_release.controller
+}
+
+moved {
+  from = kubernetes_manifest.this
+  to   = kubernetes_manifest.controller
+}
diff --git a/outputs.tf b/outputs.tf
index fb7da5a..dc9d527 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,6 +1,6 @@
 output "helm_release_metadata" {
   description = "Helm release attributes"
-  value       = try(helm_release.this[0].metadata, {})
+  value       = try(helm_release.controller[0].metadata, {})
 }
 
 output "helm_release_application_metadata" {
@@ -10,10 +10,10 @@ output "helm_release_application_metadata" {
 
 output "kubernetes_application_attributes" {
   description = "Argo kubernetes manifest attributes"
-  value       = try(kubernetes_manifest.this, {})
+  value       = try(kubernetes_manifest.controller, {})
 }
 
 output "iam_irsa_role_attributes" {
-  description = "karpenter IAM role atributes"
+  description = "Karpenter IAM role attributes"
   value       = try(aws_iam_role.this[0], {})
 }
diff --git a/requirements.txt b/requirements.txt
index 41a7bea..cde6311 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1 @@
-checkov==2.2.158
+checkov==2.4.25
diff --git a/values.tf b/values.tf
index 53b54e0..4bcba02 100644
--- a/values.tf
+++ b/values.tf
@@ -1,26 +1,37 @@
 locals {
-  helm_repo_url = trimprefix(var.helm_repo_url, "https://")
-
   values_default = yamlencode({
-    "settings" : {
-      "aws" : {
-        "clusterEndpoint" : data.aws_eks_cluster.this.endpoint
-        "clusterName" : var.cluster_name
-        "interruptionQueueName" : aws_sqs_queue.this[0].name
+    settings = {
+      aws = {
+        clusterEndpoint       = one(data.aws_eks_cluster.this[*].endpoint)
+        clusterName           = var.cluster_name
+        interruptionQueueName = one(aws_sqs_queue.this[*].name)
       }
     }
-    "serviceAccount" : {
-      "create" : var.service_account_create
-      "name" : var.service_account_name
-      "annotations" : {
+    serviceAccount = {
+      create = var.service_account_create
+      name   = var.service_account_name
+      annotations = {
         "eks.amazonaws.com/role-arn" : local.irsa_role_create ? aws_iam_role.this[0].arn : ""
       }
     }
   })
+
+  crds_values_default = yamlencode({
+    # add default values here
+  })
 }
 
 data "aws_eks_cluster" "this" {
-  name = var.cluster_name
+  count = var.enabled ? 1 : 0
+
+  name = var.cluster_name
+}
+
+data "utils_deep_merge_yaml" "crds_values" {
+  count = var.enabled ? 1 : 0
+  input = compact([
+    local.crds_values_default,
+    var.crds_values
+  ])
 }
 
 data "utils_deep_merge_yaml" "values" {
diff --git a/variables-crds.tf b/variables-crds.tf
new file mode 100644
index 0000000..2d90408
--- /dev/null
+++ b/variables-crds.tf
@@ -0,0 +1,227 @@
+# ================ crd common variables (required) ================
+
+variable "crds_helm_chart_name" {
+  type        = string
+  default     = "karpenter-crd"
+  description = "Helm chart name to be installed"
+}
+
+variable "crds_helm_chart_version" {
+  type        = string
+  default     = "v0.33.1"
+  description = "Version of the Helm chart"
+}
+
+variable "crds_helm_release_name" {
+  type        = string
+  default     = "karpenter-crds"
+  description = "Helm release name"
+}
+
+variable "crds_settings" {
+  type        = map(any)
+  default     = {}
+  description = "Additional helm sets which will be passed to the Helm chart values, see https://github.com/aws/karpenter/tree/main/charts/karpenter-crd"
+}
+
+variable "crds_values" {
+  type        = string
+  default     = ""
+  description = "Additional yaml encoded values which will be passed to the Helm chart, see https://github.com/aws/karpenter/tree/main/charts/karpenter-crd"
+}
+
+# ================ argo variables (required) ================
+
+variable "crds_argo_sync_policy" {
+  type        = any
+  description = "ArgoCD syncPolicy manifest parameter"
+  default     = {}
+}
+
+variable "crds_argo_metadata" {
+  type = any
+  default = {
+    "finalizers" : [
+      "resources-finalizer.argocd.argoproj.io"
+    ]
+  }
+  description = "ArgoCD Application metadata configuration. Override or create additional metadata parameters"
+}
+
+variable "crds_argo_spec" {
+  type        = any
+  default     = {}
+  description = "ArgoCD Application spec configuration. Override or create additional spec parameters"
+}
+
+variable "crds_argo_helm_values" {
+  type        = string
+  default     = ""
+  description = "Value overrides to use when deploying argo application object with helm"
+}
+
+# ================ argo kubernetes manifest variables (required) ================
+
+variable "crds_argo_kubernetes_manifest_computed_fields" {
+  type        = list(string)
+  default     = ["metadata.labels", "metadata.annotations", "metadata.finalizers"]
+  description = "List of paths of fields to be handled as \"computed\". The user-configured value for the field will be overridden by any different value returned by the API after apply."
+}
+
+variable "crds_argo_kubernetes_manifest_field_manager_name" {
+  type        = string
+  default     = "Terraform"
+  description = "The name of the field manager to use when applying the kubernetes manifest resource. Defaults to Terraform"
+}
+
+variable "crds_argo_kubernetes_manifest_field_manager_force_conflicts" {
+  type        = bool
+  default     = false
+  description = "Forcibly override any field manager conflicts when applying the kubernetes manifest resource"
+}
+
+variable "crds_argo_kubernetes_manifest_wait_fields" {
+  type        = map(string)
+  default     = {}
+  description = "A map of fields and a corresponding regular expression with a pattern to wait for. The provider will wait until the field matches the regular expression. Use * for any value."
+}
+
+# ================ helm release variables (required) ================
+
+variable "crds_helm_devel" {
+  type        = bool
+  default     = false
+  description = "Use helm chart development versions, too. Equivalent to version '>0.0.0-0'. If version is set, this is ignored"
+}
+
+variable "crds_helm_package_verify" {
+  type        = bool
+  default     = false
+  description = "Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart"
+}
+
+variable "crds_helm_keyring" {
+  type        = string
+  default     = "~/.gnupg/pubring.gpg"
+  description = "Location of public keys used for verification. Used only if helm_package_verify is true"
+}
+
+variable "crds_helm_timeout" {
+  type        = number
+  default     = 300
+  description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks)"
+}
+
+variable "crds_helm_disable_webhooks" {
+  type        = bool
+  default     = false
+  description = "Prevent helm chart hooks from running"
+}
+
+variable "crds_helm_reset_values" {
+  type        = bool
+  default     = false
+  description = "When upgrading, reset the values to the ones built into the helm chart"
+}
+
+variable "crds_helm_reuse_values" {
+  type        = bool
+  default     = false
+  description = "When upgrading, reuse the last helm release's values and merge in any overrides. If 'helm_reset_values' is specified, this is ignored"
+}
+
+variable "crds_helm_force_update" {
+  type        = bool
+  default     = false
+  description = "Force helm resource update through delete/recreate if needed"
+}
+
+variable "crds_helm_recreate_pods" {
+  type        = bool
+  default     = false
+  description = "Perform pods restart during helm upgrade/rollback"
+}
+
+variable "crds_helm_cleanup_on_fail" {
+  type        = bool
+  default     = false
+  description = "Allow deletion of new resources created in this helm upgrade when upgrade fails"
+}
+
+variable "crds_helm_release_max_history" {
+  type        = number
+  default     = 0
+  description = "Maximum number of release versions stored per release"
+}
+
+variable "crds_helm_atomic" {
+  type        = bool
+  default     = false
+  description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used"
+}
+
+variable "crds_helm_wait" {
+  type        = bool
+  default     = true
+  description = "Will wait until all helm release resources are in a ready state before marking the release as successful. It will wait for as long as timeout"
+}
+
+variable "crds_helm_wait_for_jobs" {
+  type        = bool
+  default     = false
+  description = "If wait is enabled, will wait until all helm Jobs have been completed before marking the release as successful. It will wait for as long as timeout"
+}
+
+variable "crds_helm_skip_crds" {
+  type        = bool
+  default     = false
+  description = "If set, no CRDs will be installed before helm release"
+}
+
+variable "crds_helm_render_subchart_notes" {
+  type        = bool
+  default     = true
+  description = "If set, render helm subchart notes along with the parent"
+}
+
+variable "crds_helm_disable_openapi_validation" {
+  type        = bool
+  default     = false
+  description = "If set, the installation process will not validate rendered helm templates against the Kubernetes OpenAPI Schema"
+}
+
+variable "crds_helm_dependency_update" {
+  type        = bool
+  default     = false
+  description = "Runs helm dependency update before installing the chart"
+}
+
+variable "crds_helm_replace" {
+  type        = bool
+  default     = false
+  description = "Re-use the given name of helm release, only if that name is a deleted release which remains in the history. This is unsafe in production"
+}
+
+variable "crds_helm_description" {
+  type        = string
+  default     = ""
+  description = "Set helm release description attribute (visible in the history)"
+}
+
+variable "crds_helm_lint" {
+  type        = bool
+  default     = false
+  description = "Run the helm chart linter during the plan"
+}
+
+variable "crds_helm_set_sensitive" {
+  type        = map(any)
+  default     = {}
+  description = "Value block with custom sensitive values to be merged with the values yaml that won't be exposed in the plan's diff"
+}
+
+variable "crds_helm_postrender" {
+  type        = map(any)
+  default     = {}
+  description = "Value block with a path to a binary file to run after helm renders the manifest which can alter the manifest contents"
+}
diff --git a/variables.tf b/variables.tf
index 2783348..ad4c672 100644
--- a/variables.tf
+++ b/variables.tf
@@ -41,7 +41,7 @@ variable "helm_chart_name" {
 
 variable "helm_chart_version" {
   type        = string
-  default     = "0.28.0"
+  default     = "v0.32.5"
   description = "Version of the Helm chart"
 }
 
@@ -181,6 +181,18 @@ variable "argo_helm_wait_backoff_limit" {
   description = "Backoff limit for ArgoCD Application Helm release wait job"
 }
 
+variable "argo_helm_wait_node_selector" {
+  type        = map(string)
+  default     = {}
+  description = "Node selector for ArgoCD Application Helm release wait job"
+}
+
+variable "argo_helm_wait_tolerations" {
+  type        = list(any)
+  default     = []
+  description = "Tolerations for ArgoCD Application Helm release wait job"
+}
+
 variable "argo_destination_server" {
   type        = string
   default     = "https://kubernetes.default.svc"
diff --git a/versions.tf b/versions.tf
index ea8c3a1..281c506 100644
--- a/versions.tf
+++ b/versions.tf
@@ -1,5 +1,5 @@
 terraform {
-  required_version = ">= 1.0"
+  required_version = ">= 1.1"
 
   required_providers {
     aws = {
@@ -14,6 +14,10 @@ terraform {
       source  = "hashicorp/helm"
       version = ">= 2.6.0"
     }
+    time = {
+      source  = "hashicorp/time"
+      version = ">= 0.9.0"
+    }
     utils = {
       source  = "cloudposse/utils"
       version = ">= 0.17.0"