diff --git a/.ansible-lint b/.ansible-lint index ba0c6d31..a90f5bf2 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -8,4 +8,5 @@ warn_list: - var-naming - yaml[comments-indentation] skip_list: - - experimental \ No newline at end of file + - experimental + - yaml[line-length] \ No newline at end of file diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore index dc3fc6ac..cb343553 100644 --- a/.ansible-lint-ignore +++ b/.ansible-lint-ignore @@ -1,4 +1,8 @@ # This file contains ignores rule violations for ansible-lint roles/testing/tasks/troubleshooting.yml ignore-errors -inventory/sample/hosts.yml yaml[line-length] \ No newline at end of file +inventory/sample/hosts.yml yaml[line-length] +inventory/sample/hosts.yml yaml[comments-indentation] +roles/rke2/tasks/add-audit-policy-config.yml no-handler +roles/rke2/tasks/add-pod-security-admission-config.yml no-handler +roles/rke2/tasks/add-registry-config.yml no-handler diff --git a/.github/workflows/rocky8.yml b/.github/workflows/rocky8.yml index b4d9973e..f21a42c7 100644 --- a/.github/workflows/rocky8.yml +++ b/.github/workflows/rocky8.yml @@ -119,7 +119,7 @@ jobs: echo " $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Agent" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PublicIpAddress" --output text | head -1):" >> hosts.yml echo "all:" >> hosts.yml echo " vars:" >> hosts.yml - echo " kubernetes_api_server_host: $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Server" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PrivateIpAddress" --output text | head -1)" >> hosts.yml + echo " rke2_kubernetes_api_server_host: $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" 
"Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Server" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PrivateIpAddress" --output text | head -1)" >> hosts.yml echo "" >> ansible.cfg echo "" >> ansible.cfg echo "remote_user=centos" >> ansible.cfg @@ -172,7 +172,7 @@ jobs: echo " $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=ExtraNode" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PublicIpAddress" --output text | head -1):" >> hosts.yml echo "all:" >> hosts.yml echo " vars:" >> hosts.yml - echo " kubernetes_api_server_host: $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Server" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PrivateIpAddress" --output text | head -1)" >> hosts.yml + echo " rke2_kubernetes_api_server_host: $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Server" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PrivateIpAddress" --output text | head -1)" >> hosts.yml cp hosts.yml inventory/rocky8/hosts.yml env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} diff --git a/.github/workflows/ubuntu20.yml b/.github/workflows/ubuntu20.yml index b3dbeeb5..f87e40e0 100644 --- a/.github/workflows/ubuntu20.yml +++ b/.github/workflows/ubuntu20.yml @@ -117,7 +117,7 @@ jobs: echo " $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Agent" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PublicIpAddress" --output text | head 
-1):" >> hosts.yml echo "all:" >> hosts.yml echo " vars:" >> hosts.yml - echo " kubernetes_api_server_host: $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Server" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PrivateIpAddress" --output text | head -1)" >> hosts.yml + echo " rke2_kubernetes_api_server_host: $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Server" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PrivateIpAddress" --output text | head -1)" >> hosts.yml echo "" >> ansible.cfg echo "" >> ansible.cfg echo "remote_user=ubuntu" >> ansible.cfg @@ -170,7 +170,7 @@ jobs: echo " $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=ExtraNode" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PublicIpAddress" --output text | head -1):" >> hosts.yml echo "all:" >> hosts.yml echo " vars:" >> hosts.yml - echo " kubernetes_api_server_host: $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Server" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PrivateIpAddress" --output text | head -1)" >> hosts.yml + echo " rke2_kubernetes_api_server_host: $(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=tag:Owner,Values=rke2-ansible-github-actions" "Name=tag:NodeType,Values=Server" "Name=tag:github_run,Values=$GITHUB_RUN_ID" --query "Reservations[*].Instances[*].PrivateIpAddress" --output text | head -1)" >> hosts.yml cp hosts.yml inventory/ubuntu20/hosts.yml env: AWS_ACCESS_KEY_ID: 
${{ secrets.AWS_ACCESS_KEY_ID }} diff --git a/.gitignore b/.gitignore index 782a0c73..66226d30 100644 --- a/.gitignore +++ b/.gitignore @@ -2,12 +2,9 @@ .cache/ venv/ +.venv/ test_inventory* -rke2-images.linux-amd64.tar.gz -rke2.linux-amd64.tar.gz - - -tarball_install/* -!tarball_install/README.md \ No newline at end of file +sample_files/tarball_install/* +!sample_files/tarball_install/README.md \ No newline at end of file diff --git a/.yamllint b/.yamllint index c2321b0f..b2e05b7f 100644 --- a/.yamllint +++ b/.yamllint @@ -5,8 +5,6 @@ rules: line-length: max: 120 level: warning - truthy: - allowed-values: ['true', 'false', 'yes', 'no'] ignore: | .github/ diff --git a/README.md b/README.md index a6dc3363..d1789905 100644 --- a/README.md +++ b/README.md @@ -109,7 +109,7 @@ Kubeconfig To get access to your **Kubernetes** cluster just ```bash -ssh ec2-user@kubernetes_api_server_host "sudo /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes" +ssh ec2-user@rke2_kubernetes_api_server_host "sudo /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes" ``` Available configurations diff --git a/ansible.cfg b/ansible.cfg index 43a4415d..a351711f 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -12,3 +12,4 @@ host_key_checking = False deprecation_warnings = False callback_whitelist = profile_roles, timer display_skipped_hosts = no +force_handlers = True diff --git a/galaxy.yml b/galaxy.yml new file mode 100644 index 00000000..4e52df6b --- /dev/null +++ b/galaxy.yml @@ -0,0 +1,22 @@ +--- +namespace: rancherfederal +name: rke2_ansible +version: 1.0.0 +readme: README.md +authors: + - Rancher Government +description: Collection for rancherfederal/rke2-ansible + +license_file: 'LICENSE' + +tags: [infrastructure, linux, kubernetes, rancher, rke2] + +repository: https://github.com/rancherfederal/rke2-ansible +documentation: https://github.com/rancherfederal/rke2-ansible +homepage: 
https://github.com/rancherfederal/rke2-ansible +issues: https://github.com/rancherfederal/rke2-ansible/issues + +build_ignore: + - tarball_install/* + - testing + - .github diff --git a/inventory/sample/group_vars/rke2_agents.yml b/inventory/sample/group_vars/rke2_agents.yml index e9d13353..dd8c405d 100644 --- a/inventory/sample/group_vars/rke2_agents.yml +++ b/inventory/sample/group_vars/rke2_agents.yml @@ -7,4 +7,4 @@ rke2_config: {} # See https://docs.rke2.io/install/containerd_registry_configuration/ # Add a registry configuration file by specifying the file path on the control host -# registry_config_file_path: "{{ playbook_dir }}/sample_files/registries.yaml" +# rke2_registry_config_file_path: "{{ playbook_dir }}/sample_files/registries.yaml" diff --git a/inventory/sample/group_vars/rke2_servers.yml b/inventory/sample/group_vars/rke2_servers.yml index d451b625..40d7117e 100644 --- a/inventory/sample/group_vars/rke2_servers.yml +++ b/inventory/sample/group_vars/rke2_servers.yml @@ -36,11 +36,11 @@ rke2_config: {} # See https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ # Add a policy configuration file by specifying the file path on the control host -# audit_policy_config_file_path: "{{ playbook_dir }}/sample_files/audit-policy.yaml" +# rke2_audit_policy_config_file_path: "{{ playbook_dir }}/sample_files/audit-policy.yaml" # See https://docs.rke2.io/install/containerd_registry_configuration/ # Add a registry configuration file by specifying the file path on the control host -# registry_config_file_path: "{{ playbook_dir }}/sample_files/registries.yaml" +# rke2_registry_config_file_path: "{{ playbook_dir }}/sample_files/registries.yaml" # See https://docs.rke2.io/helm/#automatically-deploying-manifests-and-helm-charts # Add manifest files by specifying the directory path on the control host @@ -50,4 +50,4 @@ rke2_config: {} # Available in RKE2 1.25+ # Add a pod security admission config file by specifying the file path on the control host # 
Requires config.yaml to include `- admission-control-config-file=/etc/rancher/rke2/pod-security-admission-config.yaml` in order for this to be honored -# pod_security_admission_config_file_path: "{{ playbook_dir }}/sample_files/pod-security-admission-config.yaml" +# rke2_pod_security_admission_config_file_path: "{{ playbook_dir }}/sample_files/pod-security-admission-config.yaml" diff --git a/inventory/sample/hosts.yml b/inventory/sample/hosts.yml index 56811651..82aeab26 100644 --- a/inventory/sample/hosts.yml +++ b/inventory/sample/hosts.yml @@ -1,13 +1,16 @@ --- all: vars: - install_rke2_version: v1.27.10+rke2r1 + rke2_install_version: v1.27.10+rke2r1 # # In air-gapped envs, it might be convenient to download the tar files from custom URLs - # rke2_tarball_url: https://github.com/rancher/rke2/releases/download/v1.26.15%2Brke2r1/rke2.linux-amd64.tar.gz + # rke2_install_tarball_url: https://github.com/rancher/rke2/releases/download/v1.26.15%2Brke2r1/rke2.linux-amd64.tar.gz # rke2_image_tar_urls: # - https://github.com/rancher/rke2/releases/download/v1.26.15%2Brke2r1/rke2-images-canal.linux-amd64.tar.zst # - https://github.com/rancher/rke2/releases/download/v1.26.15%2Brke2r1/rke2-images-core.linux-amd64.tar.zst + # Or specify a tarball that's been prestaged on the ansible control host + # rke2_binary_tarball: {{ inventory_dir }}/tarball/rke2.linux-amd64.tar.gz + rke2_cluster: children: rke2_servers: @@ -46,10 +49,10 @@ rke2_cluster: # write-kubeconfig-mode: "0640" # # See https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ # # Add a policy configuration file by specifying the file path on the control host - # audit_policy_config_file_path: "{{ playbook_dir }}/sample_files/audit-policy.yaml" + # rke2_audit_policy_config_file_path: "{{ playbook_dir }}/sample_files/audit-policy.yaml" # # See https://docs.rke2.io/install/containerd_registry_configuration/ # # Add a registry configuration file by specifying the file path on the control host - # 
registry_config_file_path: "{{ playbook_dir }}/sample_files/registries.yaml" + # rke2_registry_config_file_path: "{{ playbook_dir }}/sample_files/registries.yaml" # # See https://docs.rke2.io/helm/#automatically-deploying-manifests-and-helm-charts # # Add manifest files by specifying the directory path on the control host # manifest_config_file_path: "{{ playbook_dir }}/sample_files/manifest/" diff --git a/roles/cluster_manifest/tasks/main.yml b/roles/cluster_manifest/tasks/main.yml deleted file mode 100644 index 4af88cc0..00000000 --- a/roles/cluster_manifest/tasks/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- - -- name: Add cluster manifest addons files - ansible.builtin.copy: - src: "{{ cluster_manifest_config_file_path }}" - dest: "/var/lib/rancher/rke2/server/manifests/" - mode: '0640' - owner: root - group: root - when: - - inventory_hostname in groups['rke2_servers'][0] - - cluster_manifest_config_file_path is defined - - cluster_manifest_config_file_path | length > 0 diff --git a/roles/rke2/defaults/main.yml b/roles/rke2/defaults/main.yml new file mode 100644 index 00000000..ed16321c --- /dev/null +++ b/roles/rke2/defaults/main.yml @@ -0,0 +1,37 @@ +--- +rke2_kubernetes_api_server_host: "" +rke2_tarball_install_dir: "/usr/local" +rke2_install_local_tarball_path: "" +rke2_install_tarball_url: "" +rke2_images_urls: [] +rke2_images_local_tarball_path: [] +rke2_channel: "stable" +rke2_audit_policy_config_file_path: "" +rke2_registry_config_file_path: "" +rke2_pod_security_admission_config_file_path: "" +rke2_add_iptables_rules: false +rke2_manifest_config_directory: "" +rke2_manifest_config_post_run_directory: "" +rke2_force_tarball_install: false +rke2_install_version: "" +rke2_common_yum_repo: + name: rancher-rke2-common + description: "Rancher RKE2 Common Latest" + baseurl: "https://rpm.rancher.io/rke2/{{ rke2_channel }}/common/centos/$releasever/noarch" + gpgcheck: true + gpgkey: "https://rpm.rancher.io/public.key" + enabled: true + +rke2_versioned_yum_repo: 
+ name: "rancher-rke2-v{{ rke2_version_majmin }}" # noqa jinja[spacing] + description: "Rancher RKE2 Version" + baseurl: "https://rpm.rancher.io/rke2/{{ rke2_channel }}/{{ rke2_version_majmin }}/centos/$releasever/$basearch" + gpgcheck: true + gpgkey: "https://rpm.rancher.io/public.key" + enabled: true +rke2_kubelet_node_name: + - "nodeNameNotFound" +rke2_config: {} +rke2_metrics_running: false +rke2_node_ready: "false" +rke2_api_server_running: false diff --git a/roles/rke2/handlers/main.yml b/roles/rke2/handlers/main.yml new file mode 100644 index 00000000..ac0f71cb --- /dev/null +++ b/roles/rke2/handlers/main.yml @@ -0,0 +1,47 @@ +--- + +- name: Restart systemd-sysctl + ansible.builtin.service: + state: restarted + name: systemd-sysctl + when: + - not rke2_reboot + +- name: Restart fapolicyd + ansible.builtin.service: + state: restarted + name: fapolicyd + when: + - not rke2_reboot + +- name: Restart rke2-server + ansible.builtin.service: + state: restarted + enabled: true + name: rke2-server + throttle: 1 + when: + - not rke2_reboot + +- name: Restart rke2-agent + ansible.builtin.service: + state: restarted + enabled: true + name: rke2-agent + throttle: 1 + when: + - not rke2_reboot + +- name: Reload NetworkManager + ansible.builtin.systemd: + name: NetworkManager + state: reloaded + when: + - not rke2_reboot + +- name: Reboot the machine + ansible.builtin.reboot: + reboot_timeout: 300 + throttle: 1 + when: + - rke2_reboot diff --git a/roles/rke2/molecule/README.md b/roles/rke2/molecule/README.md new file mode 100644 index 00000000..72bad79b --- /dev/null +++ b/roles/rke2/molecule/README.md @@ -0,0 +1,59 @@ +# Molecule Scenarios +| Scenario | Passing | +| ----------- | ------- | +| rocky-89 | False | +| rocky-94 | True | +| ubuntu-2404 | True | +| ubuntu-2204 | True | +| sles-15 | False | + +## template +As the name would imply this is a template scenario, no one is supposed to run this and it will not ever work. 
The purpose is to prevent other scenarios from having to rewrite or copy from one another, this also allows changes to be shared across all scenarios that are descendants of the template. + +## rocky-94 +The rocky-94 scenario is the simplest possible scenario, with a single Rocky 9.4 master node and a single Rocky 9.4 worker node. + +## rocky-89 +The rocky-89 scenario is the simplest possible scenario, with a single Rocky 8.9 master node and a single Rocky 8.9 worker node. + +## ubuntu-2404 +The ubuntu-2404 scenario is the simplest possible scenario, with a single Ubuntu 24.04 master node and a single Ubuntu 24.04 worker node. + +## ubuntu-2204 +The ubuntu-2204 scenario is the simplest possible scenario, with a single Ubuntu 22.04 master node and a single Ubuntu 22.04 worker node. + + +--- +# Development +## Required ENV Vars +| Name | Purpose | +| --------------------- | ------- | +| AWS_ACCESS_KEY_ID | Access to AWS | +| AWS_SECRET_ACCESS_KEY | Access to AWS | +| VPC_SUBNET_ID | Subnet to assign EC2s to | + +## Summary +The molecule test scenarios are based on the cookie cutter ec2 instance and require the molecule plugin here: [molecule-plugin](https://github.com/ansible-community/molecule-plugins), the pip3 `requirements.txt` can be found in this directory while the ansible specific requirements will be installed automatically when running molecule as a part of the `requirements` stage. 
+As this is an ec2 based scenario an AWS account is needed, you will need to define the following variables either as environment variables or in your aws cli config file (`~/.aws/config`) + +``` +export AWS_ACCESS_KEY_ID="" +export AWS_SECRET_ACCESS_KEY="" +``` + +or +``` +[default] +aws_access_key_id= +aws_secret_access_key= +``` + +It is worth noting that the EC2 driver does not provide a way to login to EC2 instances, this needs to be done manually, your ssh key can be found in `~/.cache/molecule/rke2/default/id_rsa` and the default user is `ansible`, you will be able to login like so: +`ssh ansible@000.000.000.000 -i ~/.cache/molecule/rke2/default/id_rsa` note that the key's location is dependent on the scenario name. + +The `vpc_subnet_id` key has been removed as a defined variable and is pulled from the environment variable `VPC_SUBNET_ID`. Other than the AWS keys needed this is the only environment variable required. + +# To Do + - Add tests + - Ensure node labels are applied + - Ensure setting CIS profile works as expected diff --git a/roles/rke2/molecule/requirements.txt b/roles/rke2/molecule/requirements.txt new file mode 100644 index 00000000..60a88857 --- /dev/null +++ b/roles/rke2/molecule/requirements.txt @@ -0,0 +1,28 @@ +ansible-compat==24.7.0 +ansible-core==2.17.2 +attrs==23.2.0 +bracex==2.4 +cffi==1.16.0 +click==8.1.7 +click-help-colors==0.9.4 +cryptography==42.0.8 +enrich==1.2.7 +Jinja2==3.1.4 +jsonschema==4.23.0 +jsonschema-specifications==2023.12.1 +markdown-it-py==3.0.0 +MarkupSafe==2.1.5 +mdurl==0.1.2 +molecule==24.7.0 +molecule-plugins==23.5.3 +packaging==24.1 +pluggy==1.5.0 +pycparser==2.22 +Pygments==2.18.0 +PyYAML==6.0.1 +referencing==0.35.1 +resolvelib==1.0.1 +rich==13.7.1 +rpds-py==0.19.0 +subprocess-tee==0.4.2 +wcmatch==8.5.2 diff --git a/roles/rke2/molecule/rocky-89/molecule.yml b/roles/rke2/molecule/rocky-89/molecule.yml new file mode 100644 index 00000000..aacfede6 --- /dev/null +++ b/roles/rke2/molecule/rocky-89/molecule.yml @@ 
-0,0 +1,60 @@ +--- +driver: + name: ec2 + +platforms: + - name: master-01 + image: ami-02391db2758465a87 + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_servers + - name: worker-01 + image: ami-02391db2758465a87 + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_agents + +provisioner: + name: ansible + playbooks: + converge: ../template/converge.yml + create: ../template/create.yml + destroy: ../template/destroy.yml + requirements: ../template/requirements.yml + inventory: + hosts: + rke2_cluster: + children: + rke2_servers: + vars: + group_rke2_config: + node-label: + - serverGroupLabel=true + hosts: + master-01: + host_rke2_config: + node-label: + - host0Label=true + rke2_agents: + vars: + group_rke2_config: + node-label: + - agentGroupLabel=true + hosts: + worker-01: + host_rke2_config: + node-label: + - host1Label=true + +verifier: + name: ansible diff --git a/roles/rke2/molecule/rocky-94/molecule.yml b/roles/rke2/molecule/rocky-94/molecule.yml new file mode 100644 index 00000000..8b0808e3 --- /dev/null +++ b/roles/rke2/molecule/rocky-94/molecule.yml @@ -0,0 +1,60 @@ +--- +driver: + name: ec2 + +platforms: + - name: master-01 + image: ami-051a0f669bb174783 + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_servers + - name: worker-01 + image: ami-051a0f669bb174783 + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_agents + +provisioner: + name: ansible + playbooks: + converge: ../template/converge.yml + create: ../template/create.yml + destroy: ../template/destroy.yml + requirements: ../template/requirements.yml + inventory: + hosts: + 
rke2_cluster: + children: + rke2_servers: + vars: + group_rke2_config: + node-label: + - serverGroupLabel=true + hosts: + master-01: + host_rke2_config: + node-label: + - host0Label=true + rke2_agents: + vars: + group_rke2_config: + node-label: + - agentGroupLabel=true + hosts: + worker-01: + host_rke2_config: + node-label: + - host1Label=true + +verifier: + name: ansible diff --git a/roles/rke2/molecule/sles-15/molecule.yml b/roles/rke2/molecule/sles-15/molecule.yml new file mode 100644 index 00000000..8fd4ca6a --- /dev/null +++ b/roles/rke2/molecule/sles-15/molecule.yml @@ -0,0 +1,60 @@ +--- +driver: + name: ec2 + +platforms: + - name: master-01 + image: ami-05e760b0ec1a5588a + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_servers + - name: worker-01 + image: ami-05e760b0ec1a5588a + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_agents + +provisioner: + name: ansible + playbooks: + converge: ../template/converge.yml + create: ../template/create.yml + destroy: ../template/destroy.yml + requirements: ../template/requirements.yml + inventory: + hosts: + rke2_cluster: + children: + rke2_servers: + vars: + group_rke2_config: + node-label: + - serverGroupLabel=true + hosts: + master-01: + host_rke2_config: + node-label: + - host0Label=true + rke2_agents: + vars: + group_rke2_config: + node-label: + - agentGroupLabel=true + hosts: + worker-01: + host_rke2_config: + node-label: + - host1Label=true + +verifier: + name: ansible diff --git a/roles/rke2/molecule/template/converge.yml b/roles/rke2/molecule/template/converge.yml new file mode 100644 index 00000000..2c5f85ba --- /dev/null +++ b/roles/rke2/molecule/template/converge.yml @@ -0,0 +1,11 @@ +--- +- name: Converge + hosts: all + gather_facts: true + pre_tasks: + - name: Set api_server_host + 
ansible.builtin.set_fact: + rke2_kubernetes_api_server_host: "{{ hostvars[groups['rke2_servers'][0]].ansible_host }}" + roles: + - role: rke2 + become: true diff --git a/roles/rke2/molecule/template/create.yml b/roles/rke2/molecule/template/create.yml new file mode 100644 index 00000000..3008c936 --- /dev/null +++ b/roles/rke2/molecule/template/create.yml @@ -0,0 +1,333 @@ +--- +- name: Create + hosts: localhost + connection: local + gather_facts: false + no_log: "{{ molecule_no_log }}" + vars: + # Run config handling + default_run_id: "{{ lookup('password', '/dev/null chars=ascii_lowercase length=5') }}" + default_run_config: + run_id: "{{ default_run_id }}" + + run_config_path: "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}/run-config.yml" + run_config_from_file: "{{ (lookup('file', run_config_path, errors='ignore') or '{}') | from_yaml }}" + run_config: '{{ default_run_config | combine(run_config_from_file) }}' + + # Platform settings handling + default_assign_public_ip: true + default_aws_profile: "{{ lookup('env', 'AWS_PROFILE') }}" + default_boot_wait_seconds: 120 + default_instance_type: t2.medium + default_key_inject_method: cloud-init # valid values: [cloud-init, ec2] + default_key_name: "molecule-{{ run_config.run_id }}" + default_private_key_path: "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}/id_rsa" + default_public_key_path: "{{ default_private_key_path }}.pub" + default_ssh_user: ansible + default_ssh_port: 22 + default_user_data: '' + + default_security_group_name: "molecule-{{ run_config.run_id }}" + default_security_group_description: Ephemeral security group for Molecule instances + default_security_group_rules: + - proto: tcp + from_port: "{{ default_ssh_port }}" + to_port: "{{ default_ssh_port }}" + cidr_ip: "0.0.0.0/0" + - proto: icmp + from_port: 8 + to_port: -1 + cidr_ip: "0.0.0.0/0" + - proto: tcp + from_port: 9345 + to_port: 9345 + cidr_ip: "0.0.0.0/0" + - proto: tcp + from_port: 6443 + to_port: 6443 + cidr_ip: "0.0.0.0/0" + 
default_security_group_rules_egress: + - proto: -1 + from_port: 0 + to_port: 0 + cidr_ip: "0.0.0.0/0" + + platform_defaults: + assign_public_ip: "{{ default_assign_public_ip }}" + aws_profile: "{{ default_aws_profile }}" + boot_wait_seconds: "{{ default_boot_wait_seconds }}" + instance_type: "{{ default_instance_type }}" + key_inject_method: "{{ default_key_inject_method }}" + key_name: "{{ default_key_name }}" + private_key_path: "{{ default_private_key_path }}" + public_key_path: "{{ default_public_key_path }}" + security_group_name: "{{ default_security_group_name }}" + security_group_description: "{{ default_security_group_description }}" + security_group_rules: "{{ default_security_group_rules }}" + security_group_rules_egress: "{{ default_security_group_rules_egress }}" + ssh_user: "{{ default_ssh_user }}" + ssh_port: "{{ default_ssh_port }}" + cloud_config: {} + image: "" + image_name: "" + image_owner: [self] + name: "" + region: "" + security_groups: [] + tags: {} + volumes: [] + vpc_id: "" + vpc_subnet_id: "{{ lookup('env', 'VPC_SUBNET_ID') }}" + + # Merging defaults into a list of dicts is, it turns out, not straightforward + platforms: >- + {{ [platform_defaults | dict2items] + | product(molecule_yml.platforms | map('dict2items') | list) + | map('flatten', levels=1) + | list + | map('items2dict') + | list }} + pre_tasks: + - name: Validate platform configurations + ansible.builtin.assert: + that: + - platforms | length > 0 + - platform.name is string and platform.name | length > 0 + - platform.assign_public_ip is boolean + - platform.aws_profile is string + - platform.boot_wait_seconds is integer and platform.boot_wait_seconds >= 0 + - platform.cloud_config is mapping + - platform.image is string + - platform.image_name is string + - platform.image_owner is sequence or (platform.image_owner is string and platform.image_owner | length > 0) + - platform.instance_type is string and platform.instance_type | length > 0 + - platform.key_inject_method is in 
["cloud-init", "ec2"] + - platform.key_name is string and platform.key_name | length > 0 + - platform.private_key_path is string and platform.private_key_path | length > 0 + - platform.public_key_path is string and platform.public_key_path | length > 0 + - platform.region is string + - platform.security_group_name is string and platform.security_group_name | length > 0 + - platform.security_group_description is string and platform.security_group_description | length > 0 + - platform.security_group_rules is sequence + - platform.security_group_rules_egress is sequence + - platform.security_groups is sequence + - platform.ssh_user is string and platform.ssh_user | length > 0 + - platform.ssh_port is integer and platform.ssh_port in range(1, 65536) + - platform.tags is mapping + - platform.volumes is sequence + - platform.vpc_id is string + - platform.vpc_subnet_id is string and platform.vpc_subnet_id | length > 0 + quiet: true + loop: '{{ platforms }}' + loop_control: + loop_var: platform + label: "{{ platform.name }}" + tasks: + - name: Write run config to file + ansible.builtin.copy: + dest: "{{ run_config_path }}" + content: "{{ run_config | to_yaml }}" + mode: "0600" + + - name: Generate local key pairs + community.crypto.openssh_keypair: + path: "{{ item.private_key_path }}" + type: rsa + size: 2048 + regenerate: never + backend: cryptography + private_key_format: pkcs1 + loop: "{{ platforms }}" + loop_control: + label: "{{ item.name }}" + register: local_keypairs + + - name: Look up EC2 AMI(s) by owner and name (if image not set) + amazon.aws.ec2_ami_info: + owners: "{{ item.image_owner }}" + filters: "{{ item.image_filters | default({}) | combine(image_name_map) }}" + vars: + image_name_map: "{% if item.image_name is defined and item.image_name | length > 0 %}{{ {'name': item.image_name} }}{% else %}{}{% endif %}" + loop: "{{ platforms }}" + loop_control: + label: "{{ item.name }}" + when: not item.image + register: ami_info + + - name: Look up subnets to 
determine VPCs (if needed) + amazon.aws.ec2_vpc_subnet_info: + subnet_ids: "{{ item.vpc_subnet_id }}" + loop: "{{ platforms }}" + loop_control: + label: "{{ item.name }}" + when: not item.vpc_id + register: subnet_info + + - name: Validate discovered information + ansible.builtin.assert: + that: + - platform.image or (ami_info.results[index].images | length > 0) + - platform.vpc_id or (subnet_info.results[index].subnets | length > 0) + quiet: true + loop: "{{ platforms }}" + loop_control: + loop_var: platform + index_var: index + label: "{{ platform.name }}" + + - name: Create ephemeral EC2 keys (if needed) + amazon.aws.ec2_key: + profile: "{{ item.aws_profile | default(omit) }}" + region: "{{ item.region | default(omit) }}" + name: "{{ item.key_name }}" + key_material: "{{ local_keypair.public_key }}" + vars: + local_keypair: "{{ local_keypairs.results[index] }}" + loop: "{{ platforms }}" + loop_control: + index_var: index + label: "{{ item.name }}" + when: item.key_inject_method == "ec2" + register: ec2_keys + + - name: Create ephemeral security groups (if needed) + amazon.aws.ec2_security_group: + profile: "{{ item.aws_profile | default(omit) }}" + iam_instance_profile: "{{ item.iam_instance_profile | default(omit) }}" + region: "{{ item.region | default(omit) }}" + vpc_id: "{{ item.vpc_id or vpc_subnet.vpc_id }}" + name: "{{ item.security_group_name }}" + description: "{{ item.security_group_description }}" + rules: "{{ item.security_group_rules }}" + rules_egress: "{{ item.security_group_rules_egress }}" + vars: + vpc_subnet: "{{ subnet_info.results[index].subnets[0] }}" + loop: "{{ platforms }}" + loop_control: + index_var: index + label: "{{ item.name }}" + when: item.security_groups | length == 0 + + - name: Create ephemeral EC2 instance(s) + amazon.aws.ec2_instance: + name: "{{ item.name }}" + profile: "{{ item.aws_profile | default(omit) }}" + region: "{{ item.region | default(omit) }}" + filters: "{{ platform_filters }}" + instance_type: "{{ 
item.instance_type }}" + image_id: "{{ platform_image_id }}" + vpc_subnet_id: "{{ item.vpc_subnet_id }}" + security_groups: "{{ platform_security_groups }}" + network: + assign_public_ip: "{{ item.assign_public_ip }}" + volumes: "{{ item.volumes }}" + key_name: "{{ (item.key_inject_method == 'ec2') | ternary(item.key_name, omit) }}" + tags: "{{ platform_tags }}" + user_data: "{{ platform_user_data }}" + state: "running" + wait: true + vars: + platform_security_groups: "{{ item.security_groups or [item.security_group_name] }}" + platform_generated_image_id: "{{ (ami_info.results[index].images | sort(attribute='creation_date', reverse=True))[0].image_id }}" + platform_image_id: "{{ item.image or platform_generated_image_id }}" + + platform_generated_cloud_config: + users: + - name: "{{ item.ssh_user }}" + ssh_authorized_keys: + - "{{ local_keypairs.results[index].public_key }}" + sudo: "ALL=(ALL) NOPASSWD:ALL" + platform_cloud_config: >- + {{ (item.key_inject_method == 'cloud-init') + | ternary((item.cloud_config | combine(platform_generated_cloud_config)), item.cloud_config) }} + platform_user_data: |- + #cloud-config + {{ platform_cloud_config | to_yaml }} + + platform_generated_tags: + instance: "{{ item.name }}" + molecule-run-id: "{{ run_config.run_id }}" + platform_tags: "{{ (item.tags or {}) | combine(platform_generated_tags) }}" + platform_filter_keys: "{{ platform_generated_tags.keys() | map('regex_replace', '^(.+)$', 'tag:\\1') }}" + platform_filters: "{{ dict(platform_filter_keys | zip(platform_generated_tags.values())) }}" + loop: "{{ platforms }}" + loop_control: + index_var: index + label: "{{ item.name }}" + register: ec2_instances_async + async: 7200 + poll: 0 + + - name: Instance boot block + when: ec2_instances_async is changed + block: + - name: Wait for instance creation to complete + ansible.builtin.async_status: + jid: "{{ item.ansible_job_id }}" + loop: "{{ ec2_instances_async.results }}" + loop_control: + index_var: index + label: "{{ 
platforms[index].name }}" + register: ec2_instances + until: ec2_instances is finished + retries: 300 + + - name: Collect instance configs + ansible.builtin.set_fact: + instance_config: + instance: "{{ item.name }}" + address: "{{ item.assign_public_ip | ternary(instance.public_ip_address, instance.private_ip_address) }}" + user: "{{ item.ssh_user }}" + port: "{{ item.ssh_port }}" + identity_file: "{{ item.private_key_path }}" + instance_ids: + - "{{ instance.instance_id }}" + vars: + instance: "{{ ec2_instances.results[index].instances[0] }}" + loop: "{{ platforms }}" + loop_control: + index_var: index + label: "{{ item.name }}" + register: instance_configs + + - name: Write Molecule instance configs + ansible.builtin.copy: + dest: "{{ molecule_instance_config }}" + content: >- + {{ instance_configs.results + | map(attribute='ansible_facts.instance_config') + | list + | to_json + | from_json + | to_yaml }} + mode: "0600" + + - name: Start SSH pollers + ansible.builtin.wait_for: + host: "{{ item.address }}" + port: "{{ item.port }}" + search_regex: SSH + delay: 10 + timeout: 320 + loop: "{{ instance_configs.results | map(attribute='ansible_facts.instance_config') | list }}" + loop_control: + label: "{{ item.instance }}" + register: ssh_wait_async + async: 300 + poll: 0 + + - name: Wait for SSH + ansible.builtin.async_status: + jid: "{{ item.ansible_job_id }}" + loop: "{{ ssh_wait_async.results }}" + loop_control: + index_var: index + label: "{{ platforms[index].name }}" + register: ssh_wait + until: ssh_wait is finished + retries: 300 + delay: 1 + + - name: Wait for boot process to finish + ansible.builtin.pause: + seconds: "{{ platforms | map(attribute='boot_wait_seconds') | max }}" diff --git a/roles/rke2/molecule/template/destroy.yml b/roles/rke2/molecule/template/destroy.yml new file mode 100644 index 00000000..5ec0eaf4 --- /dev/null +++ b/roles/rke2/molecule/template/destroy.yml @@ -0,0 +1,143 @@ +--- +- name: Destroy + hosts: localhost + connection: local + 
gather_facts: false + no_log: "{{ molecule_no_log }}" + vars: + # Run config handling + default_run_id: "{{ lookup('password', '/dev/null chars=ascii_lowercase length=5') }}" + default_run_config: + run_id: "{{ default_run_id }}" + + run_config_path: "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}/run-config.yml" + run_config_from_file: "{{ (lookup('file', run_config_path, errors='ignore') or '{}') | from_yaml }}" + run_config: '{{ default_run_config | combine(run_config_from_file) }}' + + # Platform settings handling + default_aws_profile: "{{ lookup('env', 'AWS_PROFILE') }}" + default_key_inject_method: cloud-init # valid values: [cloud-init, ec2] + default_key_name: "molecule-{{ run_config.run_id }}" + default_security_group_name: "molecule-{{ run_config.run_id }}" + + platform_defaults: + aws_profile: "{{ default_aws_profile }}" + key_inject_method: "{{ default_key_inject_method }}" + key_name: "{{ default_key_name }}" + region: "" + security_group_name: "{{ default_security_group_name }}" + security_groups: [] + vpc_id: "" + vpc_subnet_id: "{{ lookup('env', 'VPC_SUBNET_ID') }}" + + # Merging defaults into a list of dicts is, it turns out, not straightforward + platforms: >- + {{ [platform_defaults | dict2items] + | product(molecule_yml.platforms | map('dict2items') | list) + | map('flatten', levels=1) + | list + | map('items2dict') + | list }} + + # Stored instance config + instance_config: "{{ (lookup('file', molecule_instance_config, errors='ignore') or '{}') | from_yaml }}" + pre_tasks: + - name: Validate platform configurations + ansible.builtin.assert: + that: + - platforms | length > 0 + - platform.name is string and platform.name | length > 0 + - platform.aws_profile is string + - platform.key_inject_method is in ["cloud-init", "ec2"] + - platform.key_name is string and platform.key_name | length > 0 + - platform.region is string + - platform.security_group_name is string and platform.security_group_name | length > 0 + - platform.security_groups is 
sequence + - platform.vpc_id is string + - platform.vpc_subnet_id is string and platform.vpc_subnet_id | length > 0 + quiet: true + loop: '{{ platforms }}' + loop_control: + loop_var: platform + label: "{{ platform.name }}" + tasks: + - name: Look up subnets to determine VPCs (if needed) + amazon.aws.ec2_vpc_subnet_info: + profile: "{{ item.aws_profile | default(omit) }}" + region: "{{ item.region | default(omit) }}" + subnet_ids: "{{ item.vpc_subnet_id }}" + loop: "{{ platforms }}" + loop_control: + label: "{{ item.name }}" + when: not item.vpc_id + register: subnet_info + + - name: Validate discovered information + ansible.builtin.assert: + that: platform.vpc_id or (subnet_info.results[index].subnets | length > 0) + quiet: true + loop: "{{ platforms }}" + loop_control: + loop_var: platform + index_var: index + label: "{{ platform.name }}" + + - name: Destroy resources + when: instance_config | length != 0 + block: + - name: Destroy ephemeral EC2 instances + amazon.aws.ec2_instance: + profile: "{{ item.aws_profile | default(omit) }}" + region: "{{ item.region | default(omit) }}" + instance_ids: "{{ instance_config | map(attribute='instance_ids') | flatten }}" + vpc_subnet_id: "{{ item.vpc_subnet_id }}" + state: absent + loop: "{{ platforms }}" + loop_control: + label: "{{ item.name }}" + register: ec2_instances_async + async: 7200 + poll: 0 + + - name: Wait for instance destruction to complete + ansible.builtin.async_status: + jid: "{{ item.ansible_job_id }}" + loop: "{{ ec2_instances_async.results }}" + loop_control: + index_var: index + label: "{{ platforms[index].name }}" + register: ec2_instances + until: ec2_instances is finished + retries: 300 + + - name: Write Molecule instance configs + ansible.builtin.copy: + dest: "{{ molecule_instance_config }}" + content: "{{ {} | to_yaml }}" + mode: "0600" + + - name: Destroy ephemeral security groups (if needed) + amazon.aws.ec2_security_group: + profile: "{{ item.aws_profile | default(omit) }}" + region: "{{ item.region | 
default(omit) }}" + vpc_id: "{{ item.vpc_id or vpc_subnet.vpc_id }}" + name: "{{ item.security_group_name }}" + state: absent + vars: + vpc_subnet: "{{ subnet_info.results[index].subnets[0] }}" + loop: "{{ platforms }}" + loop_control: + index_var: index + label: "{{ item.name }}" + when: item.security_groups | length == 0 + + - name: Destroy ephemeral keys (if needed) + amazon.aws.ec2_key: + profile: "{{ item.aws_profile | default(omit) }}" + region: "{{ item.region | default(omit) }}" + name: "{{ item.key_name }}" + state: absent + loop: "{{ platforms }}" + loop_control: + index_var: index + label: "{{ item.name }}" + when: item.key_inject_method == "ec2" diff --git a/roles/rke2/molecule/template/requirements.yml b/roles/rke2/molecule/template/requirements.yml new file mode 100644 index 00000000..35a10503 --- /dev/null +++ b/roles/rke2/molecule/template/requirements.yml @@ -0,0 +1,5 @@ +--- +collections: + - name: ansible.utils + - name: amazon.aws + - name: community.crypto diff --git a/roles/rke2/molecule/ubuntu-2204/molecule.yml b/roles/rke2/molecule/ubuntu-2204/molecule.yml new file mode 100644 index 00000000..96dddaa1 --- /dev/null +++ b/roles/rke2/molecule/ubuntu-2204/molecule.yml @@ -0,0 +1,60 @@ +--- +driver: + name: ec2 + +platforms: + - name: master-01 + image: ami-0677b91957321ed76 + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_servers + - name: worker-01 + image: ami-0677b91957321ed76 + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_agents + +provisioner: + name: ansible + playbooks: + converge: ../template/converge.yml + create: ../template/create.yml + destroy: ../template/destroy.yml + requirements: ../template/requirements.yml + inventory: + hosts: + rke2_cluster: + children: + rke2_servers: + vars: + group_rke2_config: + 
node-label: + - serverGroupLabel=true + hosts: + master-01: + host_rke2_config: + node-label: + - host0Label=true + rke2_agents: + vars: + group_rke2_config: + node-label: + - agentGroupLabel=true + hosts: + worker-01: + host_rke2_config: + node-label: + - host1Label=true + +verifier: + name: ansible diff --git a/roles/rke2/molecule/ubuntu-2404/molecule.yml b/roles/rke2/molecule/ubuntu-2404/molecule.yml new file mode 100644 index 00000000..dea82735 --- /dev/null +++ b/roles/rke2/molecule/ubuntu-2404/molecule.yml @@ -0,0 +1,60 @@ +--- +driver: + name: ec2 + +platforms: + - name: master-01 + image: ami-0862be96e41dcbf74 + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_servers + - name: worker-01 + image: ami-0862be96e41dcbf74 + instance_type: t2.medium + region: us-east-2 + assign_public_ip: true + tags: + deployed-with: "molecule" + molecule-scenario: "default" + groups: + - rke2_agents + +provisioner: + name: ansible + playbooks: + converge: ../template/converge.yml + create: ../template/create.yml + destroy: ../template/destroy.yml + requirements: ../template/requirements.yml + inventory: + hosts: + rke2_cluster: + children: + rke2_servers: + vars: + group_rke2_config: + node-label: + - serverGroupLabel=true + hosts: + master-01: + host_rke2_config: + node-label: + - host0Label=true + rke2_agents: + vars: + group_rke2_config: + node-label: + - agentGroupLabel=true + hosts: + worker-01: + host_rke2_config: + node-label: + - host1Label=true + +verifier: + name: ansible diff --git a/roles/rke2/tasks/NOT_USED_cluster_state.yml b/roles/rke2/tasks/NOT_USED_cluster_state.yml new file mode 100644 index 00000000..130fe3d2 --- /dev/null +++ b/roles/rke2/tasks/NOT_USED_cluster_state.yml @@ -0,0 +1,66 @@ +--- + +- name: Check for existing cluster + when: + - rke2_running is defined + - rke2_running + block: + - name: Check for node-token (existing cluster) + 
ansible.builtin.stat: + path: /var/lib/rancher/rke2/server/node-token + register: node_token_tmp + + - name: Read node-token (existing cluster) + ansible.builtin.slurp: + src: /var/lib/rancher/rke2/server/node-token + register: rke2_config_token_tmp + when: + - node_token_tmp.stat.exists + + - name: Set node-token fact (existing cluster) + ansible.builtin.set_fact: + rke2_config_token: "{{ rke2_config_token_tmp.content | b64decode | regex_replace('\n', '') }}" + when: + - "rke2_config_token_tmp.content is defined" + - rke2_config_token_tmp.content | length != 0 + + - name: Set node-token fact on all hosts (existing cluster) + ansible.builtin.set_fact: + rke2_config_token: "{{ hostvars[item]['rke2_config_token'] }}" + delegate_to: localhost + run_once: true + loop: "{{ groups['all'] }}" + when: "hostvars[item]['rke2_config_token'] is defined" + + - name: Debug found token + ansible.builtin.debug: + msg: "rke2_config_token: {{ rke2_config_token }}" + when: rke2_config_token != "" + + - name: Read host with token (existing cluster) + ansible.builtin.set_fact: + existing_join_host: "{{ ansible_hostname }}" + when: + - node_token_tmp.stat.exists + + - name: Set join server fact on all hosts (existing cluster) + ansible.builtin.set_fact: + rke2_kubernetes_api_server_host: "{{ hostvars[item]['existing_join_host'] }}" + delegate_to: localhost + run_once: true + loop: "{{ groups['all'] }}" + when: + - "hostvars[item]['existing_join_host'] is defined" + - hostvars[item]['rke2_kubernetes_api_server_host'] == "" + vars: + rke2_kubernetes_api_server_host: "{{ existing_join_host | default('') }}" + +- name: No existing cluster found and api server not set + ansible.builtin.set_fact: + rke2_kubernetes_api_server_host: "{{ hostvars[groups['rke2_servers'][0]].inventory_hostname }}" + when: + - rke2_kubernetes_api_server_host == "" + +- name: Debug found join_server + ansible.builtin.debug: + msg: "Join Server: {{ rke2_kubernetes_api_server_host }}" diff --git 
a/roles/rke2/tasks/add_ansible_managed_config.yml b/roles/rke2/tasks/add_ansible_managed_config.yml new file mode 100644 index 00000000..cb07f931 --- /dev/null +++ b/roles/rke2/tasks/add_ansible_managed_config.yml @@ -0,0 +1,37 @@ +--- +- name: "Add {{ file_description }} file" # noqa name[template] + ansible.builtin.template: + src: ansible_managed_yaml.j2 + dest: "{{ file_destination }}" + mode: '0640' + owner: root + group: root + when: + - file_path | default("") | length != 0 + notify: "Restart {{ service_name }}" + +- name: "Remove {{ file_description }} file" # noqa name[template] + when: + - file_path | default("") | length == 0 + block: + - name: "Check that the {{ file_description }} file exists" # noqa name[template] + ansible.builtin.stat: + path: "{{ file_destination }}" + register: stat_result + + - name: "Check that the {{ file_description }} config file has ansible managed comments" # noqa name[template] + ansible.builtin.lineinfile: + name: "{{ file_destination }}" + line: '## This is an Ansible managed file, contents will be overwritten ##' + state: present + check_mode: true + register: ansible_managed_check + when: stat_result.stat.exists | bool is true + + - name: "Remove the {{ file_description }} file if exists and has ansible managed comments" # noqa name[template] + ansible.builtin.file: + path: "{{ file_destination }}" + state: absent + when: + - ansible_managed_check.changed | bool is false # noqa no-handler + notify: "Restart {{ service_name }}" diff --git a/roles/rke2/tasks/add_manifest_addons.yml b/roles/rke2/tasks/add_manifest_addons.yml new file mode 100644 index 00000000..e8421971 --- /dev/null +++ b/roles/rke2/tasks/add_manifest_addons.yml @@ -0,0 +1,35 @@ +--- + +- name: Look up manifest files on localhost + ansible.builtin.find: + paths: "{{ source_directory }}" + register: local_files_find_return + delegate_to: localhost + +- name: Create array of managed files + ansible.builtin.set_fact: + managed_files: "{{ 
local_files_find_return.files | map(attribute='path') | map('basename') }}" + +- name: Add manifest addons files from localhost + ansible.builtin.copy: + src: "{{ source_directory | regex_replace('\\/$', '') }}/" + dest: "{{ destination_directory }}" + mode: '0640' + owner: root + group: root + +- name: Look up manifest files on remote + ansible.builtin.find: + paths: "{{ destination_directory }}" + register: remote_files_find_return + +- name: Create array of remote files + ansible.builtin.set_fact: + current_files: "{{ remote_files_find_return.files | map(attribute='path') | map('basename') }}" + +- name: Remove remote files not in managed files list + ansible.builtin.file: + path: "{{ destination_directory }}/{{ item }}" + state: absent + with_items: "{{ current_files }}" + when: item not in managed_files diff --git a/roles/rke2/tasks/calculate_rke2_version.yml b/roles/rke2/tasks/calculate_rke2_version.yml new file mode 100644 index 00000000..7c0a939d --- /dev/null +++ b/roles/rke2/tasks/calculate_rke2_version.yml @@ -0,0 +1,93 @@ +--- + +- name: "Determine latest version from internet" + when: + - rke2_install_version | length == 0 + - '"rpm.rancher.io" in rke2_versioned_yum_repo.baseurl' + - rke2_install_local_tarball_path == "" + - rke2_install_tarball_url == "" + block: + + - name: Get versions from update.rke2.io + ansible.builtin.uri: + url: https://update.rke2.io/v1-release/channels/{{ rke2_channel }} + follow_redirects: safe + remote_src: true + register: rke2_version_url + + - name: Save version + ansible.builtin.shell: set -o pipefail && echo {{ rke2_version_url.url }} | sed -e 's|.*/||' + register: rke2_full_version + changed_when: false + args: + executable: /usr/bin/bash + + - name: Set rke2_full_version fact from internet source + ansible.builtin.set_fact: + rke2_full_version: "{{ rke2_full_version.stdout }}" + +- name: Unset rke2_full_version if skipped + ansible.builtin.set_fact: + rke2_full_version: "" + when: + - rke2_full_version.skipped is 
defined + - rke2_full_version is skipped + +# - name: Set rke2_full_version fact +# ansible.builtin.set_fact: +# rke2_full_version: "{{ rke2_full_version.stdout if (install_rke2_version | length == 0) else install_rke2_version }}" + +- name: Set rke2_full_version fact from variable source + ansible.builtin.set_fact: + rke2_full_version: "{{ rke2_install_version }}" + when: + - rke2_install_version | length > 0 + +- name: Set rke2_package_state to latest + ansible.builtin.set_fact: + rke2_package_state: "latest" + when: + - rke2_full_version | length == 0 + +- name: "Set install version for RPM" + when: + - install_method == "rpm" + - rke2_full_version | length > 0 + block: + + - name: Set Maj.Min version + ansible.builtin.shell: + cmd: set -o pipefail && echo {{ rke2_full_version }} | /bin/awk -F'.' '{ print $1"."$2 }' | sed "s|^v||g" + register: rke2_version_majmin_tmp + changed_when: false + args: + executable: /usr/bin/bash + + - name: Set rke2_version_majmin fact + ansible.builtin.set_fact: + rke2_version_majmin: "{{ rke2_version_majmin_tmp.stdout }}" + + - name: Set RPM version + ansible.builtin.shell: + cmd: set -o pipefail && echo {{ rke2_full_version }} | sed -E -e "s/[\+-]/~/g" | sed -E -e "s/v(.*)/\1/" + register: rke2_version_rpm_tmp + changed_when: false + args: + executable: /usr/bin/bash + + - name: Set rke2_version_rpm fact + ansible.builtin.set_fact: + rke2_version_rpm: "{{ rke2_version_rpm_tmp.stdout }}" + + - name: Prepend 'dash' to version string + ansible.builtin.set_fact: + rke2_version_rpm: "{{ '-' + rke2_version_rpm }}" + when: + - rke2_version_rpm | length > 0 + +# - name: Describe versions +# ansible.builtin.debug: +# msg: +# - "Full version, with revision indication: {{ rke2_full_version }}" +# - "Major and Minor Only: {{ rke2_version_majmin }}" +# - "RPM Version (tilde): {{ rke2_version_rpm }}" diff --git a/roles/rke2/tasks/check_node_ready.yml b/roles/rke2/tasks/check_node_ready.yml new file mode 100644 index 00000000..e543852d --- 
/dev/null +++ b/roles/rke2/tasks/check_node_ready.yml @@ -0,0 +1,80 @@ +--- + +- name: Wait for k8s apiserver + ansible.builtin.wait_for: + host: localhost + port: "6443" + state: present + timeout: "{{ check_node_ready_timeout }}" + changed_when: false + register: api_serve_status + ignore_errors: "{{ check_node_ready_ignore_errors }}" + +- name: Set fact + ansible.builtin.set_fact: + rke2_api_server_running: true + when: + - api_serve_status.state is not undefined + - api_serve_status.state == "present" + +- name: Set fact + ansible.builtin.set_fact: + rke2_api_server_running: "{{ rke2_api_server_running }}" + +- name: Get node_metrics + ansible.builtin.uri: + url: https://localhost:10250/metrics + return_content: true + ca_path: /var/lib/rancher/rke2/server/tls/server-ca.crt + client_cert: /var/lib/rancher/rke2/server/tls/client-admin.crt + client_key: /var/lib/rancher/rke2/server/tls/client-admin.key + register: node_metrics + retries: "{{ check_node_ready_retries }}" + delay: "{{ check_node_ready_delay }}" + ignore_errors: "{{ check_node_ready_ignore_errors }}" + +- name: Check that node_metrics collection was successful + ansible.builtin.set_fact: + rke2_metrics_running: true + when: + - 200 | string in node_metrics.status | string + +- name: Set fact for rke2_metrics_running + ansible.builtin.set_fact: + rke2_metrics_running: "{{ rke2_metrics_running }}" + +- name: Extract the kubelet_node_name from node metrics + ansible.builtin.set_fact: + kubelet_node_name: "{{ node_metrics.content | regex_search('kubelet_node_name{node=\"(.*)\"}', '\\1') }}" + when: + - 200 | string in node_metrics.status | string + +- name: Wait for node to show Ready status + ansible.builtin.command: >- + /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml + --server https://127.0.0.1:6443 get no {{ kubelet_node_name[0] }} + -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' + register: status_result + until: status_result.stdout.find("True") != -1 + 
retries: "{{ check_node_ready_retries }}" + delay: "{{ check_node_ready_delay }}" + changed_when: false + ignore_errors: "{{ check_node_ready_ignore_errors }}" + +- name: Set fact + ansible.builtin.set_fact: + rke2_node_ready: "true" + when: + - status_result.rc is not undefined + - status_result.rc | string == "0" + +- name: Set fact + ansible.builtin.set_fact: + rke2_node_ready: "{{ rke2_node_ready }}" + +- name: Node status + ansible.builtin.debug: + msg: | + "rke2_node_ready: {{ rke2_node_ready }}" + "rke2_metrics_running: {{ rke2_metrics_running }}" + "rke2_api_server_running: {{ rke2_api_server_running }}" diff --git a/roles/rke2_common/tasks/cis-hardening.yml b/roles/rke2/tasks/cis_hardening.yml similarity index 63% rename from roles/rke2_common/tasks/cis-hardening.yml rename to roles/rke2/tasks/cis_hardening.yml index 67a12bb6..b2d194b2 100644 --- a/roles/rke2_common/tasks/cis-hardening.yml +++ b/roles/rke2/tasks/cis_hardening.yml @@ -1,9 +1,12 @@ --- + - name: CIS MODE - become: yes - when: rke2_config.profile | default("") | regex_search('^cis(-\\d+.\\d+)?$') + become: true + when: + - (cluster_rke2_config.profile | default("") | regex_search('^cis(-\\d+.\\d+)?$')) or + (group_rke2_config.profile | default("") | regex_search('^cis(-\\d+.\\d+)?$')) or + (host_rke2_config.profile | default("") | regex_search('^cis(-\\d+.\\d+)?$')) block: - - name: Create etcd group ansible.builtin.group: name: etcd @@ -25,35 +28,32 @@ mode: 0600 register: sysctl_operation_yum when: - - ansible_os_family == 'RedHat' or ansible_os_family == 'Rocky' - - not rke2_binary_tarball_check.stat.exists - - rke2_tarball_url is not defined or rke2_tarball_url == "" + - install_method == "rpm" + notify: + - Restart systemd-sysctl + - "Restart {{ service_name }}" + - Reboot the machine - name: Copy systemctl file for kernel hardening for non-yum installs ansible.builtin.copy: src: /usr/local/share/rke2/rke2-cis-sysctl.conf dest: /etc/sysctl.d/60-rke2-cis.conf - remote_src: yes + 
remote_src: true mode: 0600 register: sysctl_operation_tarball - when: >- - (ansible_facts['os_family'] != 'RedHat' and - ansible_facts['os_family'] != 'Rocky') or - rke2_binary_tarball_check.stat.exists or - (rke2_tarball_url is defined and rke2_tarball_url != "") - - - name: Restart systemd-sysctl - ansible.builtin.service: - state: restarted - name: systemd-sysctl - when: sysctl_operation_yum.changed or sysctl_operation_tarball.changed + when: + - install_method == "tarball" + notify: + - Restart systemd-sysctl + - "Restart {{ service_name }}" + - Reboot the machine # Per CIS hardening guide, if Kubernetes is already running, making changes to sysctl can result in unexpected # side-effects. Rebooting node if RKE2 is already running to prevent potential issues whereas before we were # always rebooting, even if the node was brand new and RKE2 not running yet. - name: Reboot the machine (Wait for 5 min) - ansible.builtin.reboot: - reboot_timeout: 300 + ansible.builtin.set_fact: + rke2_reboot: true when: - (sysctl_operation_yum.changed or sysctl_operation_tarball.changed) - rke2_running is defined diff --git a/roles/rke2/tasks/config.yml b/roles/rke2/tasks/config.yml new file mode 100644 index 00000000..ace77c77 --- /dev/null +++ b/roles/rke2/tasks/config.yml @@ -0,0 +1,20 @@ +--- + +# combine host and group vars to form primary rke2_config +- name: Combine host and group config vars + ansible.builtin.set_fact: + temp_group_rke2_config: "{{ cluster_rke2_config | default({}) | ansible.builtin.combine((group_rke2_config | default({})), list_merge='prepend_rp') }}" + +# combine host and group vars to form primary rke2_config +- name: Combine host and group config vars + ansible.builtin.set_fact: + rke2_config: "{{ temp_group_rke2_config | default({}) | ansible.builtin.combine((host_rke2_config | default({})), list_merge='prepend_rp') }}" + +# write final config +- name: Create config.yaml + ansible.builtin.blockinfile: + path: /etc/rancher/rke2/config.yaml + block: "{{ 
rke2_config | to_nice_yaml(indent=0) }}" + create: true + mode: "0640" + notify: Restart {{ service_name }} diff --git a/roles/rke2/tasks/configure_rke2.yml b/roles/rke2/tasks/configure_rke2.yml new file mode 100644 index 00000000..5673884c --- /dev/null +++ b/roles/rke2/tasks/configure_rke2.yml @@ -0,0 +1,38 @@ +--- + +- name: Create the /etc/rancher/rke2 config dir + ansible.builtin.file: + path: /etc/rancher/rke2 + state: directory + recurse: true + +- name: Run CIS-Hardening Tasks + ansible.builtin.include_tasks: cis_hardening.yml + +- name: "Include task file add_ansible_managed_config.yml for {{ file_description }}" + ansible.builtin.include_tasks: add_ansible_managed_config.yml + vars: + file_contents: "{{ lookup('file', rke2_registry_config_file_path) }}" + file_destination: "/etc/rancher/rke2/registries.yaml" + file_description: "registry configuration" + file_path: "{{ rke2_registry_config_file_path }}" + +- name: "Include task file add_ansible_managed_config.yml for {{ file_description }}" + ansible.builtin.include_tasks: add_ansible_managed_config.yml + vars: + file_contents: "{{ lookup('file', rke2_audit_policy_config_file_path) }}" + file_destination: "/etc/rancher/rke2/audit-policy.yaml" + file_description: "audit policy configuration" + file_path: "{{ rke2_audit_policy_config_file_path }}" + when: + - inventory_hostname in groups['rke2_servers'] + +- name: "Include task file add_ansible_managed_config.yml for {{ file_description }}" + ansible.builtin.include_tasks: add_ansible_managed_config.yml + vars: + file_contents: "{{ lookup('file', rke2_pod_security_admission_config_file_path) }}" + file_destination: "/etc/rancher/rke2/pod-security-admission-config.yaml" + file_description: "pod security admission config" + file_path: "{{ rke2_pod_security_admission_config_file_path }}" + when: + - inventory_hostname in groups['rke2_servers'] diff --git a/roles/rke2/tasks/first_server.yml b/roles/rke2/tasks/first_server.yml new file mode 100644 index 
00000000..c126799f --- /dev/null +++ b/roles/rke2/tasks/first_server.yml @@ -0,0 +1,24 @@ +--- + +- name: Include task file config.yml + ansible.builtin.include_tasks: config.yml + +- name: Flush_handlers + ansible.builtin.meta: flush_handlers + +- name: Ensure rke2 is running + ansible.builtin.service: + state: started + enabled: true + name: "{{ service_name }}" + +- name: Check_node_ready + any_errors_fatal: true + block: + - name: Start check_node_ready.yml + ansible.builtin.include_tasks: check_node_ready.yml + vars: + check_node_ready_timeout: 300 + check_node_ready_retries: 30 + check_node_ready_delay: 10 + check_node_ready_ignore_errors: false diff --git a/roles/rke2/tasks/images_bundle.yml b/roles/rke2/tasks/images_bundle.yml new file mode 100644 index 00000000..9b069f25 --- /dev/null +++ b/roles/rke2/tasks/images_bundle.yml @@ -0,0 +1,28 @@ +--- + +- name: Create images directory + ansible.builtin.file: + path: /var/lib/rancher/rke2/agent/images + state: directory + mode: '0755' + +- name: Download images tar files url + ansible.builtin.get_url: + url: "{{ item }}" + dest: "/var/lib/rancher/rke2/agent/images" + mode: "0644" + when: + - rke2_images_urls != [] + with_items: "{{ rke2_images_urls }}" + notify: "Restart {{ service_name }}" + +- name: Copy local tarball images + ansible.builtin.copy: + src: "{{ item }}" + dest: /var/lib/rancher/rke2/agent/images/ + mode: '0644' + with_items: + - "{{ rke2_images_local_tarball_path }}" + when: + - rke2_images_local_tarball_path != [] + notify: "Restart {{ service_name }}" diff --git a/roles/rke2_common/tasks/iptables_rules.yml b/roles/rke2/tasks/iptables_rules.yml similarity index 100% rename from roles/rke2_common/tasks/iptables_rules.yml rename to roles/rke2/tasks/iptables_rules.yml diff --git a/roles/rke2/tasks/main.yml b/roles/rke2/tasks/main.yml new file mode 100644 index 00000000..07cdbc18 --- /dev/null +++ b/roles/rke2/tasks/main.yml @@ -0,0 +1,135 @@ +--- + +- name: Populate service facts + 
ansible.builtin.service_facts: {} + +- name: Gather the package facts + ansible.builtin.package_facts: + manager: auto + +- name: Set for install method of tarball + ansible.builtin.set_fact: + install_method: tarball + when: |- + ((ansible_facts['os_family'] != 'RedHat' and ansible_facts['os_family'] != 'Rocky') or + rke2_install_tarball_url != "" or + rke2_install_local_tarball_path != "" or + rke2_force_tarball_install|bool) + +- name: Set for install method of rpm + ansible.builtin.set_fact: + install_method: rpm + when: + - ansible_os_family == 'RedHat' or ansible_os_family == 'Rocky' + - rke2_install_local_tarball_path == "" + - rke2_install_tarball_url == "" + - not rke2_force_tarball_install|bool + +- name: Set as server + ansible.builtin.set_fact: + service_name: rke2-server + when: + - inventory_hostname in groups['rke2_servers'] + +- name: Set as agent + ansible.builtin.set_fact: + service_name: rke2-agent + when: + - inventory_hostname in groups.get('rke2_agents', []) + +- name: Satisfy OS Pre-Reqs + ansible.builtin.include_tasks: pre_reqs.yml + +- name: Has rke2 been installed already + ansible.builtin.include_tasks: previous_install.yml + +- name: Check for images bundle + ansible.builtin.include_tasks: images_bundle.yml + when: + - rke2_images_urls != [] or + rke2_images_local_tarball_path != [] + +- name: Determine rke2_version to install + ansible.builtin.include_tasks: calculate_rke2_version.yml + when: + - rke2_install_local_tarball_path == "" + - rke2_install_tarball_url == "" + +- name: Start check_node_ready.yml + ansible.builtin.include_tasks: check_node_ready.yml + vars: + check_node_ready_timeout: 2 + check_node_ready_retries: 2 + check_node_ready_delay: 2 + check_node_ready_ignore_errors: true + when: + - inventory_hostname in groups['rke2_servers'] + +- name: Create a list of ready servers + ansible.builtin.set_fact: + ready_servers: "{{ groups.rke2_servers | map('extract', hostvars) | selectattr('rke2_node_ready', 'equalto', true) | 
map(attribute='inventory_hostname') | list }}" + delegate_to: localhost + run_once: true + +- name: Tarball Install + ansible.builtin.include_tasks: tarball_install.yml + when: + - install_method == "tarball" + +- name: RPM Install + ansible.builtin.include_tasks: rpm_install.yml + when: + - install_method == "rpm" + +- name: Set rke2 configuration files + ansible.builtin.include_tasks: configure_rke2.yml + +- name: Include task file add_manifest_addons.yml + ansible.builtin.include_tasks: add_manifest_addons.yml + vars: + source_directory: "{{ rke2_manifest_config_directory }}" + destination_directory: /var/lib/rancher/rke2/server/manifests/ansible_managed_0 + when: + - rke2_manifest_config_directory is defined + - rke2_manifest_config_directory | length > 0 + - inventory_hostname in groups['rke2_servers'][0] + +# is the ready_servers array is empty, we assume it's a new cluster and use the first server in groups['rke2_servers'] +- name: Start the first rke2 node + ansible.builtin.include_tasks: first_server.yml + when: + - inventory_hostname in groups['rke2_servers'][0] + - ready_servers | length == 0 + +- name: Save_generated_token.yml + ansible.builtin.include_tasks: save_generated_token.yml + vars: + token_source_node: "{{ groups['rke2_servers'][0] }}" + when: + - ready_servers | length == 0 + +# is the ready_servers array is > 0, we assume it's an established cluster and treat all nodes equally (no need for initial server procedure) +- name: Save_generated_token.yml + ansible.builtin.include_tasks: save_generated_token.yml + vars: + token_source_node: "{{ ready_servers[0] }}" + when: + - ready_servers | length > 0 + +- name: Start all other rke2 nodes + ansible.builtin.include_tasks: other_nodes.yml + +- name: Configure kubectl,crictl,ctr + ansible.builtin.include_tasks: utilities.yml + when: + - inventory_hostname in groups['rke2_servers'] + +- name: Include task file add_manifest_addons.yml + ansible.builtin.include_tasks: add_manifest_addons.yml + vars: + 
source_directory: "{{ rke2_manifest_config_post_run_directory }}" + destination_directory: /var/lib/rancher/rke2/server/manifests/ansible_managed_1 + when: + - rke2_manifest_config_post_run_directory is defined + - rke2_manifest_config_post_run_directory | length > 0 + - inventory_hostname in groups['rke2_servers'][0] diff --git a/roles/rke2_common/tasks/network_manager_fix.yaml b/roles/rke2/tasks/network_manager_fix.yaml similarity index 78% rename from roles/rke2_common/tasks/network_manager_fix.yaml rename to roles/rke2/tasks/network_manager_fix.yaml index b891b61a..4e61c1eb 100644 --- a/roles/rke2_common/tasks/network_manager_fix.yaml +++ b/roles/rke2/tasks/network_manager_fix.yaml @@ -3,13 +3,14 @@ # This fixes known issue with NetworkManager # https://docs.rke2.io/known_issues/#networkmanager +# blockinfile or own entire file? - name: Add NetworkManager fix to rke2-canal.conf ansible.builtin.blockinfile: path: /etc/NetworkManager/conf.d/rke2-canal.conf block: | [keyfile] unmanaged-devices=interface-name:cali*;interface-name:flannel* - create: yes + create: true mode: 0600 when: ansible_facts.services["NetworkManager.service"] is defined @@ -25,24 +26,24 @@ owner: root group: root when: rke2_canal_file.stat.exists + notify: "Restart {{ service_name }}" - name: Disable service nm-cloud-setup ansible.builtin.systemd: name: nm-cloud-setup.service - enabled: no + enabled: false state: stopped when: ansible_facts.services["nm-cloud-setup.service"] is defined + notify: + - Reload NetworkManager + - "Restart {{ service_name }}" - name: Disable nm-cloud-setup.timer unit ansible.builtin.systemd: name: nm-cloud-setup.timer state: stopped - enabled: no + enabled: false when: ansible_facts.services["nm-cloud-setup.service"] is defined - -- name: Reload NetworkManager - ansible.builtin.systemd: - name: NetworkManager - state: reloaded - when: (ansible_facts.services["NetworkManager.service"] is defined) and - (ansible_facts.services["NetworkManager.service"].status == 
"running") + notify: + - Reload NetworkManager + - "Restart {{ service_name }}" diff --git a/roles/rke2/tasks/other_nodes.yml b/roles/rke2/tasks/other_nodes.yml new file mode 100644 index 00000000..1d004b02 --- /dev/null +++ b/roles/rke2/tasks/other_nodes.yml @@ -0,0 +1,27 @@ +--- + +- name: Wait for remote k8s apiserver + ansible.builtin.wait_for: + host: "{{ rke2_kubernetes_api_server_host }}" + port: "6443" + state: present + timeout: "300" + changed_when: false + +# - name: Include task file add-manifest-addons.yml +# ansible.builtin.include_tasks: add-manifest-addons.yml +# when: +# - manifest_config_file_path is defined +# - manifest_config_file_path | length > 0 + +- name: Generate config.yml on other nodes + ansible.builtin.include_tasks: config.yml + +- name: Flush_handlers + ansible.builtin.meta: flush_handlers + +- name: Ensure rke2 is running + ansible.builtin.service: + state: started + enabled: true + name: "{{ service_name }}" diff --git a/roles/rke2/tasks/pre_reqs.yml b/roles/rke2/tasks/pre_reqs.yml new file mode 100644 index 00000000..3a47e02e --- /dev/null +++ b/roles/rke2/tasks/pre_reqs.yml @@ -0,0 +1,40 @@ +--- + +# Disable Firewalld +# We recommend disabling firewalld. For Kubernetes 1.19+, firewalld must be turned off. 
+- name: Disable FIREWALLD + ansible.builtin.systemd: + name: firewalld + state: stopped + enabled: false + when: + - ansible_facts.services["firewalld.service"] is defined + - ansible_facts.services["firewalld.service"].status != "not-found" + notify: "Restart {{ service_name }}" + +- name: Include task file network_manager_fix.yaml + ansible.builtin.include_tasks: network_manager_fix.yaml + +- name: Add server iptables rules + ansible.builtin.include_tasks: iptables_rules.yml + when: + # - ansible_facts.services["iptables.service"] is defined + - rke2_add_iptables_rules | bool + +- name: Add fapolicyd rules + ansible.builtin.copy: + content: "{{ fapolicyd_rules }}" + dest: /etc/fapolicyd/rules.d/80-rke2.rules + mode: '0644' + owner: root + group: fapolicyd + when: + - ansible_facts.services["fapolicyd.service"] is defined + - ansible_facts.services["fapolicyd.service"].state == "running" + vars: + fapolicyd_rules: | + allow perm=any all : dir=/var/lib/rancher/ + allow perm=any all : dir=/opt/cni/ + allow perm=any all : dir=/run/k3s/ + allow perm=any all : dir=/var/lib/kubelet/ + notify: Restart fapolicyd diff --git a/roles/rke2/tasks/previous_install.yml b/roles/rke2/tasks/previous_install.yml new file mode 100644 index 00000000..3e264a15 --- /dev/null +++ b/roles/rke2/tasks/previous_install.yml @@ -0,0 +1,61 @@ +--- + +- name: Set fact if rke2-server was previously installed + ansible.builtin.set_fact: + rke2_installed: true + when: + - ansible_facts.services["rke2-server.service"] is defined + - not ansible_facts.services["rke2-server.service"].status == 'disabled' + - inventory_hostname in groups['rke2_servers'] + - install_method == "tarball" + +- name: Set fact if rke2-server is running + ansible.builtin.set_fact: + rke2_running: true + when: + - ansible_facts.services["rke2-server.service"] is defined + - ansible_facts.services["rke2-server.service"].state == 'running' + - inventory_hostname in groups['rke2_servers'] + +- name: Set fact if rke2-agent was 
previously installed + ansible.builtin.set_fact: + rke2_installed: true + when: + - ansible_facts.services["rke2-agent.service"] is defined + - not ansible_facts.services["rke2-agent.service"].status == 'disabled' + - inventory_hostname in groups.get('rke2_agents', []) + - install_method == "tarball" + +- name: Set fact if rke2-agent is running + ansible.builtin.set_fact: + rke2_running: true + when: + - ansible_facts.services["rke2-agent.service"] is defined + - ansible_facts.services["rke2-agent.service"].state == 'running' + - inventory_hostname in groups.get('rke2_agents', []) + +- name: Check for the rke2 binary + ansible.builtin.stat: + path: /usr/local/bin/rke2 + register: rke2_binary + when: install_method == "tarball" + +- name: Get current rke2 version if already installed + ansible.builtin.shell: set -o pipefail && /usr/local/bin/rke2 -v | awk '$1 ~ /rke2/ { print $3 }' + register: rke2_installed_version_tmp + changed_when: false + args: + executable: /usr/bin/bash + when: + - install_method == "tarball" + - rke2_binary.stat.exists + failed_when: > + (rke2_installed_version_tmp.rc != 141) and + (rke2_installed_version_tmp.rc != 0) + +- name: Set fact for current rke2 version + ansible.builtin.set_fact: + rke2_installed_version: "{{ rke2_installed_version_tmp.stdout }}" + when: + - install_method == "tarball" + - rke2_binary.stat.exists diff --git a/roles/rke2/tasks/rpm_install.yml b/roles/rke2/tasks/rpm_install.yml new file mode 100644 index 00000000..189d60dd --- /dev/null +++ b/roles/rke2/tasks/rpm_install.yml @@ -0,0 +1,36 @@ +--- + +# Add RKE2 Common repo +- name: Add the rke2-common repo RHEL/CentOS/Rocky + ansible.builtin.yum_repository: + name: "{{ rke2_common_yum_repo.name }}" + description: "{{ rke2_common_yum_repo.description }}" + baseurl: "{{ rke2_common_yum_repo.baseurl }}" + gpgcheck: "{{ rke2_common_yum_repo.gpgcheck }}" + gpgkey: "{{ rke2_common_yum_repo.gpgkey }}" + enabled: "{{ rke2_common_yum_repo.enabled }}" + +# Add RKE2 versioned 
repo +- name: Add the rke2 versioned repo CentOS/RHEL/Rocky + ansible.builtin.yum_repository: + name: "{{ rke2_versioned_yum_repo.name }}" + description: "{{ rke2_versioned_yum_repo.description }}" + baseurl: "{{ rke2_versioned_yum_repo.baseurl }}" + gpgcheck: "{{ rke2_versioned_yum_repo.gpgcheck }}" + gpgkey: "{{ rke2_versioned_yum_repo.gpgkey }}" + enabled: "{{ rke2_versioned_yum_repo.enabled }}" + +# - name: Debug install +# ansible.builtin.debug: +# msg: installing {{ service_name }}{{ rke2_version_rpm }} + +- name: YUM-Based Install + ansible.builtin.dnf: + name: "{{ service_name }}{{ rke2_version_rpm }}" + state: "{{ rke2_package_state }}" + allow_downgrade: true + register: result + retries: 10 + until: result is succeeded + delay: 30 + notify: "Restart {{ service_name }}" diff --git a/roles/rke2/tasks/save_generated_token.yml b/roles/rke2/tasks/save_generated_token.yml new file mode 100644 index 00000000..92400b4a --- /dev/null +++ b/roles/rke2/tasks/save_generated_token.yml @@ -0,0 +1,41 @@ +--- + +- name: Wait for node-token + ansible.builtin.wait_for: + path: /var/lib/rancher/rke2/server/node-token + delegate_to: "{{ token_source_node }}" + +- name: Read node-token from master + ansible.builtin.slurp: + src: /var/lib/rancher/rke2/server/node-token + register: node_token + delegate_to: "{{ token_source_node }}" + +- name: Store Master node-token + ansible.builtin.set_fact: + rke2_config_token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}" + delegate_to: "{{ token_source_node }}" + +- name: Set temp fact to store token config line + ansible.builtin.set_fact: + temp_token: + token: "{{ rke2_config_token }}" + +- name: Update host_rke2_config fact to contain server line + ansible.builtin.set_fact: + host_rke2_config: "{{ temp_token | default({}) | ansible.builtin.combine((host_rke2_config | default({})), list_merge='prepend_rp') }}" + +- name: Set temp fact for api host + ansible.builtin.set_fact: + rke2_kubernetes_api_server_host: "{{ 
token_source_node }}" + when: + - rke2_kubernetes_api_server_host == "" + +- name: Set temp fact to store server config line with custom join server URL + ansible.builtin.set_fact: + temp_host_rke2_config: + server: "https://{{ rke2_kubernetes_api_server_host }}:9345" + +- name: Update host_rke2_config fact to contain server line + ansible.builtin.set_fact: + host_rke2_config: "{{ temp_host_rke2_config | default({}) | ansible.builtin.combine((host_rke2_config | default({})), list_merge='prepend_rp') }}" diff --git a/roles/rke2_common/tasks/tarball_install.yml b/roles/rke2/tasks/tarball_install.yml similarity index 52% rename from roles/rke2_common/tasks/tarball_install.yml rename to roles/rke2/tasks/tarball_install.yml index ca0d3f5f..8f857bd2 100644 --- a/roles/rke2_common/tasks/tarball_install.yml +++ b/roles/rke2/tasks/tarball_install.yml @@ -1,61 +1,50 @@ --- -# Based off of https://get.rke2.io 's do_install_tar functon - -# do_install_tar() { -# setup_tmp -# get_release_version -# info "using ${INSTALL_RKE2_VERSION:-commit $INSTALL_RKE2_COMMIT} as release" -# download_checksums -# download_tarball -# verify_tarball -# unpack_tarball -# } - - name: TARBALL | Make temp dir ansible.builtin.tempfile: state: directory - suffix: rke2-install.XXXXXXXXXX + suffix: .rke2-install.XXXXXXXXXX path: "{{ tarball_tmp_dir | default(omit) }}" register: temp_dir -- name: Send provided tarball if available +- name: Set architecture specific variables + ansible.builtin.set_fact: + arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}" + +- name: Determine if current version differs from what is being installed + ansible.builtin.set_fact: + rke2_version_changed: true + when: + - rke2_install_local_tarball_path == "" + - rke2_install_tarball_url == "" + - not rke2_installed or rke2_installed_version != rke2_full_version + +- name: Send provided tarball from local control machine if available ansible.builtin.copy: - src: "{{ playbook_dir 
}}/tarball_install/rke2.linux-amd64.tar.gz" - dest: "{{ temp_dir.path }}/rke2.linux-amd64.tar.gz" + src: "{{ rke2_install_local_tarball_path }}" + dest: "{{ temp_dir.path }}/rke2.linux-{{ arch }}.tar.gz" mode: '0644' when: - - rke2_binary_tarball_check.stat.exists - - rke2_tarball_url == "" + - rke2_install_local_tarball_path != "" - name: Download Tar from provided URL ansible.builtin.get_url: - url: "{{ rke2_tarball_url }}" - dest: "{{ temp_dir.path }}/rke2.linux-amd64.tar.gz" + url: "{{ rke2_install_tarball_url }}" + dest: "{{ temp_dir.path }}/rke2.linux-{{ arch }}.tar.gz" mode: "0644" when: - - not rke2_binary_tarball_check.stat.exists - - rke2_tarball_url != "" + - rke2_install_tarball_url != "" - -- name: Determine if current version differs what what is being installed - ansible.builtin.set_fact: - rke2_version_changed: true - when: - - not rke2_binary_tarball_check.stat.exists - - rke2_tarball_url == "" - - not installed or installed_rke2_version != rke2_full_version - -- name: TARBALL | Download the tarball +- name: Download the tar from github releases ansible.builtin.get_url: - url: https://github.com/rancher/rke2/releases/download/{{ rke2_full_version }}/rke2.linux-amd64.tar.gz - dest: "{{ temp_dir.path }}/rke2.linux-amd64.tar.gz" + url: "https://github.com/rancher/rke2/releases/download/{{ rke2_full_version }}/rke2.linux-{{ arch }}.tar.gz" + dest: "{{ temp_dir.path }}/rke2.linux-{{ arch }}.tar.gz" mode: "0644" when: - - not rke2_binary_tarball_check.stat.exists - - rke2_tarball_url == "" + - rke2_install_local_tarball_path == "" + - rke2_install_tarball_url == "" - rke2_version_changed -- name: TARBALL | Install tar package +- name: Ensure Tar utility installed on system ansible.builtin.package: name: tar state: present @@ -63,53 +52,54 @@ - name: Get version of provided tarball when: - - (rke2_binary_tarball_check.stat.exists or rke2_tarball_url != "") + - (rke2_install_local_tarball_path != "" or rke2_install_tarball_url != "") block: - name: 
Unarchive tarball into temp location ansible.builtin.unarchive: - src: "{{ temp_dir.path }}/rke2.linux-amd64.tar.gz" + src: "{{ temp_dir.path }}/rke2.linux-{{ arch }}.tar.gz" dest: "{{ temp_dir.path }}" remote_src: true + changed_when: false - name: Get tarball RKE2 version from temp location ansible.builtin.shell: set -o pipefail && {{ temp_dir.path }}/bin/rke2 -v | awk '$1 ~ /rke2/ { print $3 }' - register: tarball_rke2_version_tmp + register: rke2_tarball_version_tmp changed_when: false args: executable: /usr/bin/bash - name: Set tarball RKE2 version var ansible.builtin.set_fact: - tarball_rke2_version: "{{ tarball_rke2_version_tmp.stdout }}" + rke2_tarball_version: "{{ rke2_tarball_version_tmp.stdout }}" - - name: Determine if current version differs what what is being installed + - name: Determine if current version differs from what is being installed ansible.builtin.set_fact: rke2_version_changed: true when: - - not installed or installed_rke2_version != tarball_rke2_version + - not rke2_installed or rke2_installed_version != rke2_tarball_version - name: TARBALL | Check Target Mountpoint - ansible.builtin.command: mountpoint -q {{ tarball_dir }} - register: tarball_dir_stat + ansible.builtin.command: mountpoint -q {{ rke2_tarball_install_dir }} + register: rke2_tarball_install_dir_stat failed_when: false changed_when: false -- name: TARBALL | tarball_dir is a mountpoint setting dir to /opt/rke2 +- name: TARBALL | rke2_tarball_install_dir is a mountpoint setting dir to /opt/rke2 ansible.builtin.set_fact: - tarball_dir: "/opt/rke2" - when: tarball_dir_stat.rc == 0 + rke2_tarball_install_dir: "/opt/rke2" + when: rke2_tarball_install_dir_stat.rc == 0 - name: TARBALL | Using /opt/rke2 ansible.builtin.debug: msg: "Using /opt/rke2 for install directory" - when: tarball_dir_stat.rc == 0 + when: rke2_tarball_install_dir_stat.rc == 0 -- name: TARBALL | Create {{ tarball_dir }} +- name: TARBALL | Create {{ rke2_tarball_install_dir }} ansible.builtin.file: - path: "{{ 
tarball_dir }}" + path: "{{ rke2_tarball_install_dir }}" state: directory recurse: true - when: tarball_dir is defined + when: rke2_tarball_install_dir is defined - name: Final extraction/installation of RKE2 Tar when: @@ -118,75 +108,77 @@ - name: Unarchive rke2 tar ansible.builtin.unarchive: - src: "{{ temp_dir.path }}/rke2.linux-amd64.tar.gz" - dest: "{{ tarball_dir }}" + src: "{{ temp_dir.path }}/rke2.linux-{{ arch }}.tar.gz" + dest: "{{ rke2_tarball_install_dir }}" remote_src: true - name: TARBALL | Updating rke2-server.service ansible.builtin.replace: - path: "{{ tarball_dir }}/lib/systemd/system/rke2-server.service" + path: "{{ rke2_tarball_install_dir }}/lib/systemd/system/rke2-server.service" regexp: '/usr/local' - replace: '{{ tarball_dir }}' + replace: '{{ rke2_tarball_install_dir }}' + notify: Restart rke2-server - name: TARBALL | Updating rke2-agent.service ansible.builtin.replace: - path: "{{ tarball_dir }}/lib/systemd/system/rke2-agent.service" + path: "{{ rke2_tarball_install_dir }}/lib/systemd/system/rke2-agent.service" regexp: '/usr/local' - replace: '{{ tarball_dir }}' + replace: '{{ rke2_tarball_install_dir }}' + notify: Restart rke2-agent - name: TARBALL | Updating rke2-uninstall.sh ansible.builtin.replace: - path: "{{ tarball_dir }}/bin/rke2-uninstall.sh" + path: "{{ rke2_tarball_install_dir }}/bin/rke2-uninstall.sh" regexp: '/usr/local' - replace: '{{ tarball_dir }}' + replace: '{{ rke2_tarball_install_dir }}' - name: TARBALL | Moving Systemd units to /etc/systemd/system ansible.builtin.copy: - src: "{{ tarball_dir }}/lib/systemd/system/rke2-server.service" + src: "{{ rke2_tarball_install_dir }}/lib/systemd/system/rke2-server.service" dest: /etc/systemd/system/rke2-server.service mode: '0644' owner: root group: root - remote_src: yes + remote_src: true when: - inventory_hostname in groups['rke2_servers'] - name: TARBALL | Moving Systemd units to /etc/systemd/system ansible.builtin.copy: - src: "{{ tarball_dir 
}}/lib/systemd/system/rke2-server.env" + src: "{{ rke2_tarball_install_dir }}/lib/systemd/system/rke2-server.env" dest: /etc/systemd/system/rke2-server.env mode: '0644' owner: root group: root - remote_src: yes + remote_src: true when: - inventory_hostname in groups['rke2_servers'] - name: TARBALL | Moving Systemd units to /etc/systemd/system ansible.builtin.copy: - src: "{{ tarball_dir }}/lib/systemd/system/rke2-agent.service" + src: "{{ rke2_tarball_install_dir }}/lib/systemd/system/rke2-agent.service" dest: /etc/systemd/system/rke2-agent.service mode: '0644' owner: root group: root - remote_src: yes + remote_src: true when: - inventory_hostname in groups.get('rke2_agents', []) - name: TARBALL | Moving Systemd units to /etc/systemd/system ansible.builtin.copy: - src: "{{ tarball_dir }}/lib/systemd/system/rke2-agent.env" + src: "{{ rke2_tarball_install_dir }}/lib/systemd/system/rke2-agent.env" dest: /etc/systemd/system/rke2-agent.env mode: '0644' owner: root group: root - remote_src: yes + remote_src: true when: - inventory_hostname in groups.get('rke2_agents', []) - name: TARBALL | Refreshing systemd unit files ansible.builtin.systemd: - daemon-reload: yes + daemon-reload: true - name: Remove the temp_dir ansible.builtin.file: diff --git a/roles/rke2_server/tasks/utilities.yml b/roles/rke2/tasks/utilities.yml similarity index 100% rename from roles/rke2_server/tasks/utilities.yml rename to roles/rke2/tasks/utilities.yml diff --git a/roles/rke2/tasks/wait_for_rke2.yml b/roles/rke2/tasks/wait_for_rke2.yml new file mode 100644 index 00000000..ea027d97 --- /dev/null +++ b/roles/rke2/tasks/wait_for_rke2.yml @@ -0,0 +1,45 @@ +--- + +- name: Start rke2 + ansible.builtin.meta: flush_handlers + +- name: Enable service + ansible.builtin.systemd: + name: "{{ service_name }}" + state: started + enabled: true + +- name: Wait for k8s apiserver + ansible.builtin.wait_for: + host: "{{ rke2_kubernetes_api_server_host }}" + port: "6443" + state: present + timeout: 300 + +- name: 
Wait for kubelet process to be present on host + ansible.builtin.command: >- + ps -C kubelet -F -ww --no-headers + register: kubelet_check + until: kubelet_check.rc == 0 + retries: 20 + delay: 10 + changed_when: false + +- name: Extract the hostname-override parameter from the kubelet process + ansible.builtin.set_fact: + kubelet_hostname: "{{ kubelet_check.stdout | regex_search('\\s--hostname-override=([^\\s]+)', '\\1') }}" + when: + - inventory_hostname in groups['rke2_servers'] + +- name: Wait for node to show Ready status + ansible.builtin.command: >- + /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml + --server https://127.0.0.1:6443 get no {{ kubelet_hostname[0] }} + -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' + register: status_result + until: status_result.stdout.find("True") != -1 + retries: 20 + delay: 10 + changed_when: false + when: + - inventory_hostname in groups['rke2_servers'] diff --git a/ansible_header.j2 b/roles/rke2/templates/ansible_managed_yaml.j2 similarity index 77% rename from ansible_header.j2 rename to roles/rke2/templates/ansible_managed_yaml.j2 index 0377d97b..3691a008 100644 --- a/ansible_header.j2 +++ b/roles/rke2/templates/ansible_managed_yaml.j2 @@ -1,3 +1,3 @@ ## This is an Ansible managed file, contents will be overwritten ## -{{ file_contents }} +{{ file_contents }} \ No newline at end of file diff --git a/roles/rke2/vars/main.yml b/roles/rke2/vars/main.yml new file mode 100644 index 00000000..879b4f8c --- /dev/null +++ b/roles/rke2/vars/main.yml @@ -0,0 +1,8 @@ +--- + +rke2_installed: false +rke2_version_changed: false +rke2_reboot: false +rke2_version_majmin: "" +rke2_version_rpm: "" +rke2_package_state: "installed" diff --git a/roles/rke2_agent/defaults/main.yml b/roles/rke2_agent/defaults/main.yml deleted file mode 100644 index ae927959..00000000 --- a/roles/rke2_agent/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -kubernetes_api_server_host: "{{ 
hostvars[groups['rke2_servers'][0]].inventory_hostname }}" diff --git a/roles/rke2_agent/tasks/main.yml b/roles/rke2_agent/tasks/main.yml deleted file mode 100644 index 4d9cfdeb..00000000 --- a/roles/rke2_agent/tasks/main.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- - -- name: RKE2 agent and server tasks - vars: - rke2_common_caller_role_name: agent - ansible.builtin.include_role: - name: rke2_common - tasks_from: main - -- name: Does config file already have server token? # noqa command-instead-of-shell - ansible.builtin.command: 'grep -i "^token:" /etc/rancher/rke2/config.yaml' - register: server_token_check - failed_when: server_token_check.rc >= 2 - changed_when: false - -- name: Add token to config.yaml - ansible.builtin.lineinfile: - dest: /etc/rancher/rke2/config.yaml - line: "token: {{ hostvars[groups['rke2_servers'][0]].rke2_config_token }}" - state: present - insertbefore: BOF - when: - - '"token:" not in server_token_check.stdout' - -- name: Does config file already have server url? 
# noqa command-instead-of-shell - ansible.builtin.command: 'grep -i "^server:" /etc/rancher/rke2/config.yaml' - register: server_url_check - failed_when: server_url_check.rc >= 2 - changed_when: false - -- name: Add server url to config file - ansible.builtin.lineinfile: - dest: /etc/rancher/rke2/config.yaml - line: "server: https://{{ kubernetes_api_server_host }}:9345" - state: present - insertbefore: BOF - when: - - '"server:" not in server_url_check.stdout' - -- name: Start rke2-agent - ansible.builtin.systemd: - name: rke2-agent.service - state: started - enabled: yes - daemon_reload: yes diff --git a/roles/rke2_agent/vars/main.yml b/roles/rke2_agent/vars/main.yml deleted file mode 100644 index 53b1ae20..00000000 --- a/roles/rke2_agent/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -tmp_sha1: 55ca6286e3e4f4fba5d0448333fa99fc5a404a73 diff --git a/roles/rke2_common/defaults/main.yml b/roles/rke2_common/defaults/main.yml deleted file mode 100644 index 9c7caf2c..00000000 --- a/roles/rke2_common/defaults/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -tarball_dir: "/usr/local" -rke2_tarball_url: "" -rke2_images_urls: [] -rke2_channel: stable -audit_policy_config_file_path: "" -registry_config_file_path: "" -pod_security_admission_config_file_path: "" -add_iptables_rules: false -rke2_common_yum_repo: - name: rancher-rke2-common - description: "Rancher RKE2 Common Latest" - baseurl: "https://rpm.rancher.io/rke2/stable/common/centos/$releasever/noarch" - gpgcheck: true - gpgkey: "https://rpm.rancher.io/public.key" - enabled: yes - -rke2_versioned_yum_repo: - name: "rancher-rke2-v{{ rke2_version_majmin }}" # noqa jinja[spacing] - description: "Rancher RKE2 Version" - baseurl: "https://rpm.rancher.io/rke2/stable/{{ rke2_version_majmin }}/centos/$releasever/$basearch" - gpgcheck: true - gpgkey: "https://rpm.rancher.io/public.key" - enabled: yes - -rke2_config: {} diff --git a/roles/rke2_common/handlers/main.yml b/roles/rke2_common/handlers/main.yml deleted file mode 
100644 index 4f823682..00000000 --- a/roles/rke2_common/handlers/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- - -- name: Restart systemd-sysctl - ansible.builtin.service: - state: restarted - name: systemd-sysctl - -- name: Restart rke2-server - ansible.builtin.service: - state: restarted - name: rke2-server - -- name: Restart rke2-agent - ansible.builtin.service: - state: restarted - name: rke2-agent diff --git a/roles/rke2_common/tasks/add-audit-policy-config.yml b/roles/rke2_common/tasks/add-audit-policy-config.yml deleted file mode 100644 index 66bb82ae..00000000 --- a/roles/rke2_common/tasks/add-audit-policy-config.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Create the /etc/rancher/rke2 config dir - ansible.builtin.file: - path: /etc/rancher/rke2 - state: directory - recurse: yes - -- name: Add audit policy configuration file - ansible.builtin.copy: - src: "{{ audit_policy_config_file_path }}" - dest: "/etc/rancher/rke2/audit-policy.yaml" - mode: '0640' - owner: root - group: root diff --git a/roles/rke2_common/tasks/add-manifest-addons.yml b/roles/rke2_common/tasks/add-manifest-addons.yml deleted file mode 100644 index a7524f1b..00000000 --- a/roles/rke2_common/tasks/add-manifest-addons.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- - -- name: Add manifest addons files - ansible.builtin.copy: - src: "{{ manifest_config_file_path }}" - dest: "/var/lib/rancher/rke2/server/manifests/" - mode: '0640' - owner: root - group: root diff --git a/roles/rke2_common/tasks/add-registry-config.yml b/roles/rke2_common/tasks/add-registry-config.yml deleted file mode 100644 index 664afe84..00000000 --- a/roles/rke2_common/tasks/add-registry-config.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Create the /etc/rancher/rke2 config dir - ansible.builtin.file: - path: /etc/rancher/rke2 - state: directory - recurse: yes - -- name: Add registry configuration file - ansible.builtin.copy: - src: "{{ registry_config_file_path }}" - dest: "/etc/rancher/rke2/registries.yaml" - mode: 
'0640' - owner: root - group: root - when: rke2_common_caller_role_name == "server" - notify: Restart rke2-server - -- name: Add registry configuration file - ansible.builtin.copy: - src: "{{ registry_config_file_path }}" - dest: "/etc/rancher/rke2/registries.yaml" - mode: '0640' - owner: root - group: root - when: rke2_common_caller_role_name == "agent" - notify: Restart rke2-agent diff --git a/roles/rke2_common/tasks/calculate_rke2_version.yml b/roles/rke2_common/tasks/calculate_rke2_version.yml deleted file mode 100644 index e18ae9c5..00000000 --- a/roles/rke2_common/tasks/calculate_rke2_version.yml +++ /dev/null @@ -1,72 +0,0 @@ ---- - -- name: "Calculate rke2 full version" - when: ( install_rke2_version is not defined ) or ( install_rke2_version | length == 0 ) - block: - - name: Stop if the provided is not valid - ansible.builtin.fail: - msg: "Provided channel is not valid" - when: rke2_channel not in channels - - - name: Get full version name url - ansible.builtin.uri: - url: https://update.rke2.io/v1-release/channels/{{ rke2_channel }} - follow_redirects: safe - remote_src: true - register: rke2_version_url - - - name: Set full version name - ansible.builtin.shell: set -o pipefail && echo {{ rke2_version_url.url }} | sed -e 's|.*/||' - register: rke2_full_version - changed_when: false - args: - executable: /usr/bin/bash - -- name: Set rke2_full_version fact - ansible.builtin.set_fact: - rke2_full_version: "{{ rke2_full_version.stdout if ((install_rke2_version is not defined) or - (install_rke2_version | length == 0)) else install_rke2_version }}" - -- name: Set dot version - ansible.builtin.shell: - cmd: set -o pipefail && echo {{ rke2_full_version }} | /usr/bin/cut -d'+' -f1 - register: rke2_version_dot_tmp - changed_when: false - args: - executable: /usr/bin/bash - -- name: Set rke2_version_dot fact - ansible.builtin.set_fact: - rke2_version_dot: "{{ rke2_version_dot_tmp.stdout }}" - -- name: Set Maj.Min version - ansible.builtin.shell: - cmd: set -o 
pipefail && echo {{ rke2_full_version }} | /bin/awk -F'.' '{ print $1"."$2 }' | sed "s|^v||g" - register: rke2_version_majmin_tmp - changed_when: false - args: - executable: /usr/bin/bash - -- name: Set rke2_version_majmin fact - ansible.builtin.set_fact: - rke2_version_majmin: "{{ rke2_version_majmin_tmp.stdout }}" - -- name: Set RPM version - ansible.builtin.shell: - cmd: set -o pipefail && echo {{ rke2_full_version }} | sed -E -e "s/[\+-]/~/g" | sed -E -e "s/v(.*)/\1/" - register: rke2_version_rpm_tmp - changed_when: false - args: - executable: /usr/bin/bash - -- name: Set rke2_version_rpm fact - ansible.builtin.set_fact: - rke2_version_rpm: "{{ rke2_version_rpm_tmp.stdout }}" - -- name: Describe versions - ansible.builtin.debug: - msg: - - "Full version, with revision indication: {{ rke2_full_version }}" - - "Version without revision indication: {{ rke2_version_dot }}" - - "Major and Minor Only: {{ rke2_version_majmin }}" - - "RPM Version (tilde): {{ rke2_version_rpm }}" diff --git a/roles/rke2_common/tasks/config.yml b/roles/rke2_common/tasks/config.yml deleted file mode 100644 index b755f8c5..00000000 --- a/roles/rke2_common/tasks/config.yml +++ /dev/null @@ -1,276 +0,0 @@ ---- -- name: Create the /etc/rancher/rke2 config dir - ansible.builtin.file: - path: /etc/rancher/rke2 - state: directory - mode: "0750" - -- name: Does the /etc/rancher/rke2/config.yaml file exist? 
- ansible.builtin.stat: - path: /etc/rancher/rke2/config.yaml - register: previous_rke2_config - -- name: Read previous_rke2_config - ansible.builtin.slurp: - src: /etc/rancher/rke2/config.yaml - register: full_orig_rke2_config - when: previous_rke2_config.stat.exists - -- name: Decode contents of slurp - ansible.builtin.set_fact: - orig_rke2_config: "{{ full_orig_rke2_config['content'] | b64decode }}" - when: previous_rke2_config.stat.exists - -- name: Create the /etc/rancher/rke2/config.yaml file - ansible.builtin.file: - path: /etc/rancher/rke2/config.yaml - state: touch - mode: "0640" - owner: root - group: root - when: not previous_rke2_config.stat.exists - -# https://github.com/ansible-collections/ansible.utils/issues/135 -- name: Ensure Ansible renders any templated variables in rke2_config - ansible.builtin.set_fact: - rke2_config: "{{ rke2_config | default({}) }}" - -# --node-label value (agent/node) Registering and starting kubelet with set of labels -- name: Get rke2_config node-labels - ansible.builtin.set_fact: - rke2_config_node_labels: "{{ rke2_config['node-label'] | default([]) }}" - -- name: Get host var node-labels - ansible.builtin.set_fact: - host_var_node_labels: "{{ node_labels | default([]) }}" - -- name: Combine rke2_config node labels and hostvar node labels - ansible.builtin.set_fact: - all_node_labels: "{{ rke2_config_node_labels + host_var_node_labels }}" - changed_when: false - -- name: Add node labels to rke2_config - ansible.utils.update_fact: - updates: - - path: rke2_config["node-label"] - value: "{{ all_node_labels }}" - register: updated_rke2_config - changed_when: false - -- name: Update rke2_config to take value of updated_rke2_config - ansible.builtin.set_fact: - rke2_config: "{{ updated_rke2_config.rke2_config }}" - changed_when: false - -# --node-taint value (agent/node) Registering kubelet with set of taints -- name: Get rke2_config node-taints - ansible.builtin.set_fact: - rke2_config_node_taints: "{{ 
rke2_config['node-taint'] | default([]) }}" - -- name: Get host var node-taints - ansible.builtin.set_fact: - host_var_node_taints: "{{ node_taints | default([]) }}" - -- name: Combine rke2_config node taints and hostvar node taints - ansible.builtin.set_fact: - all_node_taints: "{{ rke2_config_node_taints + host_var_node_taints }}" - changed_when: false - -- name: Add node labels to rke2_config - ansible.utils.update_fact: - updates: - - path: rke2_config["node-taint"] - value: "{{ all_node_taints }}" - register: updated_rke2_config - changed_when: false - -- name: Update rke2_config to take value of updated_rke2_config - ansible.builtin.set_fact: - rke2_config: "{{ updated_rke2_config.rke2_config }}" - changed_when: false - -# --node-ip value, -i value (agent/networking) IPv4/IPv6 addresses to advertise for node -- name: Add node-ip to rke2_config - ansible.utils.update_fact: - updates: - - path: rke2_config["node-ip"] - value: "{{ node_ip }}" - when: (node_ip is defined) and (node_ip|length > 0) - register: updated_rke2_config - changed_when: false - -- name: Update rke2_config to take value of updated_rke2_config # noqa no-handler - ansible.builtin.set_fact: - rke2_config: "{{ updated_rke2_config.rke2_config }}" - when: (node_ip is defined) and (node_ip|length > 0) - changed_when: false - -# --node-name value (agent/node) Node name [$RKE2_NODE_NAME] -- name: Add node-name to rke2_config - ansible.utils.update_fact: - updates: - - path: rke2_config["node-name"] - value: "{{ node_name }}" - when: (node_name is defined) and (node_name|length > 0) - register: updated_rke2_config - changed_when: false - -- name: Update rke2_config to take value of updated_rke2_config # noqa no-handler - ansible.builtin.set_fact: - rke2_config: "{{ updated_rke2_config.rke2_config }}" - when: (node_name is defined) and (node_name|length > 0) - changed_when: false - -# --bind-address value (listener) rke2 bind address (default: 0.0.0.0) -- name: Add bind-address to rke2_config - 
ansible.utils.update_fact: - updates: - - path: rke2_config["bind-address"] - value: "{{ bind_address }}" - when: (bind_address is defined) and (bind_address|length > 0) - register: updated_rke2_config - changed_when: false - -- name: Update rke2_config to take value of updated_rke2_config # noqa no-handler - ansible.builtin.set_fact: - rke2_config: "{{ updated_rke2_config.rke2_config }}" - when: (bind_address is defined) and (bind_address|length > 0) - changed_when: false - -# --advertise-address value (listener) IPv4 address that apiserver uses -# to advertise to members of the cluster (default: node-external-ip/node-ip) -- name: Add advertise-address to rke2_config - ansible.utils.update_fact: - updates: - - path: rke2_config["advertise-address"] - value: "{{ advertise_address }}" - when: (advertise_address is defined) and (advertise_address|length > 0) - register: updated_rke2_config - changed_when: false - -- name: Update rke2_config to take value of updated_rke2_config # noqa no-handler - ansible.builtin.set_fact: - rke2_config: "{{ updated_rke2_config.rke2_config }}" - when: (advertise_address is defined) and (advertise_address|length > 0) - changed_when: false - -# --node-external-ip value (agent/networking) IPv4/IPv6 external IP addresses to advertise for node -- name: Add node-external-ip to rke2_config - ansible.utils.update_fact: - updates: - - path: rke2_config["node-external-ip"] - value: "{{ node_external_ip }}" - when: (node_external_ip is defined) and (node_external_ip|length > 0) - register: updated_rke2_config - changed_when: false - -- name: Update rke2_config to take value of updated_rke2_config # noqa no-handler - ansible.builtin.set_fact: - rke2_config: "{{ updated_rke2_config.rke2_config }}" - when: (node_external_ip is defined) and (node_external_ip|length > 0) - changed_when: false - -# --cloud-provider-name value (agent/node) Cloud provider name -- name: Add cloud-provider-name to rke2_config - ansible.utils.update_fact: - updates: - - 
path: rke2_config["cloud-provider-name"] - value: "{{ cloud_provider_name }}" - when: (cloud_provider_name is defined) and (cloud_provider_name|length > 0) - register: updated_rke2_config - -- name: Update rke2_config to take value of updated_rke2_config # noqa no-handler - ansible.builtin.set_fact: - rke2_config: "{{ updated_rke2_config.rke2_config }}" - when: (cloud_provider_name is defined) and (cloud_provider_name|length > 0) - -- name: Remove tmp config file - ansible.builtin.file: - path: /tmp/ansible-config.txt - state: absent - changed_when: false - -- name: Create tmp config.yaml - ansible.builtin.copy: - content: "{{ rke2_config | to_nice_yaml(indent=0) }}" - dest: /tmp/ansible-config.txt - mode: "0600" - owner: root - group: root - changed_when: false - -- name: Get original token - ansible.builtin.set_fact: - original_token: "{{ orig_rke2_config | regex_search('token: (.+)') }}" - when: previous_rke2_config.stat.exists - changed_when: false - -- name: Add token to config.yaml - ansible.builtin.lineinfile: - dest: /tmp/ansible-config.txt - line: "{{ original_token }}" - state: present - insertbefore: BOF - when: previous_rke2_config.stat.exists and original_token | length > 0 - changed_when: false - -- name: Get original server - ansible.builtin.set_fact: - original_server: "{{ orig_rke2_config | regex_search('server: https://(.*):9345') }}" - when: previous_rke2_config.stat.exists - changed_when: false - -- name: Add server url to config file - ansible.builtin.lineinfile: - dest: /tmp/ansible-config.txt - line: "{{ original_server }}" - state: present - insertbefore: BOF - when: previous_rke2_config.stat.exists and original_server | length > 0 - changed_when: false - -- name: Stat tmp config - ansible.builtin.stat: - path: /tmp/ansible-config.txt - register: tmp_config - changed_when: false - -- name: Get cksum of tmp config - ansible.builtin.set_fact: - tmp_sha1: "{{ tmp_config.stat.checksum }}" - changed_when: false - -- name: Drop in final 
/etc/rancher/rke2/config.yaml - ansible.builtin.copy: - src: /tmp/ansible-config.txt - remote_src: yes - dest: /etc/rancher/rke2/config.yaml - mode: "0640" - owner: root - group: root - backup: yes - when: not previous_rke2_config.stat.exists or (tmp_sha1 != previous_rke2_config.stat.checksum) - -- name: Remove tmp config file - ansible.builtin.file: - path: /tmp/ansible-config.txt - state: absent - changed_when: false - -- name: Restart rke2-server if package installed and config changed or RKE2 version changed - ansible.builtin.service: - state: restarted - name: rke2-server - when: - - ansible_facts.services["rke2-server.service"] is defined - - "ansible_facts.services['rke2-server.service'].state == 'running'" - - (tmp_sha1 != previous_rke2_config.stat.checksum or (rke2_version_changed | default(false))) - -- name: Restart rke2-agent if package installed and config changed or RKE2 version changed - ansible.builtin.service: - state: restarted - name: rke2-agent - when: - - ansible_facts.services["rke2-agent.service"] is defined - - "ansible_facts.services['rke2-agent.service'].state == 'running'" - - (tmp_sha1 != previous_rke2_config.stat.checksum or (rke2_version_changed | default(false))) diff --git a/roles/rke2_common/tasks/images_tarball_install.yml b/roles/rke2_common/tasks/images_tarball_install.yml deleted file mode 100644 index 191c97fe..00000000 --- a/roles/rke2_common/tasks/images_tarball_install.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- name: "Check for images tar.gz in {{ playbook_dir }}/tarball_install/rke2-images.linux-amd64.tar.gz" # noqa name[template] yaml[line-length] - ansible.builtin.stat: - path: "{{ playbook_dir }}/tarball_install/rke2-images.linux-amd64.tar.gz" - get_checksum: false - register: got_images_gz - delegate_to: 127.0.0.1 - become: false - -- name: "Check for images tar.zst in {{ playbook_dir }}/tarball_install/rke2-images.linux-amd64.tar.zst" # noqa name[template] yaml[line-length] - ansible.builtin.stat: - path: "{{ 
playbook_dir }}/tarball_install/rke2-images.linux-amd64.tar.zst" - get_checksum: false - register: got_images_zst - delegate_to: 127.0.0.1 - become: false - -- name: Create images directory - ansible.builtin.file: - path: /var/lib/rancher/rke2/agent/images - state: directory - mode: '0644' - -- name: Download images tar files url - ansible.builtin.get_url: - url: "{{ item }}" - dest: "/var/lib/rancher/rke2/agent/images" - mode: "0644" - when: - - rke2_images_urls != [] - with_items: "{{ rke2_images_urls }}" - -- name: Add images tar.gz to needed directory if provided - ansible.builtin.copy: - src: "{{ playbook_dir }}/tarball_install/rke2-images.linux-amd64.tar.gz" - dest: /var/lib/rancher/rke2/agent/images/ - mode: '0644' - when: got_images_gz.stat.exists - -- name: Add images tar.zst to needed directory if provided - ansible.builtin.copy: - src: "{{ playbook_dir }}/tarball_install/rke2-images.linux-amd64.tar.zst" - dest: /var/lib/rancher/rke2/agent/images/ - mode: '0644' - when: got_images_zst.stat.exists diff --git a/roles/rke2_common/tasks/main.yml b/roles/rke2_common/tasks/main.yml deleted file mode 100644 index 8b8bad68..00000000 --- a/roles/rke2_common/tasks/main.yml +++ /dev/null @@ -1,80 +0,0 @@ ---- - -- name: Populate service facts - ansible.builtin.service_facts: {} - -- name: Gather the package facts - ansible.builtin.package_facts: - manager: auto - -- name: Has rke2 been installed already - ansible.builtin.include_tasks: previous_install.yml - -- name: Include images_tarball_install.yml - ansible.builtin.include_tasks: images_tarball_install.yml - -- name: "Check for binary tarball in tarball_install/rke2.linux-amd64.tar.gz" - ansible.builtin.stat: - path: "{{ playbook_dir }}/tarball_install/rke2.linux-amd64.tar.gz" - register: rke2_binary_tarball_check - delegate_to: 127.0.0.1 - become: false - -- name: Include calculate_rke2_version.yml - ansible.builtin.include_tasks: calculate_rke2_version.yml - when: - - not rke2_binary_tarball_check.stat.exists 
- - rke2_tarball_url is not defined or rke2_tarball_url == "" - -- name: SLES/Ubuntu/Tarball Installation - ansible.builtin.include_tasks: tarball_install.yml - when: - - |- - ((ansible_facts['os_family'] != 'RedHat' and - ansible_facts['os_family'] != 'Rocky') or - rke2_binary_tarball_check.stat.exists or - (rke2_tarball_url is defined and rke2_tarball_url != "")) - -- name: RHEL/CentOS Installation - when: - - ansible_os_family == 'RedHat' or ansible_os_family == 'Rocky' - - not rke2_binary_tarball_check.stat.exists - - rke2_tarball_url == "" - ansible.builtin.include_tasks: rpm_install.yml - -# Disable Firewalld -# We recommend disabling firewalld. For Kubernetes 1.19+, firewalld must be turned off. -- name: Disable FIREWALLD - ansible.builtin.systemd: - name: firewalld - state: stopped - enabled: no - when: - - ansible_facts.services["firewalld.service"] is defined - - ansible_facts.services["firewalld.service"].status != "not-found" - -- name: Include task file network_manager_fix.yaml - ansible.builtin.include_tasks: network_manager_fix.yaml - -- name: Include task file config.yml - ansible.builtin.include_tasks: config.yml - -- name: Add server iptables rules - ansible.builtin.include_tasks: iptables_rules.yml - when: - - ansible_facts.services["iptables.service"] is defined - - add_iptables_rules | bool - -- name: Include task file add-audit-policy-config.yml - ansible.builtin.include_tasks: add-audit-policy-config.yml - when: - - audit_policy_config_file_path | length > 0 - -- name: Include task file add-registry-config.yml - ansible.builtin.include_tasks: add-registry-config.yml - when: registry_config_file_path | length > 0 - -- name: Run CIS-Hardening Tasks - ansible.builtin.include_role: - name: rke2_common - tasks_from: cis-hardening diff --git a/roles/rke2_common/tasks/previous_install.yml b/roles/rke2_common/tasks/previous_install.yml deleted file mode 100644 index ea1b9c3a..00000000 --- a/roles/rke2_common/tasks/previous_install.yml +++ /dev/null 
@@ -1,50 +0,0 @@ ---- - -- name: Set fact if rke2-server was previously installed - ansible.builtin.set_fact: - installed: true - when: > - ansible_facts.services["rke2-server.service"] is defined - and not ansible_facts.services["rke2-server.service"].status == 'disabled' - -- name: Set fact if rke2-server is running - ansible.builtin.set_fact: - rke2_running: true - when: > - ansible_facts.services["rke2-server.service"] is defined - and ansible_facts.services["rke2-server.service"].state == 'running' - -- name: Set fact if rke2-agent was previously installed - ansible.builtin.set_fact: - installed: true - when: > - ansible_facts.services["rke2-agent.service"] is defined - and not ansible_facts.services["rke2-agent.service"].status == 'disabled' - -- name: Set fact if rke2-agent is running - ansible.builtin.set_fact: - rke2_running: true - when: > - ansible_facts.services["rke2-agent.service"] is defined - and ansible_facts.services["rke2-agent.service"].state == 'running' - -- name: Check for the rke2 binary - ansible.builtin.stat: - path: /usr/local/bin/rke2 - register: rke2_binary - -- name: Get current RKE2 version if already installed - ansible.builtin.shell: set -o pipefail && /usr/local/bin/rke2 -v | awk '$1 ~ /rke2/ { print $3 }' - register: installed_rke2_version_tmp - changed_when: false - args: - executable: /usr/bin/bash - when: rke2_binary.stat.exists - failed_when: > - (installed_rke2_version_tmp.rc != 141) and - (installed_rke2_version_tmp.rc != 0) - -- name: Determine if current version differs what what is being installed - ansible.builtin.set_fact: - installed_rke2_version: "{{ installed_rke2_version_tmp.stdout }}" - when: rke2_binary.stat.exists diff --git a/roles/rke2_common/tasks/rpm_install.yml b/roles/rke2_common/tasks/rpm_install.yml deleted file mode 100644 index 15b2f696..00000000 --- a/roles/rke2_common/tasks/rpm_install.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- - -# Add RKE2 Common repo -- name: Add the rke2-common repo RHEL/CentOS/Rocky 
- ansible.builtin.yum_repository: - name: "{{ rke2_common_yum_repo.name }}" - description: "{{ rke2_common_yum_repo.description }}" - baseurl: "{{ rke2_common_yum_repo.baseurl }}" - gpgcheck: "{{ rke2_common_yum_repo.gpgcheck }}" - gpgkey: "{{ rke2_common_yum_repo.gpgkey }}" - enabled: "{{ rke2_common_yum_repo.enabled }}" - when: - - ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Rocky" - - ansible_facts['distribution_major_version'] == "7" or - ansible_facts['distribution_major_version'] == "8" or - ansible_facts['distribution_major_version'] == "9" - -# Add RKE2 versioned repo -- name: Add the rke2 versioned repo CentOS/RHEL/Rocky - ansible.builtin.yum_repository: - name: "{{ rke2_versioned_yum_repo.name }}" - description: "{{ rke2_versioned_yum_repo.description }}" - baseurl: "{{ rke2_versioned_yum_repo.baseurl }}" - gpgcheck: "{{ rke2_versioned_yum_repo.gpgcheck }}" - gpgkey: "{{ rke2_versioned_yum_repo.gpgkey }}" - enabled: "{{ rke2_versioned_yum_repo.enabled }}" - when: - - ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Rocky" - - ansible_facts['distribution_major_version'] == "7" or - ansible_facts['distribution_major_version'] == "8" or - ansible_facts['distribution_major_version'] == "9" - -- name: YUM-Based | Install rke2-server - ansible.builtin.yum: - name: "rke2-server-{{ rke2_version_rpm }}" - state: latest # noqa package-latest - when: - - ansible_facts['os_family'] == 'RedHat' or ansible_facts['os_family'] == 'Rocky' - - not rke2_binary_tarball_check.stat.exists - - inventory_hostname in groups['rke2_servers'] - -- name: YUM-Based | Install rke2-agent - ansible.builtin.yum: - name: "rke2-agent-{{ rke2_version_rpm }}" - state: latest # noqa package-latest - when: - - ansible_facts['os_family'] == 'RedHat' or ansible_facts['os_family'] == 'Rocky' - - not rke2_binary_tarball_check.stat.exists - - inventory_hostname in groups.get('rke2_agents', []) diff --git a/roles/rke2_common/vars/main.yml 
b/roles/rke2_common/vars/main.yml deleted file mode 100644 index da8e48d7..00000000 --- a/roles/rke2_common/vars/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -# Possible RKE2 Channels -channels: - - stable - - latest - - v1.18 - - v1.19 - - v1.20 - - v1.21 - - v1.22 - - v1.23 - - v1.24 - - v1.25 - - v1.26 - - v1.27 - - v1.28 -installed: false -rke2_version_changed: false diff --git a/roles/rke2_server/defaults/main.yml b/roles/rke2_server/defaults/main.yml deleted file mode 100644 index ae927959..00000000 --- a/roles/rke2_server/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -kubernetes_api_server_host: "{{ hostvars[groups['rke2_servers'][0]].inventory_hostname }}" diff --git a/roles/rke2_server/tasks/add-pod-security-admission-config.yml b/roles/rke2_server/tasks/add-pod-security-admission-config.yml deleted file mode 100644 index 4b7a1937..00000000 --- a/roles/rke2_server/tasks/add-pod-security-admission-config.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- name: Create the /etc/rancher/rke2 config dir - ansible.builtin.file: - path: /etc/rancher/rke2 - state: directory - recurse: yes - -- name: Add pod security admission config file - vars: - file_contents: "{{ lookup('file', pod_security_admission_config_file_path) }}" - ansible.builtin.template: - src: ansible_header.j2 - dest: "/etc/rancher/rke2/pod-security-admission-config.yaml" - mode: '0640' - owner: root - group: root - when: - - pod_security_admission_config_file_path is defined - - pod_security_admission_config_file_path|length != 0 - notify: Restart rke2-server - -- name: Remove pod security admission config file - when: - - pod_security_admission_config_file_path is not defined or pod_security_admission_config_file_path|length == 0 - block: - - name: Check that the PSA config file exists - ansible.builtin.stat: - path: "/etc/rancher/rke2/pod-security-admission-config.yaml" - register: stat_result - - - name: "Check that the PSA config file has ansible managed comments" - ansible.builtin.lineinfile: 
- name: "/etc/rancher/rke2/pod-security-admission-config.yaml" - line: '## This is an Ansible managed file, contents will be overwritten ##' - state: present - check_mode: yes - register: ansible_managed_check - when: stat_result.stat.exists | bool is true - - - name: Remove the PSA config file if exists and has ansible managed comments - ansible.builtin.file: - path: "/etc/rancher/rke2/pod-security-admission-config.yaml" - state: absent - when: - - ansible_managed_check.changed | bool is false diff --git a/roles/rke2_server/tasks/first_server.yml b/roles/rke2_server/tasks/first_server.yml deleted file mode 100644 index 0b71ea88..00000000 --- a/roles/rke2_server/tasks/first_server.yml +++ /dev/null @@ -1,63 +0,0 @@ ---- - -- name: Add manifest files - ansible.builtin.include_role: - name: rke2_common - tasks_from: add-manifest-addons.yml - when: - - manifest_config_file_path is defined - - manifest_config_file_path | length > 0 - -- name: Start rke2-server - ansible.builtin.systemd: - name: rke2-server - state: started - enabled: yes - -- name: Wait for k8s apiserver - ansible.builtin.wait_for: - host: localhost - port: "6443" - state: present - timeout: 300 - -- name: Wait for kubelet process to be present on host - ansible.builtin.command: >- - ps -C kubelet -F -ww --no-headers - register: kubelet_check - until: kubelet_check.rc == 0 - retries: 20 - delay: 10 - changed_when: false - -- name: Extract the hostname-override parameter from the kubelet process - ansible.builtin.set_fact: - kubelet_hostname_override_parameter: "{{ kubelet_check.stdout | \ - regex_search('\\s--hostname-override=((([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]))\\s',\ - '\\1') }}" - -- name: Wait for node to show Ready status - ansible.builtin.command: >- - /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml - --server https://127.0.0.1:6443 get no {{ kubelet_hostname_override_parameter[0] }} - -o 
jsonpath='{.status.conditions[?(@.type=="Ready")].status}' - register: status_result - until: status_result.stdout.find("True") != -1 - retries: 20 - delay: 10 - changed_when: false - -- name: Add generated Token if none provided - block: - - name: Wait for node-token - ansible.builtin.wait_for: - path: /var/lib/rancher/rke2/server/node-token - - - name: Read node-token from master - ansible.builtin.slurp: - src: /var/lib/rancher/rke2/server/node-token - register: node_token - - - name: Store Master node-token - ansible.builtin.set_fact: - rke2_config_token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}" diff --git a/roles/rke2_server/tasks/main.yml b/roles/rke2_server/tasks/main.yml deleted file mode 100644 index ef402d14..00000000 --- a/roles/rke2_server/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- - -- name: RKE2 agent and server tasks - vars: - rke2_common_caller_role_name: server - ansible.builtin.include_role: - name: rke2_common - tasks_from: main - -- name: Include task file add-pod-security-admission-config.yml - ansible.builtin.include_tasks: add-pod-security-admission-config.yml - -- name: Setup initial server - ansible.builtin.include_tasks: first_server.yml - when: inventory_hostname in groups['rke2_servers'][0] - -- name: Setup other servers - ansible.builtin.include_tasks: other_servers.yml - when: inventory_hostname in groups['rke2_servers'][1:] - -- name: Configure Utilities - ansible.builtin.include_tasks: utilities.yml diff --git a/roles/rke2_server/tasks/other_servers.yml b/roles/rke2_server/tasks/other_servers.yml deleted file mode 100644 index c075b058..00000000 --- a/roles/rke2_server/tasks/other_servers.yml +++ /dev/null @@ -1,71 +0,0 @@ ---- - -- name: Does config file already have server token? 
# noqa command-instead-of-shell - ansible.builtin.command: 'grep -i "^token:" /etc/rancher/rke2/config.yaml' - register: server_token_check - failed_when: server_token_check.rc >= 2 - changed_when: false - -- name: Add token to config.yaml - ansible.builtin.lineinfile: - dest: /etc/rancher/rke2/config.yaml - line: "token: {{ hostvars[groups['rke2_servers'][0]].rke2_config_token }}" - state: present - insertbefore: BOF - when: - - '"token:" not in server_token_check.stdout' - -- name: Does config file already have server url? # noqa command-instead-of-shell - ansible.builtin.command: 'grep -i "^server:" /etc/rancher/rke2/config.yaml' - register: server_url_check - failed_when: server_url_check.rc >= 2 - changed_when: false - -- name: Add server url to config file - ansible.builtin.lineinfile: - dest: /etc/rancher/rke2/config.yaml - line: "server: https://{{ kubernetes_api_server_host }}:9345" - state: present - insertbefore: BOF - when: - - '"server:" not in server_url_check.stdout' - -- name: Start rke2-server - throttle: 1 - ansible.builtin.systemd: - name: rke2-server - state: started - enabled: yes - -- name: Wait for k8s apiserver reachability - ansible.builtin.wait_for: - host: "{{ kubernetes_api_server_host }}" - port: "6443" - state: present - timeout: 300 - -- name: Wait for kubelet process to be present on host - ansible.builtin.command: >- - ps -C kubelet -F -ww --no-headers - register: kubelet_check - until: kubelet_check.rc == 0 - retries: 20 - delay: 10 - changed_when: false - -- name: Extract the hostname-override parameter from the kubelet process - ansible.builtin.set_fact: - kubelet_hostname_override_parameter: "{{ kubelet_check.stdout | \ - regex_search('\\s--hostname-override=((([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]))\\s',\ - '\\1') }}" - -- name: Wait for node to show Ready status - ansible.builtin.command: >- - /var/lib/rancher/rke2/bin/kubectl --kubeconfig 
/etc/rancher/rke2/rke2.yaml - --server https://127.0.0.1:6443 get no {{ kubelet_hostname_override_parameter[0] }} - -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' - register: status_result - until: status_result.stdout.find("True") != -1 - retries: 20 - delay: 10 - changed_when: false diff --git a/roles/rke2_server/vars/main.yml b/roles/rke2_server/vars/main.yml deleted file mode 100644 index ed97d539..00000000 --- a/roles/rke2_server/vars/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/testing/tasks/basic_tests.yml b/roles/testing/tasks/basic_tests.yml index 5eb79a40..d4ff5c5a 100644 --- a/roles/testing/tasks/basic_tests.yml +++ b/roles/testing/tasks/basic_tests.yml @@ -9,7 +9,7 @@ ansible.builtin.lineinfile: path: /etc/rancher/rke2/config.yaml line: "selinux: true" - check_mode: yes + check_mode: true register: test_is_selinux_true - name: Assertions diff --git a/sample_files/manifest/manifest-example.yaml b/sample_files/manifests/manifest-example.yaml similarity index 100% rename from sample_files/manifest/manifest-example.yaml rename to sample_files/manifests/manifest-example.yaml diff --git a/tarball_install/README.md b/sample_files/tarball_install/README.md similarity index 100% rename from tarball_install/README.md rename to sample_files/tarball_install/README.md diff --git a/site.yml b/site.yml index 0d555ce9..9d204c83 100644 --- a/site.yml +++ b/site.yml @@ -1,24 +1,8 @@ --- -- name: Server play - hosts: rke2_servers +- name: RKE2 play + hosts: all any_errors_fatal: true - become: true + # become: true roles: - - role: rke2_server - serial: 5 - -- name: Agent play - hosts: rke2_agents - any_errors_fatal: true - become: true - roles: - - role: rke2_agent - serial: 10 - -- name: Cluster manifest play - hosts: rke2_servers - any_errors_fatal: true - become: true - roles: - - role: cluster_manifest + - role: rke2 diff --git a/testing.yml b/testing.yml index 8e6c89be..57be9470 100644 --- a/testing.yml +++ b/testing.yml @@ -1,6 +1,6 
@@ --- - name: Testing play hosts: all - become: yes + become: true roles: - role: testing