# platform2024_vm_deploy_microk8s.yml
---
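# Example invocation (a sketch; assumes an inventory that defines the "edge" group and the
# scale_user / scale_pass variables, and that <cluster_name> exists in that inventory):
#   ansible-playbook platform2024_vm_deploy_microk8s.yml -i inventory.yml -l <cluster_name>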
- name: Simple VM deploy of single-node microk8s # edit the vmname variable; use the -l limit filter to target one cluster instead of the full inventory
  hosts: edge
  vars:
    vmname: microk8s-
    image_url:
      - "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
    image_path: "/Users/davedemlow/tmp/" # local path the cloud image is downloaded to (e.g. "~/tmp/")
  connection: local
  gather_facts: false
  strategy: host_pinned # or "free"; lets each cluster start its next task before all clusters have finished the current one
  environment: # set here so the hypercore modules authenticate to each remote cluster without needing cluster_instance on every task
    SC_HOST: "https://{{ inventory_hostname }}"
    SC_USERNAME: "{{ scale_user }}"
    SC_PASSWORD: "{{ scale_pass }}"
  roles:
    - url2template # expected to import image_url as a HyperCore template and expose its name as image_name for the clone task below
  tasks:
    - name: Generate a 5-character random string (lower-case letters and digits) and set it as the variable ran5
      ansible.builtin.set_fact:
        ran5: "{{ lookup('password', '/dev/null chars=ascii_lowercase,digits length=5') }}"
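    # The two variables combine into the VM name; a hypothetical example:
    #   vmname "microk8s-" + ran5 "7k3q9" -> vm_name "microk8s-7k3q9"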
    - name: Clone and configure ad hoc "{{ vmname }}{{ ran5 }}"
      scale_computing.hypercore.vm_clone:
        vm_name: "{{ vmname }}{{ ran5 }}"
        source_vm_name: "{{ image_name }}"
        tags:
          - platform2024
        cloud_init:
          user_data: |
            #cloud-config
            password: "password"
            chpasswd: { expire: False }
            ssh_pwauth: True
            ssh_authorized_keys: # add your ssh public key here for public-key authentication
              - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDihWWhjoPj8KVLtdLDwNJQ71zi9An0iUFjefRWu2Eju [email protected]
            disable_root: false # allow ssh root login
            ssh_import_id: gh:ddemlow
            packages: [snapd, qemu-guest-agent]
            snap:
              commands:
                00: snap install microk8s --classic # microk8s is a classic snap; --classic is required for the install to succeed
            bootcmd:
              - [ sh, -c, 'sudo echo GRUB_CMDLINE_LINUX="nomodeset" >> /etc/default/grub' ]
              - [ sh, -c, 'sudo echo GRUB_GFXPAYLOAD_LINUX="1024x768" >> /etc/default/grub' ]
              - [ sh, -c, 'sudo echo GRUB_DISABLE_LINUX_UUID=true >> /etc/default/grub' ]
              - [ sh, -c, 'sudo update-grub' ]
            runcmd:
              - [ systemctl, restart, --no-block, qemu-guest-agent ]
              - snap install microk8s --classic
              # Add the default user to the microk8s group
              - adduser ubuntu microk8s
              - chown -f -R ubuntu /home/ubuntu/.kube # runcmd runs as root, so name the default user's home explicitly
              # Enable MicroK8s addons
              - microk8s enable dns dashboard storage host-access
              # Wait for MicroK8s to become available
              - microk8s status --wait-ready
              # Make kubectl available to the default user
              - snap alias microk8s.kubectl kubectl
              # Log in to the chart registry and install the ai-sensor chart
              - microk8s helm registry login --username='robot-edgelabs-clients' --password='LWb7bsa9msWphOy8brbXu6XJUlzXhfSV' registry.edgelabs.ai
              - microk8s kubectl create secret docker-registry regcred --docker-server=registry.edgelabs.ai --docker-username=robot-edgelabs-clients --docker-password='LWb7bsa9msWphOy8brbXu6XJUlzXhfSV'
              - microk8s helm install ai-sensor --set api.key='id1756317079|IWz602oCKaOuwBlhwEo8' oci://registry.edgelabs.ai/charts/ai-sensor
              # Install the Portainer edge agent and register it
              - PORTAINER_EDGE_ID=$(hostname)
              - 'curl https://downloads.portainer.io/ee2-19/portainer-edge-agent-setup.sh | bash -s -- "$PORTAINER_EDGE_ID" "aHR0cHM6Ly8yMC44OC4yMi4yMjc6OTQ0M3wyMC44OC4yMi4yMjc6ODAwMHwydXByOUtuYTd6ZHBNWExNMm9meDNubHZEOHh2THpLVjN5WnlXM1lsWFdvPXww" "1" "" "EDGE_ASYNC=1,PORTAINER_GROUP=3"'
              - echo "cloud-init complete" > /dev/tty
            write_files:
              - content: "{{ inventory_hostname }}"
                path: /clusterip.txt
          meta_data: |
            dsmode: local
            local-hostname: "{{ vmname }}{{ ran5 }}"
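    # Optional read-back (a sketch, not required by the deployment): the collection's vm_info
    # module can confirm the clone exists before the disk and parameter tasks below, e.g.:
    #   - name: Read back the cloned VM
    #     scale_computing.hypercore.vm_info:
    #       vm_name: "{{ vmname }}{{ ran5 }}"
    #     register: clone_info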
    - name: Disk desired configuration for "{{ vmname }}{{ ran5 }}"
      scale_computing.hypercore.vm_disk:
        cluster_instance: # explicit credentials; the SC_* environment above would also be used if this block were omitted
          host: "https://{{ inventory_hostname }}"
          username: "{{ scale_user }}"
          password: "{{ scale_pass }}"
        vm_name: "{{ vmname }}{{ ran5 }}"
        items:
          - disk_slot: 0
            type: virtio_disk
            size: "{{ '300 GB' | human_to_bytes }}" # human_to_bytes treats GB as GiB, so e.g. "50 GB" appears as a 53.7 GB virtual disk in HyperCore
        state: present
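    # Worked example of the size comment above: "50 GB" | human_to_bytes = 50 * 1024^3
    # = 53,687,091,200 bytes, which HyperCore displays as roughly 53.7 GB (decimal).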
    - name: VM desired configuration and state for "{{ vmname }}{{ ran5 }}"
      scale_computing.hypercore.vm_params:
        cluster_instance:
          host: "https://{{ inventory_hostname }}"
          username: "{{ scale_user }}"
          password: "{{ scale_pass }}"
        vm_name: "{{ vmname }}{{ ran5 }}"
        memory: "{{ '4 GB' | human_to_bytes }}"
        description:
        tags:
          - platform2024
          - "{{ site_name }}"
          - ansible_group__microk8s # tag used by the hypercore inventory plugin to build an ansible group when executing against the VM guests
          - SERIAL
        vcpu: 8
        power_state: start
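# Post-deploy check (a sketch; the VM IP is an assumption - read it from the HyperCore UI or
# the qemu-guest-agent, and the default cloud-image user is "ubuntu"):
#   ssh ubuntu@<vm_ip> 'microk8s status --wait-ready && kubectl get nodes'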