diff --git a/hosts/binary-cache/configuration.nix b/hosts/binary-cache/configuration.nix
index fb7f4b96..20268045 100644
--- a/hosts/binary-cache/configuration.nix
+++ b/hosts/binary-cache/configuration.nix
@@ -22,10 +22,6 @@
     device = "/dev/disk/by-lun/10";
     fsType = "ext4";
     options = [
-      # Due to https://github.com/hashicorp/terraform-provider-azurerm/issues/6117
-      # disks get attached later during boot.
-      # The default of 90s doesn't seem to be sufficient.
-      "x-systemd.device-timeout=5min"
       "x-systemd.makefs"
       "x-systemd.growfs"
     ];
diff --git a/hosts/jenkins-controller/configuration.nix b/hosts/jenkins-controller/configuration.nix
index 4d0c7566..30c12963 100644
--- a/hosts/jenkins-controller/configuration.nix
+++ b/hosts/jenkins-controller/configuration.nix
@@ -18,10 +18,6 @@
     device = "/dev/disk/by-lun/10";
     fsType = "ext4";
     options = [
-      # Due to https://github.com/hashicorp/terraform-provider-azurerm/issues/6117
-      # disks get attached later during boot.
-      # The default of 90s doesn't seem to be sufficient.
-      "x-systemd.device-timeout=5min"
       "x-systemd.makefs"
       "x-systemd.growfs"
     ];
diff --git a/nix/devshell.nix b/nix/devshell.nix
index ed5b777b..cd6c6060 100644
--- a/nix/devshell.nix
+++ b/nix/devshell.nix
@@ -24,6 +24,7 @@
       p.azurerm
       p.external
       p.null
+      p.random
       p.sops
     ]))
   ];
diff --git a/terraform/jenkins/binary_cache.tf b/terraform/jenkins/binary_cache.tf
index fa1a6280..7a26ee37 100644
--- a/terraform/jenkins/binary_cache.tf
+++ b/terraform/jenkins/binary_cache.tf
@@ -49,6 +49,16 @@ module "binary_cache_vm" {
   })])

   subnet_id = azurerm_subnet.binary_cache.id
+
+  # Attach disk to the VM
+  data_disks = [{
+    name            = azurerm_managed_disk.binary_cache_caddy_state.name
+    managed_disk_id = azurerm_managed_disk.binary_cache_caddy_state.id
+    lun             = "10"
+    create_option   = "Attach"
+    caching         = "None"
+    disk_size_gb    = azurerm_managed_disk.binary_cache_caddy_state.disk_size_gb
+  }]
 }

 resource "azurerm_subnet" "binary_cache" {
@@ -98,11 +108,3 @@ resource "azurerm_managed_disk" "binary_cache_caddy_state" {
   create_option        = "Empty"
   disk_size_gb         = 1
 }
-
-# Attach to the VM
-resource "azurerm_virtual_machine_data_disk_attachment" "binary_cache_vm_caddy_state" {
-  managed_disk_id    = azurerm_managed_disk.binary_cache_caddy_state.id
-  virtual_machine_id = module.binary_cache_vm.virtual_machine_id
-  lun                = "10"
-  caching            = "None"
-}
diff --git a/terraform/jenkins/jenkins_controller.tf b/terraform/jenkins/jenkins_controller.tf
index 1b780f5b..4d4a7a39 100644
--- a/terraform/jenkins/jenkins_controller.tf
+++ b/terraform/jenkins/jenkins_controller.tf
@@ -40,6 +40,16 @@ module "jenkins_controller_vm" {
   })])

   subnet_id = azurerm_subnet.jenkins.id
+
+  # Attach disk to the VM
+  data_disks = [{
+    name            = azurerm_managed_disk.jenkins_controller_jenkins_state.name
+    managed_disk_id = azurerm_managed_disk.jenkins_controller_jenkins_state.id
+    lun             = "10"
+    create_option   = "Attach"
+    caching         = "None"
+    disk_size_gb    = azurerm_managed_disk.jenkins_controller_jenkins_state.disk_size_gb
+  }]
 }

 resource "azurerm_network_interface_security_group_association" "jenkins_controller_vm" {
@@ -74,11 +84,3 @@ resource "azurerm_managed_disk" "jenkins_controller_jenkins_state" {
   create_option        = "Empty"
   disk_size_gb         = 10
 }
-
-# Attach to the VM
-resource "azurerm_virtual_machine_data_disk_attachment" "jenkins_controller_vm_jenkins_state" {
-  managed_disk_id    = azurerm_managed_disk.jenkins_controller_jenkins_state.id
-  virtual_machine_id = module.jenkins_controller_vm.virtual_machine_id
-  lun                = "10"
-  caching            = "None"
-}
diff --git a/tf-modules/azurerm-linux-vm/README.md b/tf-modules/azurerm-linux-vm/README.md
index 7dde4a7b..c0cc5aee 100644
--- a/tf-modules/azurerm-linux-vm/README.md
+++ b/tf-modules/azurerm-linux-vm/README.md
@@ -7,3 +7,36 @@ SPDX-License-Identifier: Apache-2.0
 # azurerm-linux-vm

-Terraform module spinning up a Azure VM.
+Terraform module spinning up an Azure VM.
+
+This module uses the `azurerm_virtual_machine` resource to spin up the VM,
+as it allows data disks to be attached at boot.
+
+This works around
+https://github.com/hashicorp/terraform-provider-azurerm/issues/6117:
+with `azurerm_linux_virtual_machine` and
+`azurerm_virtual_machine_data_disk_attachment`, the disk only gets attached
+once the VM has booted, and the VM can't finish booting while it waits for
+the data disk to appear.
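+
+As an illustrative sketch (the module path, resource names and values below
+are placeholders, not a real deployment), a data disk is attached through
+the module roughly like this:
+
+```hcl
+module "example_vm" {
+  source = "../../tf-modules/azurerm-linux-vm"
+
+  # ... other module arguments ...
+
+  # Disks listed here end up as storage_data_disk blocks on the VM,
+  # so they are already attached when the machine boots.
+  data_disks = [{
+    name            = azurerm_managed_disk.example_state.name
+    managed_disk_id = azurerm_managed_disk.example_state.id
+    lun             = "10"
+    create_option   = "Attach"
+    caching         = "None"
+    disk_size_gb    = azurerm_managed_disk.example_state.disk_size_gb
+  }]
+}
+```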
diff --git a/tf-modules/azurerm-linux-vm/variables.tf b/tf-modules/azurerm-linux-vm/variables.tf
index 4a6bd79c..3ae57201 100644
--- a/tf-modules/azurerm-linux-vm/variables.tf
+++ b/tf-modules/azurerm-linux-vm/variables.tf
@@ -32,3 +32,16 @@ variable "subnet_id" {
   description = "The subnet ID to attach to the VM and allocate an IP from"
 }

+variable "data_disks" {
+  description = "List of objects, each containing keys of the storage_data_disk block"
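+  # Recognized keys (picked up via lookup() in virtual_machine.tf, so each
+  # is optional); values below are illustrative:
+  # {
+  #   name              = "my-disk"
+  #   managed_disk_id   = "<managed disk resource ID>"
+  #   managed_disk_type = "Standard_LRS"
+  #   lun               = "10"
+  #   caching           = "None"
+  #   disk_size_gb      = 10
+  # }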
+}
diff --git a/tf-modules/azurerm-linux-vm/virtual_machine.tf b/tf-modules/azurerm-linux-vm/virtual_machine.tf
index 321168b9..45cecb22 100644
--- a/tf-modules/azurerm-linux-vm/virtual_machine.tf
+++ b/tf-modules/azurerm-linux-vm/virtual_machine.tf
@@ -2,46 +2,77 @@
 #
 # SPDX-License-Identifier: Apache-2.0

-resource "azurerm_linux_virtual_machine" "main" {
+resource "azurerm_virtual_machine" "main" {
   name                = var.virtual_machine_name
   resource_group_name = var.resource_group_name
   location            = var.location
-  size                = var.virtual_machine_size
+  vm_size             = var.virtual_machine_size

-  # Unused, but required by the API. May not be root either
-  admin_username = "foo"
-  admin_password = "S00persecret"
-
-  # We *don't* support password auth, and this doesn't change anything.
-  # However, if we don't set this to false we need to
-  # specify additional pubkeys.
-  disable_password_authentication = false
-  # We can't use admin_ssh_key, as it only works for the admin_username.
+  delete_os_disk_on_termination    = true
+  delete_data_disks_on_termination = false

   network_interface_ids = [azurerm_network_interface.default.id]
-  source_image_id       = var.virtual_machine_source_image
+
+  storage_image_reference {
+    id = var.virtual_machine_source_image
+  }

   identity {
     type = "SystemAssigned"
   }

-  # We only set custom_data here, not user_data.
-  # user_data is more recent, and allows updates without recreating the machine,
-  # but at least cloud-init 23.1.2 blocks boot if custom_data is not set.
-  # (It logs about not being able to mount /dev/sr0 to /metadata).
-  # This can be worked around by setting custom_data to a static placeholder,
-  # but user_data is still ignored.
-  # TODO: check this again with a more recent cloud-init version.
-  custom_data = (var.virtual_machine_custom_data == "") ? null : base64encode(var.virtual_machine_custom_data)
+  os_profile {
+    computer_name = var.virtual_machine_name
+    # Unused, but required by the API. May not be root either
+    admin_username = "foo"
+    admin_password = "S00persecret"
+
+    # We only set custom_data here, not user_data.
+    # user_data is more recent, and allows updates without recreating the machine,
+    # but at least cloud-init 23.1.2 blocks boot if custom_data is not set.
+    # (It logs about not being able to mount /dev/sr0 to /metadata).
+    # This can be worked around by setting custom_data to a static placeholder,
+    # but user_data is still ignored.
+    # TODO: check this again with a more recent cloud-init version.
+    custom_data = (var.virtual_machine_custom_data == "") ? null : base64encode(var.virtual_machine_custom_data)
+  }
+
+  os_profile_linux_config {
+    # We *don't* support password auth, and this doesn't change anything.
+    # However, if we don't set this to false we need to
+    # specify additional pubkeys.
+    disable_password_authentication = false
+    # We can't use admin_ssh_key, as it only works for the admin_username.
+  }

-  # Enable boot diagnostics, use the managed storage account to store them
   boot_diagnostics {
-    storage_account_uri = null
+    enabled = true
+    # azurerm_virtual_machine doesn't support the managed storage account
+    storage_uri = azurerm_storage_account.boot_diag.primary_blob_endpoint
+  }
+
+  storage_os_disk {
+    name              = "${var.virtual_machine_name}-osdisk" # needs to be unique
+    caching           = "ReadWrite"
+    create_option     = "FromImage"
+    managed_disk_type = "Standard_LRS"
   }

-  os_disk {
-    caching              = "ReadWrite"
-    storage_account_type = "Standard_LRS"
+  dynamic "storage_data_disk" {
+    for_each = var.data_disks
+
+    content {
+      # use lookup here, so keys can be set optionally
+      name          = lookup(storage_data_disk.value, "name", null)
+      caching       = lookup(storage_data_disk.value, "caching", null)
+      create_option = "Attach"
+      # This has to be passed, even for "Attach"
+      disk_size_gb = lookup(storage_data_disk.value, "disk_size_gb", null)
+      lun          = lookup(storage_data_disk.value, "lun", null)
+
+      managed_disk_type = lookup(storage_data_disk.value, "managed_disk_type", null)
+      managed_disk_id   = lookup(storage_data_disk.value, "managed_disk_id", null)
+    }
   }
 }

@@ -66,12 +97,27 @@ resource "azurerm_public_ip" "default" {
   allocation_method = "Static"
 }

+# Create a random string, and a storage account using that random string.
+resource "random_string" "boot_diag" {
+  length  = 8
+  special = false
+  upper   = false
+}
+
+resource "azurerm_storage_account" "boot_diag" {
+  name                     = "${random_string.boot_diag.result}bootdiag"
+  resource_group_name      = var.resource_group_name
+  location                 = var.location
+  account_tier             = "Standard"
+  account_replication_type = "GRS"
+}
+
 output "virtual_machine_id" {
-  value = azurerm_linux_virtual_machine.main.id
+  value = azurerm_virtual_machine.main.id
 }

 output "virtual_machine_identity_principal_id" {
-  value = azurerm_linux_virtual_machine.main.identity[0].principal_id
+  value = azurerm_virtual_machine.main.identity[0].principal_id
 }

 output "virtual_machine_network_interface_id" {