published: 14th of February 2024
As part of my home lab rebuild journey, I am committed to building as many of the components as possible using Infrastructure as Code (IaC). I am running Proxmox as my hypervisor, which supports bootstrapping VMs via cloud-init. This allows you to apply configuration to virtual machines on first boot.
In this post, I will show you how to use Terraform and cloud-init to bootstrap virtual machines on Proxmox and join them to your SaltStack environment.
The following software versions were used in this post.
I created a Terraform module to manage the cloud-init virtual machines. The module is called cloud-init-vm and is located in the modules directory.
The directory structure of the project is shown below.
.
├── cloud-init-config/
├── main.tf
├── modules/
│   └── cloud-init-vm/
│       ├── main.tf
│       ├── outputs.tf
│       ├── providers.tf
│       ├── scripts/
│       │   ├── salt-master-cloud-config.yaml
│       │   ├── salt-minion-cloud-config.yaml
│       │   └── vyos-salt-minion-cloud-config.yaml
│       └── variables.tf
├── provider.tf
└── variables.tf
Define the Proxmox provider details in the provider.tf file.
terraform {
  required_providers {
    proxmox = {
      source  = "Telmate/proxmox"
      version = "2.9.14"
    }
  }
}

provider "proxmox" {
  pm_api_url      = "https://${var.proxmox_host}:8006/api2/json"
  pm_tls_insecure = true
}
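Note that the provider block above carries no credentials; the Telmate provider can pick them up from environment variables, or you can pass them in explicitly. As a sketch, here is what the same block could look like using a Proxmox API token instead of a root password. The proxmox_api_token_id and proxmox_api_token_secret variables are my own illustration, not part of this project:
provider "proxmox" {
  pm_api_url = "https://${var.proxmox_host}:8006/api2/json"

  # Hypothetical variables; an API token avoids embedding a root password
  pm_api_token_id     = var.proxmox_api_token_id # e.g. "terraform@pve!tokenname"
  pm_api_token_secret = var.proxmox_api_token_secret

  pm_tls_insecure = true
}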
We use the Terraform template_file data source to build a cloud-init file for each virtual machine. The template files themselves are shown later in this post.
data "template_file" "cloud_init" {
  for_each = var.cloud_init_virtual_machines

  template = file("${path.module}/scripts/${each.value.cloud_init_file}")
  vars = {
    hostname                     = each.value.hostname
    domain                       = var.domain
    salt_repo_source             = var.salt_repo_source
    salt_repo_filename           = var.salt_repo_filename
    salt_repo_gpg_key            = var.salt_repo_gpg_key
    salt_master_ipv4_addr        = var.salt_master_ipv4_addr
    salt_config_repo             = var.salt_config_repo
    saltpx_ssh_key               = var.saltpx_ssh_key
    saltpx_ssh_key_priv          = var.saltpx_ssh_key_priv
    saltdeploy_ssh_key_priv      = var.saltdeploy_ssh_key_priv
    saltdeploy_ssh_key_priv_name = var.saltdeploy_ssh_key_priv_name
    admin_ssh_key                = var.admin_ssh_key
  }
}
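As an aside, the template provider behind template_file is in maintenance mode; on recent Terraform versions the built-in templatefile() function can render the same files without an extra provider. A rough equivalent of the data source above as a local value (a sketch, not what this project uses):
locals {
  # Rendered cloud-init content, keyed by VM, using the built-in templatefile()
  cloud_init_rendered = {
    for key, vm in var.cloud_init_virtual_machines :
    key => templatefile("${path.module}/scripts/${vm.cloud_init_file}", {
      hostname                     = vm.hostname
      domain                       = var.domain
      salt_repo_source             = var.salt_repo_source
      salt_repo_filename           = var.salt_repo_filename
      salt_repo_gpg_key            = var.salt_repo_gpg_key
      salt_master_ipv4_addr        = var.salt_master_ipv4_addr
      salt_config_repo             = var.salt_config_repo
      saltpx_ssh_key               = var.saltpx_ssh_key
      saltpx_ssh_key_priv          = var.saltpx_ssh_key_priv
      saltdeploy_ssh_key_priv      = var.saltdeploy_ssh_key_priv
      saltdeploy_ssh_key_priv_name = var.saltdeploy_ssh_key_priv_name
      admin_ssh_key                = var.admin_ssh_key
    })
  }
}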
Next, we use the local_file resource to write out a file rendered from the previous template_file data source.
Rendered files are stored in the cloud-init-config directory, which is located in the root of the Terraform project.
# Create a local copy of the file, to transfer to Proxmox
resource "local_file" "cloud_init" {
  for_each = var.cloud_init_virtual_machines

  content  = data.template_file.cloud_init[each.key].rendered
  filename = "./cloud-init-config/user_data_cloud_init_${each.value.hostname}.cfg"
}
With a null_resource, we use an SSH connection and the file provisioner to transfer each cloud-init file to the Proxmox host that the VM is deployed onto.
# Transfer the file to the Proxmox Host
resource "null_resource" "cloud_init" {
  for_each = var.cloud_init_virtual_machines

  connection {
    type        = "ssh"
    user        = var.proxmox_user
    private_key = var.proxmox_user_key_priv
    host        = var.proxmox_host_ipv4_addrs[each.value.target_node]
  }

  provisioner "file" {
    source      = local_file.cloud_init[each.key].filename
    destination = "/var/lib/vz/snippets/cloud_init_${each.value.hostname}.yml"
  }
}
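One caveat with null_resource: provisioners only run when the resource is first created, so later edits to a template won't be re-uploaded on their own. If that bites you, the triggers argument can force the resource to be replaced whenever the rendered content changes. A sketch:
resource "null_resource" "cloud_init" {
  for_each = var.cloud_init_virtual_machines

  # Replace (and therefore re-provision) whenever the rendered file changes
  triggers = {
    cloud_init_sha = sha256(data.template_file.cloud_init[each.key].rendered)
  }

  # connection and provisioner blocks as above
}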
With the Proxmox provider, you can create VMs by cloning a VM template. I name my templates ubuntu-x and vyos-x, where x is the version number. This allows me to have a different resource definition for each VM type, with parameters specific to each type.
Ubuntu VMs use the following resource definition.
resource "proxmox_vm_qemu" "ubuntu_virtual_machines" {
  for_each = { for key, value in var.cloud_init_virtual_machines : key => value if startswith(value.vm_template, "ubuntu") == true }

  name        = each.value.hostname
  target_node = each.value.target_node
  clone       = each.value.vm_template
  agent       = 1
  os_type     = "cloud-init"
  cores       = each.value.cpu_cores
  sockets     = each.value.cpu_sockets
  cpu         = "host"
  memory      = each.value.memory
  scsihw      = "virtio-scsi-pci"
  bootdisk    = "scsi0"
  qemu_os     = each.value.qemu_os
  onboot      = each.value.onboot

  disk {
    slot     = 0
    size     = each.value.hdd_size
    type     = "scsi"
    storage  = each.value.hdd_storage
    iothread = 0
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = each.value.vlan_tag
  }

  # Cloud Init
  ipconfig0 = "ip=${each.value.ip_address},gw=${each.value.gateway}"
  cicustom  = "user=local:snippets/cloud_init_${each.value.hostname}.yml"
}
VyOS VMs use the following resource definition.
resource "proxmox_vm_qemu" "vyos_virtual_machines" {
  for_each = { for key, value in var.cloud_init_virtual_machines : key => value if startswith(value.vm_template, "vyos") == true }

  name        = each.value.hostname
  target_node = each.value.target_node
  clone       = each.value.vm_template
  agent       = 1
  os_type     = "cloud-init"
  cores       = each.value.cpu_cores
  sockets     = each.value.cpu_sockets
  cpu         = "host"
  memory      = each.value.memory
  scsihw      = "virtio-scsi-pci"
  bootdisk    = "scsi0"
  qemu_os     = each.value.qemu_os
  onboot      = each.value.onboot

  disk {
    slot     = 0
    size     = each.value.hdd_size
    type     = "scsi"
    storage  = each.value.hdd_storage
    iothread = 0
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = each.value.vlan_tag
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = 52
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = 54
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = 56
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = 58
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = 60
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = 62
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = 64
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = 66
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
    tag    = 666
  }

  # Cloud Init
  ipconfig0 = "ip=${each.value.ip_address},gw=${each.value.gateway}"
  cicustom  = "user=local:snippets/cloud_init_${each.value.hostname}.yml"
}
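The repeated network blocks above could also be generated with a dynamic block, which keeps the VLAN list in one place. A sketch, assuming a hypothetical extra_vlan_tags variable (the project spells the blocks out instead):
variable "extra_vlan_tags" {
  type    = list(number)
  default = [52, 54, 56, 58, 60, 62, 64, 66, 666]
}

resource "proxmox_vm_qemu" "vyos_virtual_machines" {
  # ... same arguments, disk block, and first network block as above ...

  # One additional interface per VLAN in the list
  dynamic "network" {
    for_each = var.extra_vlan_tags
    content {
      model  = "virtio"
      bridge = "vmbr0"
      tag    = network.value
    }
  }
}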
One of the main goals of this project is to use SaltStack to manage my infrastructure. Therefore, I am using cloud-init to bootstrap the Salt master and minions. This allows me to spin up a VM and have it configured automagically by SaltStack.
The following cloud-init templates are used by the previous Terraform resources to create the actual cloud-init files.
First up, we have the Salt master cloud-init template. Here we install the dependencies for SaltStack and configure the Salt master and minion. I'm also setting up SSH to allow the Salt master to pull the Salt configuration from a private Git repository.
#cloud-config
datasource:
  NoCloud:
    meta-data:
      local-hostname: "${hostname}"
package_update: true
package_upgrade: true
apt:
  sources:
    salt.list:
      source: "${salt_repo_source}"
      filename: "${salt_repo_filename}"
      key: |
        ${salt_repo_gpg_key}
packages:
  - "salt-master"
  - "salt-minion"
  - "salt-api"
users:
  - name: "admin"
    groups: "sudo"
    shell: "/bin/bash"
    sudo: ["ALL=(ALL) NOPASSWD:ALL"]
    ssh_authorized_keys:
      - ${admin_ssh_key}
write_files:
  - path: /root/.ssh/id_rsa_saltdeploy
    owner: "root:root"
    permissions: 0600
    encoding: b64
    content: ${saltdeploy_ssh_key_priv}
stopsalt:
  - &stop_salt |
    systemctl stop salt-master && systemctl stop salt-minion && systemctl stop salt-api
confighost:
  - &config_host |
    hostnamectl set-hostname "${hostname}.${domain}" > /etc/hostname
    echo ${salt_master_ipv4_addr} salt-master.${domain} salt-master >> /etc/hosts
configssh:
  - &config_ssh |
    cat <<EOT | tee /root/.ssh/config
    Host github.com
      User git
      Hostname github.com
      IdentityFile /root/.ssh/${saltdeploy_ssh_key_priv_name}
    EOT
    chmod 0600 /root/.ssh/config
    cat <<EOT | tee /root/.ssh/known_hosts
    |1|TOFVhxFepwQNim6g0eyGgZdisXw=|E9sqgrzqOsVNuPw8VZnyxmGjrEM= ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
    |1|hQbBNpkvKU5oDWG22dUZBCLyXWI=|9D/gTABu7UICDBr0SHpG8fbAi08= ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=
    |1|fevXg4VHnsePvXSEHEVrPkWIBsI=|uIA/ACObGuRNgxe0WLe4iVdzc9c= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
    EOT
    chmod 0600 /root/.ssh/known_hosts
clonegitrepo:
  - &clone_git_repo |
    git clone ${salt_config_repo} /srv/salt-config
configsaltmaster:
  - &config_salt_master |
    rm /etc/salt/master
    cat <<EOT | tee /etc/salt/master
    user: "root"
    auto_accept: True
    file_roots:
      base:
        - "/srv/salt-config/"
        - "/srv/salt-config/states"
        - "/srv/salt-config/templates"
        - "/srv/salt-config/files"
        - "/srv/salt-config/data"
    pillar_roots:
      base:
        - "/srv/salt-config/pillar"
    EOT
configsaltminion:
  - &config_salt_minion |
    rm /etc/salt/minion
    cat <<EOT | tee /etc/salt/minion
    master: "salt-master.${domain}"
    id: "${hostname}"
    startup_states: "highstate"
    log_level_logfile: "info"
    EOT
pythonsaltdependencies:
  - &python_salt_dependencies |
    /usr/bin/salt-pip install python-dateutil croniter
restartsalt:
  - &restart_salt |
    systemctl restart salt-master && systemctl restart salt-minion && systemctl restart salt-api
runcmd:
  - [ sh, -c, *stop_salt ]
  - [ sh, -c, *config_host ]
  - [ sh, -c, *config_ssh ]
  - [ sh, -c, *clone_git_repo ]
  - [ sh, -c, *config_salt_master ]
  - [ sh, -c, *config_salt_minion ]
  - [ sh, -c, *python_salt_dependencies ]
  - [ sh, -c, *restart_salt ]
Next up, we have the Salt minion cloud-init template for Ubuntu VMs. Here we install the Salt minion and configure it to connect to the Salt master.
#cloud-config
datasource:
  NoCloud:
    meta-data:
      local-hostname: "${hostname}"
package_update: true
package_upgrade: true
apt:
  sources:
    salt.list:
      source: "${salt_repo_source}"
      filename: "${salt_repo_filename}"
      key: |
        ${salt_repo_gpg_key}
packages:
  - "salt-minion"
users:
  - name: "admin"
    groups: "sudo"
    shell: "/bin/bash"
    sudo: ["ALL=(ALL) NOPASSWD:ALL"]
    ssh_authorized_keys:
      - ${admin_ssh_key}
write_files:
  - path: /root/.ssh/id_rsa_saltpx
    owner: "root:root"
    permissions: 0600
    encoding: b64
    content: ${saltpx_ssh_key_priv}
stopsalt:
  - &stop_salt |
    systemctl stop salt-minion
confighost:
  - &config_host |
    hostnamectl set-hostname "${hostname}.${domain}" > /etc/hostname
    echo ${salt_master_ipv4_addr} salt-master.${domain} salt-master >> /etc/hosts
configsaltminion:
  - &config_salt_minion |
    rm /etc/salt/minion
    cat <<EOT | tee /etc/salt/minion
    master: "salt-master.${domain}"
    id: "${hostname}"
    startup_states: "highstate"
    log_level_logfile: "info"
    EOT
configsaltproxy:
  - &config_salt_proxy |
    rm /etc/salt/proxy
    cat <<EOT | tee /etc/salt/proxy
    master: "salt-master.${domain}"
    multiprocessing: False
    log_level_logfile: "info"
    EOT
restartsalt:
  - &restart_salt |
    systemctl restart salt-minion
runcmd:
  - [ sh, -c, *stop_salt ]
  - [ sh, -c, *config_host ]
  - [ sh, -c, *config_salt_minion ]
  - [ sh, -c, *config_salt_proxy ]
  - [ sh, -c, *restart_salt ]
The Salt minion cloud-init template for VyOS VMs is slightly different, as VyOS only accepts a subset of the cloud-init configs. In VyOS, the Salt minion runs as the minion user, but that user has no home directory. I create a home directory for the minion user, as I save configurations there for the Salt minion to use.
I also create the saltpx user, which is used by the netmiko proxy to run operational commands.
#cloud-config
users:
  - name: "admin"
    groups: "sudo"
    shell: "/bin/vbash"
    sudo: ["ALL=(ALL) NOPASSWD:ALL"]
    ssh_authorized_keys:
      - ${admin_ssh_key}
  - name: "saltpx"
    shell: "/bin/vbash"
    ssh_authorized_keys:
      - ${saltpx_ssh_key}
write_files:
  - path: "/opt/vyatta/etc/config/scripts/vyos-postconfig-bootup.script"
    owner: "root:vyattacfg"
    permissions: 0775
    content: |
      #! /bin/vbash
      systemctl stop salt-minion
      usermod -d /home/minion minion
      mkdir -p /home/minion/
      chown minion:users /home/minion/
      usermod --shell /bin/vbash minion
      systemctl start salt-minion
vyos_config_commands:
  - set system host-name '${hostname}'
  - set system domain-name '${domain}'
  - set service salt-minion id '${hostname}'
  - set system static-host-mapping host-name salt-master.${domain} inet '${salt_master_ipv4_addr}'
  - set service salt-minion master 'salt-master.${domain}'
The following variables are used by the cloud-init module.
variable "domain" {
  type = string
}

variable "proxmox_host" {
  type = string
}

variable "proxmox_host_ipv4_addrs" {
  type = map(string)
}

variable "proxmox_user" {
  type = string
}

variable "proxmox_user_key_priv" {
  type = string
}

variable "saltpx_ssh_key" {
  type = string
}

variable "saltpx_ssh_key_priv" {
  type = string
}

variable "saltdeploy_ssh_key_priv" {
  type = string
}

variable "admin_ssh_key" {
  type = string
}

variable "salt_repo_filename" {
  type = string
}

variable "salt_repo_source" {
  type = string
}

variable "salt_repo_gpg_key" {
  type = string
}

variable "saltdeploy_ssh_key_priv_name" {
  type = string
}

variable "salt_config_repo" {
  type = string
}

variable "salt_master_ipv4_addr" {
  type = string
}

variable "cloud_init_virtual_machines" {
  type = map(object({
    hostname        = string
    ip_address      = string
    gateway         = string
    vlan_tag        = number
    target_node     = string
    cpu_cores       = number
    cpu_sockets     = number
    memory          = string
    qemu_os         = string
    hdd_size        = string
    hdd_storage     = string
    onboot          = bool
    vm_template     = string
    cloud_init_file = string
  }))
}
With the cloud-init module doing most of the heavy lifting, our root module is pretty simple.
The main.tf file declares a module block that calls the cloud-init-vm module with all the required variables.
# Source the Cloud Init Config file
module "vyos_vms" {
  source = "./modules/cloud-init-vm"

  domain                       = var.domain
  proxmox_host                 = var.proxmox_host
  proxmox_host_ipv4_addrs      = var.proxmox_host_ipv4_addrs
  proxmox_user                 = var.proxmox_user
  proxmox_user_key_priv        = file("~/.ssh/id_rsa")
  saltpx_ssh_key               = file("~/.ssh/id_rsa_saltpx.pub")
  saltpx_ssh_key_priv          = filebase64("~/.ssh/id_rsa_saltpx")
  saltdeploy_ssh_key_priv      = filebase64("~/.ssh/id_rsa_saltdeploy")
  admin_ssh_key                = file("~/.ssh/id_rsa.pub")
  salt_repo_filename           = var.salt_repo_filename
  salt_repo_source             = var.salt_repo_source
  salt_repo_gpg_key            = var.salt_repo_gpg_key
  saltdeploy_ssh_key_priv_name = var.saltdeploy_ssh_key_priv_name
  salt_config_repo             = var.salt_config_repo
  salt_master_ipv4_addr        = var.salt_master_ipv4_addr
  cloud_init_virtual_machines  = var.cloud_init_virtual_machines
}
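The directory listing earlier shows an outputs.tf in the module, though its contents aren't covered in this post. Purely as an illustration, it could expose something like the rendered cloud-init file paths:
# modules/cloud-init-vm/outputs.tf (illustrative, not the actual contents)
output "cloud_init_files" {
  description = "Paths of the rendered cloud-init files, keyed by VM"
  value       = { for key, f in local_file.cloud_init : key => f.filename }
}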
In the variables.tf file, we define the variables required by main.tf, as well as the virtual machines we want to create.
variable "proxmox_host" {
  default = "10.100.52.11"
}

variable "proxmox_host_ipv4_addrs" {
  type = map(string)
  default = {
    pmx01 = "10.100.52.11"
    pmx02 = "10.100.52.12"
    pmx03 = "10.100.52.13"
  }
}

variable "proxmox_user" {
  default = "root"
}

variable "domain" {
  default = "krazy.house"
}

variable "saltdeploy_ssh_key_priv_name" {
  default = "id_rsa_saltdeploy"
}

variable "salt_config_repo" {
  default = "git@github.com:<github-user>/salt-config.git"
}

variable "salt_master_ipv4_addr" {
  default = "10.100.54.16"
}

variable "salt_repo_filename" {
  default = "salt.list"
}

variable "salt_repo_source" {
  default = "deb https://repo.saltproject.io/salt/py3/ubuntu/22.04/amd64/latest jammy main"
}
# The indentation of this looks weird. However, it is correct.
# After the template is rendered, the key will have the correct indentation.
variable "salt_repo_gpg_key" {
  default = <<EOT
-----BEGIN PGP PUBLIC KEY BLOCK-----

        mQGNBGPazmABDAC6qc2st6/Uh/5AL325OB5+Z1XMFM2HhQNjB/VcYbLvcCx9AXsU
        eaEmNPm6OY3p5+j8omjpXPYSU7DUQ0lIutuAtwkDMROH7uH/r9IY7iu88S6w3q89
        bgbnqhu4mrSik2RNH2NqEiJkylz5rwj4F387y+UGH3aXIGryr+Lux9WxfqoRRX7J
        WCf6KOaduLSp9lF4qdpAb4/Z5yExXtQRA9HULSJZqNVhfhWInTkVPw+vUo/P9AYv
        mJVv6HRNlTb4HCnl6AZGcAYv66J7iWukavmYKxuIbdn4gBJwE0shU9SaP70dh/LT
        WqIUuGRZBVH/LCuVGzglGYDh2iiOvR7YRMKf26/9xlR0SpeU/B1g6tRu3p+7OgjA
        vJFws+bGSPed07asam3mRZ0Y9QLCXMouWhQZQpx7Or1pUl5Wljhe2W84MfW+Ph6T
        yUm/j0yRlZJ750rGfDKA5gKIlTUXr+nTvsK3nnRiHGH2zwrC1BkPG8K6MLRluU/J
        ChgZo72AOpVNq9MAEQEAAbQ5U2FsdCBQcm9qZWN0IFBhY2thZ2luZyA8c2FsdHBy
        b2plY3QtcGFja2FnaW5nQHZtd2FyZS5jb20+iQHSBBMBCAA8FiEEEIV//dP5Hq5X
        eiHWZMu8gXPXaz8FAmPazmACGwMFCwkIBwIDIgIBBhUKCQgLAgQWAgMBAh4HAheA
        AAoJEGTLvIFz12s/yf0L/jyP/LfduA4DwpjKX9Vpk26tgis9Q0I54UerpD5ibpTA
        krzZxK1yFOPddcOjo+Xqg+I8aA+0nJkf+vsfnRgcpLs2qHZkikwZbPduZwkNUHX7
        6YPSXTwyFlzhaRycwPtvBPLFjfmjjjTi/aH4V/frfxfjH/wFvH/xiaiFsYbP3aAP
        sJNTLh3im480ugQ7P54ukdte2QHKsjJ3z4tkjnu1ogc1+ZLCSZVDxfR4gLfE6GsN
        YFNd+LF7+NtAeJRuJceXIisj8mTQYg+esTF9QtWovdg7vHVPz8mmcsrG9shGr+G9
        iwwtCig+hAGtXFAuODRMur9QfPlP6FhJw0FX/36iJ2p6APZB0EGqn7LJ91EyOnWv
        iRimLLvlGFiVB9Xxw1TxnQMNj9jmB1CA4oNqlromO/AA0ryh13TpcIo5gbn6Jcdc
        fD4Rbj5k+2HhJTkQ78GpZ0q95P08XD2dlaM2QxxKQGqADJOdV2VgjB2NDXURkInq
        6pdkcaRgAKme8b+xjCcVjLkBjQRj2s5gAQwAxmgflHInM8oKQnsXezG5etLmaUsS
        EkV5jjQFCShNn9zJEF/PWJk5Df/mbODj02wyc749dSJbRlTY3LgGz1AeywOsM1oQ
        XkhfRZZqMwqvfx8IkEPjMvGIv/UI9pqqg/TY7OiYLEDahYXHJDKmlnmCBlnU96cL
        yh7a/xY3ZC20/JwbFVAFzD4biWOrAm1YPpdKbqCPclpvRP9N6nb6hxvKKmDo7MqS
        uANZMaoqhvnGazt9n435GQkYRvtqmqmOvt8I4oCzV0Y39HfbCHhhy64HSIowKYE7
        YWIujJcfoIDQqq2378T631BxLEUPaoSOV4B8gk/Jbf3KVu4LNqJive7chR8F1C2k
        eeAKpaf2CSAe7OrbAfWysHRZ060bSJzRk3COEACk/UURY+RlIwh+LQxEKb1YQueS
        YGjxIjV1X7ScyOvam5CmqOd4do9psOS7MHcQNeUbhnjm0TyGT9DF8ELoE0NSYa+J
        PvDGHo51M33s31RUO4TtJnU5xSRb2sOKzIuBABEBAAGJAbYEGAEIACAWIQQQhX/9
        0/kerld6IdZky7yBc9drPwUCY9rOYAIbDAAKCRBky7yBc9drP8ctC/9wGi01cBAW
        BPEKEnfrKdvlsaLeRxotriupDqGSWxqVxBVd+n0Xs0zPB/kuZFTkHOHpbAWkhPr+
        hP+RJemxCKMCo7kT2FXVR1OYej8Vh+aYWZ5lw6dJGtgo3Ebib2VSKdasmIOI2CY/
        03G46jv05qK3fP6phz+RaX+9hHgh1XW9kKbdkX5lM9RQSZOof3/67IN8w+euy61O
        UhNcrsDKrp0kZxw3S+b/02oP1qADXHz2BUerkCZa4RVK1pM0UfRUooOHiEdUxKKM
        DE501hwQsMH7WuvlIR8Oc2UGkEtzgukhmhpQPSsVPg54y9US+LkpztM+yq+zRu33
        gAfssli0MvSmkbcTDD22PGbgPMseyYxfw7vuwmjdqvi9Z4jdln2gyZ6sSZdgUMYW
        PGEjZDoMzsZx9Zx6SO9XCS7XgYHVc8/B2LGSxj+rpZ6lBbywH88lNnrm/SpQB74U
        4QVLffuw76FanTH6advqdWIqtlWPoAQcEkKf5CdmfT2ei2wX1QLatTs=
        =ZKPF
        -----END PGP PUBLIC KEY BLOCK-----
EOT
}
variable "cloud_init_virtual_machines" {
  default = {
    "rfw01" = {
      hostname        = "rfw01"
      ip_address      = "10.100.50.2/23"
      gateway         = "10.100.51.254"
      vlan_tag        = 50
      target_node     = "pmx01"
      cpu_cores       = 2
      cpu_sockets     = 1
      memory          = "4096"
      qemu_os         = "l26"
      hdd_size        = "20G"
      hdd_storage     = "local-lvm"
      onboot          = true
      vm_template     = "vyos-1-5-rolling-202401180024-cloud-init"
      cloud_init_file = "vyos-salt-minion-cloud-config.yaml"
    }
    "slt01" = {
      hostname        = "slt01"
      ip_address      = "10.100.54.16/23"
      gateway         = "10.100.54.1"
      vlan_tag        = 54
      target_node     = "pmx02"
      cpu_cores       = 2
      cpu_sockets     = 1
      memory          = "2048"
      qemu_os         = "l26"
      hdd_size        = "40G"
      hdd_storage     = "zfs02"
      onboot          = true
      vm_template     = "ubuntu-2204-cloud-init"
      cloud_init_file = "salt-master-cloud-config.yaml"
    }
    "slt02" = {
      hostname        = "slt02"
      ip_address      = "10.100.54.17/23"
      gateway         = "10.100.54.1"
      vlan_tag        = 54
      target_node     = "pmx03"
      cpu_cores       = 2
      cpu_sockets     = 1
      memory          = "2048"
      qemu_os         = "l26"
      hdd_size        = "40G"
      hdd_storage     = "zfs03"
      onboot          = true
      vm_template     = "ubuntu-2204-cloud-init"
      cloud_init_file = "salt-minion-cloud-config.yaml"
    }
  }
}
With all that out of the way, we can now create our VMs with terraform apply and watch the magic happen.
In this post, we stitched together the pieces to deploy VMs on Proxmox with Terraform and configure them on boot with cloud-init. As part of the cloud-init configuration, we set up SaltStack to manage the VMs.
Thanks for tuning in. Stay weird ✌️