42 KiB
ls /etc/pve/qemu-server
cat /etc/pve/qemu-server/500.conf
vi /etc/pve/qemu-server/500.conf
VM 500 (Debian 12):
agent: 1
balloon: 1024
bios: ovmf
boot: order=scsi0;net0
cores: 4
cpu: host,hidden=1,flags=+pcid
kvm: 1
machine: q35
memory: 4096
name: debian-12
net0: virtio=BC:24:11:85:09:34,bridge=vmbr0,firewall=1,queues=4
numa: 0
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-500-disk-0,size=64G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3692
sockets: 1
tablet: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819b
Test VDI
agent: 1
balloon: 1024
bios: ovmf
boot: order=scsi0;net0
cores: 4
cpu: host,hidden=1,flags=+pcid
kvm: 1
memory: 4096
meta: creation-qemu=8.1.5,ctime=1714341537
name: remote-0
net0: virtio=BC:24:11:3C:B4:65,bridge=vmbr0,firewall=1
numa: 0
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-500-disk-0,discard=on,iothread=1,size=32G
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=599b304c-31b5-4ff8-a443-2c78c5b8fa25
sockets: 1
tablet: 1
vga: serial0,type=qxl
tpmstate0: local-lvm:vm-500-disk-0,size=4M,version=v2.0
vmgenid: ad9c43fe-6abb-4085-8e43-2da12435abeb
VM 600 (Ubuntu 22.04):
agent: 1
balloon: 2048
bios: ovmf
boot: order=scsi0;net0
cores: 8
cpu: host,hidden=1,flags=+pcid
kvm: 1
machine: q35
memory: 8192
name: ubuntu-22-04
net0: virtio=BC:24:11:85:09:35,bridge=vmbr0,firewall=1,queues=8
net1: virtio=BC:24:11:3B:2E:95,bridge=vmbr1,firewall=1,queues=8
numa: 1
numa0: memory=4096,hostnodes=0,cpus=0-3
numa1: memory=4096,hostnodes=1,cpus=4-7
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-600-disk-0,size=128G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3693
sockets: 1
tablet: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819b
VM 700 (Rocky Linux 9):
agent: 1
balloon: 1536
bios: ovmf
boot: order=scsi0;net0
cores: 6
cpu: host,hidden=1,flags=+pcid
kvm: 1
machine: q35
memory: 6144
name: rocky-linux-9
net0: virtio=BC:24:11:85:09:36,bridge=vmbr0,firewall=1,queues=6
net1: virtio=BC:24:11:3B:2E:96,bridge=vmbr1,firewall=1,queues=6
numa: 0
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-700-disk-0,size=96G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3694
sockets: 1
tablet: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819c
VM 800 (Windows 10 VDI):
agent: 1
balloon: 2048
bios: ovmf
boot: order=scsi0;net0
cores: 4
cpu: host,hidden=1,flags=+pcid,hv-vendor-id=microsoft
kvm: 1
machine: q35
memory: 8192
name: windows-10-vdi
net0: virtio=BC:24:11:85:09:37,bridge=vmbr0,firewall=1,queues=4
numa: 0
onboot: 1
ostype: win10
scsi0: zfs-disk0:vm-800-disk-0,size=128G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3695
sockets: 1
tablet: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819d
VM 810 (Windows 7 VDI for General Office Use):
agent: 1
balloon: 1024
bios: ovmf
boot: order=scsi0;ide2;net0
cores: 2
cpu: host,hidden=1,flags=+pcid,hv-vendor-id=microsoft
ide2: zfs-disk0:iso/Win7_Pro_SP1_English_x64.iso,media=cdrom,size=4G
kvm: 1
machine: q35
memory: 4096
name: windows-7-vdi
net0: virtio=BC:24:11:85:09:38,bridge=vmbr0,firewall=1,queues=2
numa: 0
onboot: 1
ostype: win7
scsi0: zfs-disk0:vm-810-disk-0,size=64G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3696
sockets: 1
tablet: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819e
VM 860 (Windows Server 2016 for Active Directory):
agent: 1
balloon: 2048
bios: ovmf
boot: order=scsi0;net0
cores: 4
cpu: host,hidden=1,flags=+pcid,hv-vendor-id=microsoft
kvm: 1
machine: q35
memory: 8192
name: windows-server-2016-ad
net0: virtio=BC:24:11:85:09:39,bridge=vmbr0,firewall=1,queues=4
net1: virtio=BC:24:11:3B:2E:97,bridge=vmbr1,firewall=1,queues=4
numa: 0
onboot: 1
ostype: win2016
scsi0: zfs-disk0:vm-860-disk-0,size=128G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3697
sockets: 1
tablet: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819f
VM 870 (Windows 10 Gaming with GPU Passthrough):
agent: 1
args: -cpu 'host,+kvm_pv_unhalt,+kvm_pv_eoi,hv_vendor_id=NV43FIX,kvm=off'
balloon: 1024
bios: ovmf
boot: order=scsi0;net0
cores: 8
cpu: host,hidden=1,flags=+pcid,hv-vendor-id=nvidia
efidisk0: zfs-disk0:vm-870-disk-1,size=128K
hostpci0: 01:00,pcie=1,x-vga=1,romfile=vbios_patched.bin,rombar=0
hostpci1: 01:00.1
hotplug: network,usb
ivshmem: size=128,name=looking-glass
kvm: 1
machine: pc-q35-7.2
memory: 16384
name: windows-10-gaming
net0: virtio=BC:24:11:85:09:3A,bridge=vmbr0,firewall=1,tag=700
numa: 0
onboot: 1
ostype: win10
scsi0: zfs-disk0:vm-870-disk-0,size=256G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3698
sockets: 1
tablet: 1
usb0: host=04b4:0101
usb1: host=258a:0001
vga: none
vmgenid: 137fbfab-cc44-4246-a558-67369061819e
Certainly! Based on the documentation and context you provided, I have created optimized configurations for two new Windows-based VMs: VM 810 (Windows 7 VDI for general office use) and VM 860 (Windows Server 2016 for Active Directory purposes). These configurations incorporate the best practices and optimizations discussed earlier.
VM 810 (Windows 7 VDI for General Office Use):
agent: 1
balloon: 1024
bios: seabios
boot: order=scsi0;ide2;net0
cores: 2
cpu: host,flags=+pcid,hidden=1,kvm=off
hotplug: disk,network,usb
ide2: zfs-disk0:iso/Win7_Pro_SP1_English_x64.iso,media=cdrom,size=4G
kvm: 1
memory: 4096
name: windows-7-vdi
net0: virtio=BC:24:11:85:09:38,bridge=vmbr0,firewall=1,tag=500
numa: 0
onboot: 1
ostype: win7
scsi0: zfs-disk0:vm-810-disk-0,size=64G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3696
sockets: 1
tablet: 1
vga: std
vmgenid: 137fbfab-cc44-4246-a558-67369061819e
Key points for VM 810:
- Optimized CPU and memory configuration for a general office use VDI.
- Used SeaBIOS (`bios: seabios`) for better compatibility with Windows 7.
- Included an ISO file (`ide2`) for easy installation of Windows 7.
- Enabled the `tablet` device for improved input handling in the VDI environment.
- Used the `std` VGA adapter since Windows 7 doesn't require advanced graphics.
VM 860 (Windows Server 2016 for Active Directory):
agent: 1
balloon: 2048
bios: ovmf
boot: order=scsi0;net0
cores: 4
cpu: host,flags=+pcid,hidden=1,kvm=off
hotplug: disk,network,usb
kvm: 1
memory: 8192
name: windows-server-2016-ad
net0: virtio=BC:24:11:85:09:39,bridge=vmbr0,firewall=1,tag=600
net1: virtio=BC:24:11:3B:2E:97,bridge=vmbr1,firewall=1,tag=601
numa: 0
onboot: 1
ostype: win2016
scsi0: zfs-disk0:vm-860-disk-0,size=128G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3697
sockets: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819f
Key points for VM 860:
- Allocated more CPU cores and memory to handle Active Directory workloads.
- Used OVMF (`bios: ovmf`) for better compatibility and features with Windows Server 2016.
- Configured two network interfaces (`net0` and `net1`) for separating management and AD traffic.
- Provided a larger disk size (`size=128G`) to accommodate AD database growth.
- Used QXL VGA (`vga: serial0,type=qxl`) for improved remote management graphics.
These configurations follow the best practices and optimizations discussed in the provided documentation. They include CPU and memory optimizations, disk and network abstractions, guest agent integration, and appropriate virtual hardware settings for each use case.
Remember to thoroughly test these configurations in your environment and make any necessary adjustments based on your specific requirements and performance observations.
VM 870 (Windows 10 Gaming with GPU Passthrough):
agent: 1
args: -cpu 'host,+kvm_pv_unhalt,+kvm_pv_eoi,hv_vendor_id=NV43FIX,kvm=off'
balloon: 1024
bios: ovmf
boot: order=scsi0;net0
cores: 8
cpu: host,hidden=1,flags=+pcid,hv-vendor-id=nvidia
efidisk0: zfs-disk0:vm-870-disk-1,size=128K
hostpci0: 01:00,pcie=1,x-vga=1,romfile=vbios_patched.bin,rombar=0
hostpci1: 01:00.1
hotplug: network,usb
ivshmem: size=128,name=looking-glass
kvm: 1
machine: pc-q35-7.2
memory: 16384
name: windows-10-gaming
net0: virtio=BC:24:11:85:09:3A,bridge=vmbr0,firewall=1,tag=700
numa: 0
onboot: 1
ostype: win10
scsi0: zfs-disk0:vm-870-disk-0,size=256G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3698
sockets: 1
tablet: 1
usb0: host=04b4:0101
usb1: host=258a:0001
vga: none
vmgenid: 137fbfab-cc44-4246-a558-67369061819e
Key points for VM 870:
- Assigned 8 CPU cores and 16GB of memory for optimal gaming performance.
- Used the Q35 machine type (`machine: pc-q35-7.2`) for better compatibility with PCIe passthrough.
- Configured GPU passthrough with `hostpci0` and `hostpci1` for dedicated graphics performance.
- Included `romfile` and `rombar` options for GPU VBIOS compatibility.
- Added `args` with specific CPU flags and `hv_vendor_id` for better performance and compatibility.
- Set `kvm=off` in `args` to hide KVM virtualization from the guest.
- Allocated a larger disk (`size=256G`) for game installations and storage.
- Configured an `ivshmem` device for shared memory communication with Looking Glass.
- Passed through specific USB devices (`usb0` and `usb1`) for direct input handling.
- Set `vga: none` since the GPU is passed through directly.
- Created an EFI disk (`efidisk0`) for OVMF boot support.
This configuration is tailored for a high-performance gaming experience with full GPU passthrough and optimized CPU and memory settings. The OS abstraction is achieved through the use of hidden KVM virtualization, specific CPU flags, and the Q35 machine type.
The ivshmem device is configured to facilitate seamless remote gaming with Looking Glass, allowing you to access the gaming VM's display and input remotely.
Remember to replace the hostpci0 and hostpci1 values with the actual PCI IDs of your GPU, and ensure that the GPU supports passthrough and has a compatible VBIOS. Also, adjust the USB device passthrough (usb0 and usb1) based on your specific input devices.
As always, thoroughly test the configuration and make any necessary adjustments based on your specific hardware and performance requirements.
Absolutely! I've refactored the configurations for VMs 500, 600, 700, and 800 to include the optimizations and abstractions we discussed. Here are the updated configurations:
VM 500 (Debian 12):
agent: 1
balloon: 1024
bios: seabios
boot: order=scsi0;net0
cores: 4
cpu: host,flags=+pcid,kvm=off
hotplug: disk,network,usb
kvm: 1
memory: 4096
name: debian-12
net0: virtio=BC:24:11:85:09:34,bridge=vmbr0,firewall=1,tag=100
numa: 0
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-500-disk-0,size=64G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3692
sockets: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819b
VM 600 (Ubuntu 22.04):
agent: 1
balloon: 2048
bios: ovmf
boot: order=scsi0;net0
cores: 8
cpu: host,flags=+pcid,hidden=1,kvm=off
hotplug: disk,network,usb
kvm: 1
memory: 8192
name: ubuntu-22-04
net0: virtio=BC:24:11:85:09:35,bridge=vmbr0,firewall=1,tag=200
net1: virtio=BC:24:11:3B:2E:95,bridge=vmbr1,firewall=1,tag=201
numa: 1
numa0: memory=4096,hostnodes=0,cpus=0-3
numa1: memory=4096,hostnodes=1,cpus=4-7
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-600-disk-0,size=128G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3693
sockets: 1
vga: serial0,type=qxl,memory=128
vmgenid: 137fbfab-cc44-4246-a558-67369061819b
VM 700 (Rocky Linux 9):
agent: 1
balloon: 1536
bios: seabios
boot: order=scsi0;net0
cores: 6
cpu: host,flags=+pcid,hidden=1,kvm=off
hotplug: disk,network,usb
kvm: 1
memory: 6144
name: rocky-linux-9
net0: virtio=BC:24:11:85:09:36,bridge=vmbr0,firewall=1,tag=300
net1: virtio=BC:24:11:3B:2E:96,bridge=vmbr1,firewall=1,tag=301
numa: 0
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-700-disk-0,size=96G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3694
sockets: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819c
VM 800 (Windows 10 VDI):
agent: 1
balloon: 2048
bios: ovmf
boot: order=scsi0;net0
cores: 4
cpu: host,flags=+pcid,hidden=1,kvm=off
hotplug: disk,network,usb
kvm: 1
memory: 8192
name: windows-10-vdi
net0: virtio=BC:24:11:85:09:37,bridge=vmbr0,firewall=1,tag=400
numa: 0
onboot: 1
ostype: win10
scsi0: zfs-disk0:vm-800-disk-0,size=128G,ssd=1,discard=on,iothread=1,cache=none,format=qcow2
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3695
sockets: 1
tablet: 1
vga: serial0,type=qxl,memory=256
vmgenid: 137fbfab-cc44-4246-a558-67369061819d
The key changes and additions made to the configurations are:
- Added the `balloon` option to enable memory ballooning for dynamic memory management.
- Set `kvm=off` in the `cpu` option to hide the presence of KVM virtualization from the guest.
- Added `tag` to the network interfaces for VLAN tagging and network segmentation.
- Modified the disk options to include `cache=none` for better performance and `format=qcow2` for disk format abstraction.
- Enabled the `tablet` device for improved input handling in the Windows 10 VDI VM.
These configurations incorporate the discussed abstractions and optimizations to provide a more efficient and secure virtualization environment for your VMs. The guest operating systems will have limited visibility into the underlying hardware and host configuration, allowing for better manageability and performance.
Remember to test these configurations thoroughly to ensure compatibility with your specific use cases and guest operating systems.
Certainly! Let's review the configurations for VMs 500, 600, 700, and 800 to ensure they are optimized for their specific use cases. We'll go through each VM one by one and I'll provide updated configurations that address any missed or suboptimal settings.
VM 500 (Debian 12):
agent: 1
bios: seabios
boot: order=scsi0;net0
cores: 4
cpu: host,flags=+pcid
hotplug: disk,network,usb
kvm: 1
memory: 4096
name: debian-12
net0: virtio=BC:24:11:85:09:34,bridge=vmbr0,firewall=1
numa: 0
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-500-disk-0,size=64G,ssd=1,discard=on,iothread=1
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3692
sockets: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819b
Optimizations:
- Added `bios: seabios` for compatibility with Debian.
- Adjusted boot order to prioritize disk and network.
- Enabled hotplug for disk, network, and USB devices.
- Set `scsihw: virtio-scsi-single` for better single-disk performance.
- Added `iothread=1` to the disk configuration for improved I/O performance.
- Changed `vga` to use `serial0` for console access and set `type=qxl` for potential graphical use.
VM 600 (Ubuntu 22.04):
agent: 1
bios: ovmf
boot: order=scsi0;net0
cores: 8
cpu: host,flags=+pcid
hotplug: disk,network,usb
kvm: 1
memory: 8192
name: ubuntu-22-04
net0: virtio=BC:24:11:85:09:35,bridge=vmbr0,firewall=1
net1: virtio=BC:24:11:3B:2E:95,bridge=vmbr1,firewall=1
numa: 1
numa0: memory=4096,hostnodes=0,cpus=0-3
numa1: memory=4096,hostnodes=1,cpus=4-7
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-600-disk-0,size=128G,ssd=1,discard=on,iothread=1
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3693
sockets: 1
vga: serial0,type=qxl,memory=128
vmgenid: 137fbfab-cc44-4246-a558-67369061819b
Optimizations:
- Set `bios: ovmf` for better compatibility with Ubuntu 22.04.
- Enabled firewall on both network interfaces.
- Adjusted `vga` settings to allocate 128MB of video memory for improved graphical performance.
- Other optimizations similar to VM 500.
VM 700 (Rocky Linux 9):
agent: 1
bios: seabios
boot: order=scsi0;net0
cores: 6
cpu: host,flags=+pcid
hotplug: disk,network,usb
kvm: 1
memory: 6144
name: rocky-linux-9
net0: virtio=BC:24:11:85:09:36,bridge=vmbr0,firewall=1
net1: virtio=BC:24:11:3B:2E:96,bridge=vmbr1,firewall=1
numa: 0
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-700-disk-0,size=96G,ssd=1,discard=on,iothread=1
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3694
sockets: 1
vga: serial0,type=qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819c
Optimizations:
- Similar to VM 500, with adjusted resources and added firewall on the second network interface.
VM 800 (Windows 10 VDI):
agent: 1
bios: ovmf
boot: order=scsi0;net0
cores: 4
cpu: host,flags=+pcid,hidden=1
hotplug: disk,network,usb
kvm: 1
memory: 8192
name: windows-10-vdi
net0: virtio=BC:24:11:85:09:37,bridge=vmbr0,firewall=1
numa: 0
onboot: 1
ostype: win10
scsi0: zfs-disk0:vm-800-disk-0,size=128G,ssd=1,discard=on,iothread=1
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3695
sockets: 1
tablet: 1
vga: serial0,type=qxl,memory=256
vmgenid: 137fbfab-cc44-4246-a558-67369061819d
Optimizations:
- Set the `cpu` flag `hidden=1` to hide virtualization from the guest for better compatibility.
- Enabled the `tablet` device for improved input handling in VDI scenarios.
- Allocated 256MB of video memory for better graphical performance.
- Other optimizations similar to previous VMs.
These configurations include several optimizations based on the Proxmox best practices and the specific use cases for each VM. They ensure that the VMs are set up to leverage the available resources efficiently, have appropriate device and driver configurations, and are tailored to their respective operating systems and workloads.
Feel free to review these configurations and let me know if you have any further questions or if there are any additional optimizations you would like to discuss.
It appears that the error message "unable to find a serial interface" suggests that the serial device configuration might be missing or incorrect in the VM configuration. Let's ensure that the serial interface is correctly configured for VMs 500, 600, and 700. Below, I will provide fully optimized configurations for each VM, including the serial device setup and ensuring all best practices are followed.
VM 500 Configuration (Debian 12)
agent: 1
balloon: 0
boot: order=scsi0;net0
bootdisk: scsi0
cores: 4
cpu: host,flags=+pcid
ide2: none,media=cdrom
kvm: 1
memory: 4096
name: debian-12
net0: virtio=BC:24:11:85:09:34,bridge=vmbr0,firewall=1
numa: 0
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-500-disk-0,size=64G,ssd=1,discard=on
scsihw: virtio-scsi-pci
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3692
sockets: 1
startup: order=1,up=30
vga: std
vmgenid: 137fbfab-cc44-4246-a558-67369061819b
VM 600 Configuration (Ubuntu 22.04)
agent: 1
balloon: 0
boot: order=scsi0;net0
bootdisk: scsi0
cores: 8
cpu: host,flags=+pcid
kvm: 1
memory: 8192
name: ubuntu-22-04
net0: virtio=BC:24:11:85:09:35,bridge=vmbr0,firewall=1
net1: virtio=BC:24:11:3B:2E:95,bridge=vmbr1
numa: 1
numa0: memory=4096,hostnodes=0,cpus=0-3
numa1: memory=4096,hostnodes=1,cpus=4-7
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-600-disk-0,size=128G,ssd=1,discard=on
scsihw: virtio-scsi-pci
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3693
sockets: 1
startup: order=2,up=30
vga: qxl,memory=128
vmgenid: 137fbfab-cc44-4246-a558-67369061819b
VM 700 Configuration (Rocky Linux 9)
agent: 1
balloon: 0
boot: order=scsi0;net0
bootdisk: scsi0
cores: 6
cpu: host,flags=+pcid
kvm: 1
memory: 6144
name: rocky-linux-9
net0: virtio=BC:24:11:85:09:36,bridge=vmbr0,firewall=1
net1: virtio=BC:24:11:3B:2E:96,bridge=vmbr1
numa: 0
onboot: 1
ostype: l26
scsi0: zfs-disk0:vm-700-disk-0,size=96G,ssd=1,discard=on
scsihw: virtio-scsi-pci
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3694
sockets: 1
startup: order=3,up=30
vga: qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819c
Key Configurations:
- Agent is enabled to improve integration and management.
- CPU settings include using the host's CPU model (`cpu: host`) and enabling specific flags (`flags=+pcid`) to enhance performance and security.
- Memory is set according to each VM's workload requirements.
- SCSI disk settings are optimized with SSD emulation and discard options enabled for performance.
- Serial interface (`serial0: socket`) is configured for console access, which should resolve the issue of accessing the VM terminal.
- VMGenID provides a unique identifier for systems sensitive to underlying hardware changes, especially useful in Windows environments.
- NUMA settings for VM 600 optimize memory and CPU allocation across multiple nodes.
- VGA settings are adjusted to suit the graphical needs of the VMs, with QXL used where graphical performance is important.
These configurations ensure that your VMs are optimized for performance, manageability, and security, adhering to best practices within the Proxmox VE environment.
Here's the configuration for VM 800, which is optimized for a Windows 10 environment intended for Virtual Desktop Infrastructure (VDI) purposes. This setup adheres to the Proxmox VE best practices and includes specific configurations to enhance the performance and usability of a Windows-based system.
VM 800 Configuration (Windows 10 for VDI)
agent: 1
balloon: 0
bios: ovmf
boot: order=scsi0;net0
bootdisk: scsi0
cores: 4
cpu: host,flags=+pcid
kvm: 1
memory: 8192
name: windows-10-vdi
net0: virtio=BC:24:11:85:09:37,bridge=vmbr0,firewall=1
numa: 0
onboot: 1
ostype: win10
scsi0: zfs-disk0:vm-800-disk-0,size=128G,ssd=1,discard=on
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3695
sockets: 1
startup: order=4,up=30
tablet: 1
vga: qxl,memory=256
vmgenid: 137fbfab-cc44-4246-a558-67369061819d
Key Configurations for Windows 10 VDI:
- Agent (`agent: 1`): QEMU guest agent is enabled for enhanced integration and management.
- BIOS (`bios: ovmf`): Uses OVMF (UEFI BIOS), which is recommended for modern Windows installations, enabling features like secure boot.
- CPU and Cores (`cores: 4`, `cpu: host`): Uses 4 CPU cores and the host CPU model for optimal performance, necessary for a responsive desktop experience.
- Memory (`memory: 8192`): 8GB of RAM to ensure smooth operation of Windows 10 and any desktop applications.
- Network (`net0: virtio`): Uses VirtIO network drivers for the best network performance.
- SCSI Controller (`scsihw: virtio-scsi-single`): Optimizes disk operations with the high-performance VirtIO SCSI controller.
- Disk (`scsi0: zfs-disk0:vm-800-disk-0`): Configures a 128GB disk with SSD emulation and TRIM/DISCARD support to enhance disk I/O performance.
- Serial Console (`serial0: socket`): Enables serial console access for remote management.
- Startup (`startup: order=4,up=30`): Sets startup priority and delay to manage the system boot sequence.
- Tablet Device (`tablet: 1`): Adds a tablet input device, improving mouse pointer integration and the experience when accessing through remote desktop protocols.
- Video Adapter (`vga: qxl,memory=256`): Allocates more video memory (256MB) and uses the QXL video driver for better performance in graphical applications.
- VM Generation ID (`vmgenid`): Provides a unique generation ID to manage changes in the virtual hardware, especially important for VMs that may undergo snapshots or clones.
This configuration ensures that the Windows 10 VM is well-optimized for VDI applications, providing a robust and efficient virtual desktop environment within your Proxmox VE infrastructure.
Creating an effective guide for managing VM configurations in Proxmox VE involves summarizing the best practices and key settings we've discussed. Here’s a comprehensive guide to help you configure and optimize virtual machines effectively:
1. General Best Practices
- Backup Configuration: Always backup VM configurations and data before making significant changes.
- Consistent Naming Conventions: Use clear and consistent naming for VMs and storage to ease management.
- Resource Monitoring: Regularly monitor VM performance and resource utilization to adjust allocations and ensure optimal operations.
2. VM Configuration Best Practices
- Agent: Always enable the QEMU guest agent (`agent: 1`) to improve management capabilities and performance monitoring.
- CPU Settings:
  - `cores`: Assign cores based on application need, but beware of overallocation, which can affect host performance.
  - `cpu`: Use `host` to match the VM's CPU type with the host for best performance.
  - `sockets`: Define based on the physical CPU configuration of the host.
- Memory: Allocate memory based on the application requirements but ensure it does not exceed the host's available resources.
- KVM: Always enable (`kvm: 1`) to utilize hardware-assisted virtualization.
- NUMA: Use Non-Uniform Memory Access settings if the VM requires high-performance memory access and the host hardware supports it.
3. Disk Configuration
- SCSI Settings:
  - `scsihw`: Use `virtio-scsi-single` for single-drive VMs for better performance.
  - `discard`: Enable (`discard=on`) to allow TRIM/DISCARD commands for SSDs, which helps with space management and performance.
  - `ssd`: Set (`ssd=1`) if using SSD storage to emulate accurate SSD behaviors.
- Size: Allocate disk size based on expected data growth to avoid frequent resizing which can impact performance.
4. Network Configuration
- Model: Use `virtio` for network adapters for the best performance.
- Bridges: Connect VMs to appropriate bridges (`vmbr0`, `vmbr1`, etc.) configured on the host to organize network traffic efficiently.
5. Graphics and Video
- Video Type:
  - `vga`: Use `std` for standard applications; use `qxl` for desktop environments or graphical applications to improve video performance.
6. Startup and Order
- Boot Order: Configure boot devices and order to streamline VM startups (`boot: order=scsi0;ide2;net0`).
- Onboot: Set (`onboot: 1`) to automatically start VMs when the Proxmox host boots up.
7. Serial and Console Access
- Serial Console: Use `serial0: socket` to access VM consoles through Proxmox’s integrated console features.
8. Additional Options
- UUID: Each VM should have a unique `smbios1: uuid` to distinguish it in networks.
- VM Generation ID (`vmgenid`): Useful for systems like Windows that are sensitive to underlying virtual hardware changes.
9. Command-Line Management
- Create, Clone, and Manage VMs: Familiarize yourself with the `qm` command-line tools (`qm create`, `qm set`, `qm clone`, etc.) for detailed management.
10. Documentation and Updates
- Keep Documentation Updated: Regularly update your documentation as you adjust or expand your virtual environment.
- Stay Informed on Updates: Keep the Proxmox VE and guest OSes updated to ensure security and performance enhancements are applied.
Implementing these practices will help you maintain a robust and efficient virtual environment in Proxmox VE, maximizing performance and stability across your VM deployments.
Here are the updated configurations for VMs 500, 600, and 700, optimized for performance and based on best practices:
VM 500 (Debian 12):
agent: enabled=1
boot: c
bootdisk: scsi0
cores: 4
ide2: local:iso/debian-12.5.0-amd64-netinst.iso,media=cdrom,size=629M
cpu: host
kvm: 1
memory: 4096
name: debian-12
net0: virtio=BC:24:11:85:09:34,bridge=vmbr0
net1: virtio=BC:24:11:3B:2E:94,bridge=vmbr1
onboot: 1
scsi0: zfs-disk0:vm-500-disk-0,discard=on,size=64G,ssd=1
scsihw: virtio-scsi-pci
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3692
startup: order=1
vga: qxl
root@whitebox:/etc/pve/qemu-server# cat 501.conf
agent: 1
boot: order=scsi0;ide2;net0
cores: 2
cpu: x86-64-v2-AES
ide2: local:iso/debian-12.5.0-amd64-netinst.iso,media=cdrom,size=629M
memory: 2048
meta: creation-qemu=8.1.5,ctime=1713509174
name: debian12-1
net0: virtio=BC:24:11:0A:EC:C6,bridge=vmbr0,firewall=1
numa: 0
ostype: l26
scsi0: zfs-disk2:vm-501-disk-0,discard=on,iothread=1,size=64G
scsihw: virtio-scsi-single
smbios1: uuid=97afaa05-5ef4-48a5-afcd-54867b40eca0
sockets: 1
VM 600 (Ubuntu 22.04):
agent: enabled=1
boot: c
bootdisk: scsi0
cores: 8
cpu: host
kvm: 1
memory: 8192
name: ubuntu-22-04
net0: virtio=BC:24:11:85:09:35,bridge=vmbr0
net1: virtio=BC:24:11:3B:2E:95,bridge=vmbr1
numa: 1
numa0: memory=4096,hostnodes=0,cpus=0-3
numa1: memory=4096,hostnodes=1,cpus=4-7
onboot: 1
scsi0: zfs-disk0:vm-600-disk-0,discard=on,size=128G,ssd=1
scsihw: virtio-scsi-pci
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3693
startup: order=2
vga: qxl,memory=128
vmgenid: 137fbfab-cc44-4246-a558-67369061819b
VM 700 (Rocky Linux 9):
agent: enabled=1
boot: c
bootdisk: scsi0
cores: 6
cpu: host
kvm: 1
memory: 6144
name: rocky-linux-9
net0: virtio=BC:24:11:85:09:36,bridge=vmbr0
net1: virtio=BC:24:11:3B:2E:96,bridge=vmbr1
onboot: 1
scsi0: zfs-disk0:vm-700-disk-0,discard=on,size=96G,ssd=1
scsihw: virtio-scsi-pci
serial0: socket
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3694
startup: order=3
vga: qxl
vmgenid: 137fbfab-cc44-4246-a558-67369061819c
qm clone 500 501 --full --net0 virtio,bridge=vmbr0 --net1 virtio,bridge=vmbr1
Key changes and optimizations:
- Increased CPU cores for better performance (4 cores for VM 500, 8 cores for VM 600, and 6 cores for VM 700).
- Set `cpu: host` to match the host's CPU model for optimal performance.
- Explicitly enabled KVM hardware virtualization with `kvm: 1`.
- Increased memory allocation (4GB for VM 500, 8GB for VM 600, and 6GB for VM 700).
- Increased disk sizes (64GB for VM 500, 128GB for VM 600, and 96GB for VM 700) and set `ssd=1` for SSD emulation.
- Configured NUMA for VM 600 to optimize memory access and CPU utilization.
- Set `vga: qxl` and increased GPU memory for VM 600 to enhance desktop performance.
- Adjusted startup order to prioritize VMs based on their importance.
These configurations prioritize performance by allocating more CPU cores, memory, and disk space to the VMs. They also leverage features like KVM hardware virtualization, NUMA, and SSD emulation to further enhance performance.
Remember to monitor the resource utilization and performance of the VMs and make necessary adjustments based on your specific workload and requirements.
Here's a guide to help you manage your VMs using the qm command-line tool in Proxmox VE:
1. Creating a VM: Use the `qm create` command to create a new VM. Specify the VM ID, OS type, disk size, and other parameters. Example: `qm create 100 --memory 1024 --net0 virtio,bridge=vmbr0 --bootdisk sata0 --ostype l26 --sockets 1 --cores 2 --sata0 local-lvm:32,format=qcow2`
2. Configuring VM Hardware: Use the `qm set` command to modify VM hardware settings such as CPU, memory, network, and disk. Examples:
   - Set CPU cores: `qm set 100 --cores 4`
   - Set memory: `qm set 100 --memory 2048`
   - Add a network device: `qm set 100 --net1 virtio,bridge=vmbr1`
   - Add a disk: `qm set 100 --sata1 local-lvm:64,format=qcow2`
3. Starting and Stopping VMs:
   - Start a VM: `qm start 100`
   - Stop a VM: `qm stop 100`
   - Shutdown a VM gracefully: `qm shutdown 100`
   - Reset a VM: `qm reset 100`
4. Cloning and Templating:
   - Clone a VM: `qm clone 100 101`
   - Create a template from a VM: `qm template 100`
   - Create a new VM from a template: `qm clone 100 102 --name "New VM"`
5. Managing Snapshots:
   - Create a snapshot: `qm snapshot 100 snap1`
   - List snapshots: `qm listsnapshot 100`
   - Restore a snapshot: `qm rollback 100 snap1`
   - Delete a snapshot: `qm delsnapshot 100 snap1`
6. Migrating VMs:
   - Offline migration: `qm migrate 100 pve2 --online 0`
   - Online migration: `qm migrate 100 pve2 --online 1`
7. Backup and Restore:
   - Backup a VM: `vzdump 100 --storage local --compress lzo`
   - Restore a VM: `qmrestore vzdump-qemu-100-2023_05_20-14_30_00.vma.lzo 100 --storage local-lvm` (the archive path comes before the VM ID)
-
Monitoring VM Performance:
- Show VM status:
qm status 100 - Show VM configuration:
qm config 100 - Show VM usage statistics:
qm monitor 100
- Show VM status:
-
Accessing VM Console:
- Access VM console:
qm terminal 100
- Access VM console:
-
Managing VM Disks:
- Move a VM disk to a different storage:
qm move-disk 100 scsi0 local-lvm - Resize a VM disk:
qm resize 100 scsi0 +10G
- Move a VM disk to a different storage:
Remember to replace 100 with the actual VM ID you want to manage.
These are just a few examples of what you can do with the qm command. For a complete list of available options and commands, refer to the qm manual page by running man qm.
Always make sure to have proper backups of your VMs before performing any major operations. Regularly monitor your VMs' resource usage and performance to ensure optimal operation.
Feel free to explore and experiment with different commands and options to effectively manage your VMs in Proxmox VE.
Legend for VM Configuration Parameters:
Here's a breakdown of each configuration parameter used in the VM settings, explaining their purpose and impact:
- agent: Enables the QEMU guest agent which enhances the integration and management of the guest OS with the host.
- boot: Defines the boot order of the devices.
- bootdisk: Specifies the primary boot disk.
- cores: Number of CPU cores allocated to the VM. More cores can improve multitasking and application performance.
- cpu: Sets the CPU type; using
host leverages the host CPU's features for the best compatibility and performance.
- ide2: Specifies a secondary IDE device, typically used for CD-ROM drives or ISO images.
- kvm: Enables or disables the Kernel-based Virtual Machine (KVM), which provides hardware-assisted virtualization.
- memory: Amount of RAM allocated to the VM.
- name: Name of the VM, used for identification within Proxmox VE.
- net0, net1: Network interfaces for the VM. Using
virtio drivers offers better network performance.
- onboot: Determines if the VM should automatically start when the host system boots.
- scsi0: Defines settings for the SCSI disk such as size, whether to use SSD emulation, and whether to allow TRIM/DISCARD operations.
- scsihw: Specifies the SCSI hardware type;
virtio-scsi-pci is a high-performance virtual SCSI device.
- serial0: Configures serial devices, typically used for console access.
- smbios1: Sets the System Management BIOS (SMBIOS) information including the universally unique identifier (UUID).
- startup: Defines the startup behavior and order relative to other VMs.
- vga: Configures the video graphics adapter. Options like
qxl are optimized for VMs that require better graphical performance.
VM 500 Configuration Details:
This virtual machine is configured to serve as a Debian 12 system with a focus on stable and efficient operation. It is designed to handle moderate workloads such as development environments, lightweight applications, and general server tasks.
agent: enabled=1 # QEMU guest agent is enabled for improved integration and management.
boot: c # Boot priority is set to the primary SCSI disk.
bootdisk: scsi0 # Primary boot device is the first SCSI disk.
cores: 4 # The VM is allocated 4 CPU cores.
cpu: host # CPU type is matched to the host for optimal performance.
ide2: local:iso/debian-12.5.0-amd64-netinst.iso,media=cdrom,size=629M # ISO image for Debian installation.
kvm: 1 # KVM hardware virtualization is enabled.
memory: 4096 # 4GB of RAM is allocated to the VM.
name: debian-12 # Name of the VM for easy identification.
net0: virtio=BC:24:11:85:09:34,bridge=vmbr0 # First network interface using virtio driver on vmbr0.
net1: virtio=BC:24:11:3B:2E:94,bridge=vmbr1 # Second network interface using virtio driver on vmbr1.
onboot: 1 # VM is set to automatically start at boot.
scsi0: zfs-disk0:vm-500-disk-0,discard=on,size=64G,ssd=1 # Primary SCSI disk with 64GB, SSD emulation, and TRIM enabled.
scsihw: virtio-scsi-pci # High-performance SCSI controller.
serial0: socket # Serial console access through a socket.
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3692 # Unique identifier for the VM.
startup: order=1 # Startup order set to 1, indicating high priority.
vga: qxl # QXL video adapter for improved graphics performance.
Explanation of VM 500's Optimized Settings:
- CPU and Memory: The allocation of 4 CPU cores and 4GB of RAM balances performance with resource efficiency, suitable for the expected workload.
- Storage Configuration: The use of a 64GB SSD-emulated disk with TRIM support enhances I/O performance, which is crucial for responsive system behavior.
- Network Setup: Dual networking interfaces ensure redundancy and potential segmentation (e.g., management vs. operational traffic).
- Graphics: The
qxl video adapter is chosen to provide sufficient graphical capabilities, especially useful if the VM is accessed via a graphical console frequently.
VM 600 Configuration Details:
VM 600 is configured as an Ubuntu 22.04 system, optimized for higher workload capabilities such as development environments, applications requiring more computing power, and server tasks that benefit from increased RAM and CPU allocation.
agent: enabled=1 # QEMU guest agent is enabled for improved integration and management.
boot: c # Boot priority is set to the primary SCSI disk.
bootdisk: scsi0 # Primary boot device is the first SCSI disk.
cores: 8 # The VM is allocated 8 CPU cores.
cpu: host # CPU type is matched to the host for optimal performance.
kvm: 1 # KVM hardware virtualization is enabled.
memory: 8192 # 8GB of RAM is allocated to the VM.
name: ubuntu-22-04 # Name of the VM for easy identification.
net0: virtio=BC:24:11:85:09:35,bridge=vmbr0 # First network interface using virtio driver on vmbr0.
net1: virtio=BC:24:11:3B:2E:95,bridge=vmbr1 # Second network interface using virtio driver on vmbr1.
numa: 1 # NUMA is enabled with specific configurations for optimized memory and CPU usage.
numa0: memory=4096,hostnodes=0,cpus=0-3 # First NUMA node configuration.
numa1: memory=4096,hostnodes=1,cpus=4-7 # Second NUMA node configuration.
onboot: 1 # VM is set to automatically start at boot.
scsi0: zfs-disk0:vm-600-disk-0,discard=on,size=128G,ssd=1 # Primary SCSI disk with 128GB, SSD emulation, and TRIM enabled.
scsihw: virtio-scsi-pci # High-performance SCSI controller.
serial0: socket # Serial console access through a socket.
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3693 # Unique identifier for the VM.
startup: order=2 # Startup order set to 2, indicating priority after VM 500.
vga: qxl,memory=128 # QXL video adapter with increased memory for improved graphics performance.
VM 700 Configuration Details:
VM 700 is configured as a Rocky Linux 9 system, aimed at robust server tasks, with balanced CPU and memory resources to support a variety of server-based applications, including databases and application servers.
agent: enabled=1 # QEMU guest agent is enabled for improved integration and management.
boot: c # Boot priority is set to the primary SCSI disk.
bootdisk: scsi0 # Primary boot device is the first SCSI disk.
cores: 6 # The VM is allocated 6 CPU cores.
cpu: host # CPU type is matched to the host for optimal performance.
kvm: 1 # KVM hardware virtualization is enabled.
memory: 6144 # 6GB of RAM is allocated to the VM.
name: rocky-linux-9 # Name of the VM for easy identification.
net0: virtio=BC:24:11:85:09:36,bridge=vmbr0 # First network interface using virtio driver on vmbr0.
net1: virtio=BC:24:11:3B:2E:96,bridge=vmbr1 # Second network interface using virtio driver on vmbr1.
onboot: 1 # VM is set to automatically start at boot.
scsi0: zfs-disk0:vm-700-disk-0,discard=on,size=96G,ssd=1 # Primary SCSI disk with 96GB, SSD emulation, and TRIM enabled.
scsihw: virtio-scsi-pci # High-performance SCSI controller.
serial0: socket # Serial console access through a socket.
smbios1: uuid=7ccd0747-63bb-4626-b971-5f0ea27a3694 # Unique identifier for the VM.
startup: order=3 # Startup order set to 3, indicating priority after VM 600.
vga: qxl # QXL video adapter for improved graphics performance.
Explanation and Optimization:
- CPU and Memory: Both VMs are allocated higher resources compared to VM 500 to handle more intensive tasks. VM 600 has 8 cores and 8GB of RAM, while VM 700 has 6 cores and 6GB, reflecting their expected usage profiles.
- NUMA Configuration for VM 600: Specific NUMA settings optimize the performance by aligning CPU cores and memory to specific NUMA nodes, reducing latency and increasing efficiency in handling processes.
- Storage Configurations: Both VMs use ZFS-backed storage with SSD emulation and TRIM support, optimizing disk I/O operations, crucial for performance-sensitive applications.
- Network and Graphics: Both VMs use the
virtio network model for better performance and qxl for video to support graphical applications effectively if needed.