# Python code mostly for the OGL and VSL Course
'''
Patrik Martinsson 2025
Script to check if QEMU guest agent is active on VMs in Proxmox
you need to install the following packages:
pip install proxmoxer
pip install requests
pip install paramiko
pip install openssh_wrapper
'''
from proxmoxer import ProxmoxAPI

# Connect to the Proxmox node's REST API.
proxmox = ProxmoxAPI(
    "xxx.xxx.xxx.xxx",    # IP address of Proxmox node
    user="CHANGEME@pam",  # Username for Proxmox node (realm @pam or @pve)
    password="CHANGEME",  # NOTE(review): prefer an API token or input() over a hard-coded password
    verify_ssl=False,     # set to True if using SSL with valid certificate
)

node = "groupXX-srvXX"  # Name of your Proxmox node

# Walk every QEMU VM on the node and probe its guest agent.
vms = proxmox.nodes(node).qemu.get()
for vm in vms:
    vmid = vm["vmid"]
    name = vm.get("name", f"vm-{vmid}")
    try:
        # Ping the agent first: raises if the agent is absent or not responding.
        proxmox.nodes(node).qemu(vmid).agent("ping").post()

        # Collect the guest's IP addresses, skipping loopback and link-local.
        net_info = proxmox.nodes(node).qemu(vmid).agent("network-get-interfaces").get()
        ips = []
        for iface in net_info.get("result", []):
            for ip in iface.get("ip-addresses", []):
                addr = ip.get("ip-address")
                # Guard against a missing address (original crashed on None),
                # then filter link-local fe80::, ::1 and 127.0.0.1.
                if addr and not addr.startswith("fe80") and addr not in ("::1", "127.0.0.1"):
                    ips.append(addr)
        ip_list = ", ".join(ips) if ips else "No IP"
        print(f"{vmid} ({name}) -> Guest agent active, IP: {ip_list}")  # VM name, ID and IPs
    except Exception as e:
        # Agent missing, VM stopped, or API error: report and continue with the next VM.
        print(f"{vmid} ({name}) -> no guest agent: {e}")
# Patrik Martinsson 2025
# Simple script to ping an IP range
import subprocess
import platform
def ping_host(ip):
    # Return True when *ip* answers a single ping, False otherwise.
    # Windows spells the count flag "-n"; Unix-like systems use "-c".
    count_flag = "-c"
    if platform.system().lower() == "windows":
        count_flag = "-n"
    cmd = ["ping", count_flag, "1", ip]
    try:
        completed = subprocess.run(
            cmd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except Exception:
        # ping binary missing or failed to launch: treat as unreachable.
        return False
    return completed.returncode == 0
def scan_range(base_ip, start, end):
    # Ping every address base_ip.start .. base_ip.end (inclusive) and report each.
    for last_octet in range(start, end + 1):
        address = f"{base_ip}.{last_octet}"
        status = "alive" if ping_host(address) else "unreachable"
        print(f"{address} is {status}")
if __name__ == "__main__":
    # Example sweep: 192.168.20.101 through 192.168.20.109.
    base_ip = "192.168.20"  # change base IP if you need
    scan_range(base_ip, 101, 109)  # adjust the 4th-octet range as needed
from proxmoxer import ProxmoxAPI
from requests import *  # NOTE(review): unused star import kept for compatibility; prefer removing it

# IDE slots that may carry a CD/DVD device.
# NOTE(review): Proxmox exposes ide0-ide3; 'ide4' never matches and 'ide0' is skipped - confirm intent.
ides = ['ide1', 'ide2', 'ide3', 'ide4']

proxmox = ProxmoxAPI(
    '192.168.20.222',        # IP address of Proxmox node
    user='root@pam',         # Username and domain (@pve or @pam)
    password='Network!337',  # NOTE(review): hard-coded credential; prefer a token or input()
    verify_ssl=False,        # set to True if using SSL with valid certificate
)

# Walk every node in the cluster and report each VM's CD-ROM state.
for node_entry in proxmox.nodes.get():
    nodename = node_entry['node']
    print("===== ", nodename, " =====")
    for vm in proxmox.nodes(nodename).qemu.get():
        print(f"ID: {vm['vmid']} ( {vm['name']} )", " " * (18 - len(vm['name'])),
              f"Status: {vm['status']} ", end='')
        # Get VM configuration
        vm_config = proxmox.nodes(nodename).qemu(vm['vmid']).config.get()
        for ide in ides:
            if ide not in vm_config:
                continue
            ide_config = vm_config[ide]
            if 'cdrom' in ide_config and 'size' in ide_config:
                # A media file is attached: the config string carries a size= field.
                print("CD is mounted: ", ide_config.split(',')[0])
                break  # We found the CD
            if 'cdrom' in ide_config:
                # CD device present but empty (no size= field).
                print("No CD is mounted")
                break  # We found an empty CD-device
        else:  # Loop finished without finding any CD-device (no break)
            print("No CD device")
# Output:
# root@group11-srv2:/home/imra# python3 cd.py ===== group11-srv1 ===== ID: 102 ( thin-iscsi-test ) Status: running No CD is mounted ID: 103 ( test-dir ) Status: stopped CD is mounted: local:iso/xubuntu-24.04.3-desktop-amd64.iso ID: 104 ( test14 ) Status: stopped no CD device ID: 101 ( XubuntuVLAN16 ) Status: running No CD is mounted ===== group11-srv2 =====
from proxmoxer import ProxmoxAPI
from requests import *  # NOTE(review): unused star import kept for compatibility; prefer removing it
import json

# Connect to the Proxmox host, prompting for the password instead of hard-coding it.
proxmox = ProxmoxAPI(
    '192.168.18.10',  # Hercules
    user='root@pam',
    password=input("Password:"),
    verify_ssl=False,
)

nodes = proxmox.nodes.get()
nodename = nodes[0]['node']  # first node returned by the cluster
print("+++++++++ Node= ", nodename, " +++++++++++++++++++++++++")

# Report the QEMU guest-agent state of every VM on the node.
vmlist = proxmox.nodes(nodename).qemu.get()
for vm in vmlist:
    print("====================================")
    print(f"{vm['vmid']}, ({vm['name']})", " " * (20 - len(vm['name'])),
          f"Status: {vm['status']}", end=" ")
    vm_config = proxmox.nodes(nodename).qemu(vm['vmid']).config.get()
    if "agent" not in vm_config:
        # The VM was never configured with the QEMU guest-agent option.
        print("MISSING: Please reconfigure VM in Proxmox with QEMU Guest Agent")
        continue
    if vm['status'] != "running":
        print("VM-NOT-RUNNING: Can't determine status of QEMU Guest Agent when VM is not running")
        continue
    try:
        # agent.info raises a 500 error when the agent is configured but not running.
        q = proxmox.nodes(nodename).qemu(vm['vmid']).agent.info.get()
        print(" QEMU Version: ", q['result']['version'], end=" ")
    except Exception as e:
        print("QEMU-not-running:", e)

# Simon's suggestion - did a '/' go missing in the path before agent/ ?
# above = proxmox(f"nodes/{nodename}/qemu/{vm['vmid']}/agent/network-get-interfaces").get()
# OUTPUT (sample run; stray scratch lines "hej"/"fhtj"/"j" preserved from the original paste):
# hej
# fhtj
# j
# +++++++++ Node=  hercules  +++++++++++++++++++++++++
# ====================================
# 333, (raCpuTestKillMe) Status: running MISSING: Please reconfigure VM in Proxmox with QEMU Guest Agent
# ====================================
# 107, (GPU-Ubuntu-pcm) Status: stopped VM-NOT-RUNNING: Can't determine status of QEMU Guest Agent when VM is not running
# ====================================
# 123, (MyFirstVM) Status: stopped VM-NOT-RUNNING: Can't determine status of QEMU Guest Agent when VM is not running
# ====================================
# 109, (KaliLinux) Status: running MISSING: Please reconfigure VM in Proxmox with QEMU Guest Agent
# ====================================
# 113, (devpcm) Status: stopped VM-NOT-RUNNING: Can't determine status of QEMU Guest Agent when VM is not running
# ====================================
# 110, (KaliLinuxClone) Status: stopped MISSING: Please reconfigure VM in Proxmox with QEMU Guest Agent
# ====================================
# 112, (OGL202-Simon) Status: running  QEMU Version: 8.2.2 ====================================
# 201, (alfa) Status: running  QEMU Version: 8.2.2 ====================================
# 106, (SNMP) Status: running QEMU-not-running: 500 Internal Server Error: QEMU guest agent is not running
# ====================================
# 111, (CiscoCML) Status: stopped MISSING: Please reconfigure VM in Proxmox with QEMU Guest Agent
# ====================================
# 104, (FortigateVMOutside) Status: running MISSING: Please reconfigure VM in Proxmox with QEMU Guest Agent
# ====================================
# 100, (MCPSimon) Status: running QEMU-not-running: 500 Internal Server Error: No QEMU guest agent configured
# ====================================
# 105, (Windows11) Status: running  QEMU Version: 108.0.2 ====================================
# 103, (FOG) Status: running  QEMU Version: 8.2.2 ====================================
# 108, (Portainer) Status: running  QEMU Version: 8.2.2 ====================================
# 101, (catch-up2.cnap.hv.se) Status: running  QEMU Version: 5.2.0 ====================================
# 102, (FortigateVM) Status: running MISSING: Please reconfigure VM in Proxmox with QEMU Guest Agent
from proxmoxer import ProxmoxAPI
from requests import *  # NOTE(review): unused star import kept for compatibility; prefer removing it
import json

proxmox = ProxmoxAPI(
    '192.168.18.10',
    user='root@pam',
    password=input("Password:"),
    verify_ssl=False,
)

nodes = proxmox.nodes.get()
nodename = nodes[0]['node']
nodename = 'hercules'  # NOTE(review): immediately overrides the discovered node name - confirm intent
print("Node= ", nodename)

# For each VM: print status, CD-ROM state, guest-agent version and IPv4 addresses.
vmlist = proxmox.nodes(nodename).qemu.get()
for vm in vmlist:
    print("====================================")
    print(f"ID: {vm['vmid']}, Name: {vm['name']}, Status: {vm['status']}", end=" ")

    # --- CD-ROM state ------------------------------------------------------
    vm_config = proxmox.nodes(nodename).qemu(vm['vmid']).config.get()
    # ide2 is the conventional CD slot; fall back to a string sentinel so the
    # substring tests below stay valid when the slot is absent.
    ide2_config = vm_config.get('ide2', "FALSE")
    if 'cdrom' in ide2_config and 'size' in ide2_config:
        # size= only appears in the config string when an ISO is attached.
        print(f"CD is mounted: ==== {vm_config['ide2']} ====", end=" ")
    else:
        print("No CD is mounted", end=" ")

    # --- Guest-agent version ----------------------------------------------
    try:
        q = proxmox.nodes(nodename).qemu(vm['vmid']).agent.info.get()
        print(" QEMU Version: ", q['result']['version'], end=" ")
    except Exception as e:
        print("ERROR: Could not retrieve guest agent version:", e)

    # --- Guest IPv4 addresses ---------------------------------------------
    try:
        interfaces = proxmox(f"/nodes/{nodename}/qemu/{vm['vmid']}/agent/network-get-interfaces").get()['result']
        for interface in interfaces:
            if interface['name'] == "lo":
                continue  # skip the loopback interface
            for ip in interface['ip-addresses']:
                # FIX: the original tested `"127.0.0.1" not in ip`, which checks the
                # dict *keys* and is always true; compare against the address itself.
                if ip['ip-address-type'] == "ipv4" and ip['ip-address'] != "127.0.0.1":
                    print("IP=", ip['ip-address'])
    except Exception as e:
        # FIX: message previously said "guest agent version" (copy-paste error).
        print("ERROR: Could not retrieve network interfaces:", e)
import torch
import time
def benchmark_gpu(matrix_size=8192, iterations=100):
    """Measure raw FP32 matmul throughput on the first CUDA GPU.

    Multiplies two random ``matrix_size x matrix_size`` matrices
    ``iterations`` times, timing the run with CUDA events.

    Returns:
        The measured TFLOPS as a float, or ``None`` when CUDA is unavailable.
    """
    # Verify CUDA (GPU support) is available before touching the device.
    if not torch.cuda.is_available():
        print("Fel: CUDA är inte tillgängligt. Kontrollera dina Nvidia-drivrutiner.")
        return None

    device = torch.device("cuda")
    name = torch.cuda.get_device_name(device)
    print(f"Hittade GPU: {name}")

    # 1. Create random matrices directly on the GPU (no host->device copy).
    print(f"Skapar två {matrix_size}x{matrix_size} matriser (FP32)...")
    a = torch.randn(matrix_size, matrix_size, dtype=torch.float32, device=device)
    b = torch.randn(matrix_size, matrix_size, dtype=torch.float32, device=device)

    # 2. Warm-up: lets clocks/caches settle so the timed run is representative.
    print("Kör uppvärmning...")
    for _ in range(10):
        _ = torch.matmul(a, b)
    torch.cuda.synchronize()  # wait until the GPU is completely idle

    # 3. Timed benchmark using CUDA events (device-side timing, more accurate
    #    than wall-clock for asynchronous GPU work).
    print(f"Kör benchmark med {iterations} iterationer. Pressar GPU:n...")
    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)
    start_event.record()
    for _ in range(iterations):
        _ = torch.matmul(a, b)
    end_event.record()
    torch.cuda.synchronize()  # wait for all queued kernels to finish

    # 4. Convert elapsed time to throughput.
    elapsed_time_ms = start_event.elapsed_time(end_event)
    elapsed_time_s = elapsed_time_ms / 1000.0
    # An N x N matmul performs 2 * N^3 floating-point operations per iteration.
    ops_per_iteration = 2.0 * (matrix_size ** 3)
    total_ops = ops_per_iteration * iterations
    tflops = (total_ops / elapsed_time_s) / 1e12  # Tera = 10^12

    print("\n" + "=" * 40)
    print(f" RESULTAT FÖR {name.upper()}")
    print("=" * 40)
    print(f"Tid totalt: {elapsed_time_s:.3f} sekunder")
    print(f"Uppmätt effekt: {tflops:.2f} TFLOPS (FP32)")
    print("=" * 40)
    # Improvement: return the measurement instead of discarding it, so callers
    # can use the number programmatically (None still signals "no CUDA").
    return tflops
if __name__ == "__main__":
    # Run the benchmark with its default matrix size and iteration count.
    benchmark_gpu()
import torch
import torchvision.models as models
import time
def benchmark_ai(base_batch_size=64, iterations=50):
    """Benchmark ResNet50 FP32 inference throughput on all visible GPUs.

    Runs ``iterations`` forward passes over a random image batch, scaling the
    batch with the GPU count (DataParallel) to keep every card saturated.

    Returns:
        Measured images/second as a float, or ``None`` when CUDA is unavailable.
    """
    if not torch.cuda.is_available():
        print("Fel: Inget CUDA-stöd hittades.")
        return None

    num_gpus = torch.cuda.device_count()
    print(f"Hittade {num_gpus} GPU(er).")

    # 1. Load the AI model (standard vision benchmark network).
    print("Laddar ResNet50 (Standardmodell för Vision)...")
    model = models.resnet50()

    # Scale the batch size with the number of cards so DataParallel saturates all of them.
    batch_size = base_batch_size * num_gpus
    if num_gpus > 1:
        print(f"Aktiverar DataParallel. Sprider ut batchen på {batch_size} bilder över {num_gpus} kort.")
        model = torch.nn.DataParallel(model)
    model = model.cuda()
    model.eval()  # inference mode: we are not training the model here

    # 2. Dummy data: a batch of standard 224x224 RGB images.
    print("Skapar dummy-data...")
    dummy_input = torch.randn(batch_size, 3, 224, 224).cuda()

    # 3. Warm-up so the timed run measures steady-state performance.
    print("Värmer upp grafikkorten...")
    with torch.no_grad():  # gradients are not needed for inference
        for _ in range(10):
            _ = model(dummy_input)
    torch.cuda.synchronize()

    # 4. Timed benchmark.
    print(f"Kör benchmark ({iterations} iterationer)...")
    start_time = time.time()
    with torch.no_grad():
        for _ in range(iterations):
            _ = model(dummy_input)
    torch.cuda.synchronize()
    end_time = time.time()

    # 5. Summarize results.
    total_time = end_time - start_time
    total_images = batch_size * iterations
    images_per_sec = total_images / total_time
    print("\n" + "=" * 45)
    print(" AI/ML INFERENS BENCHMARK (ResNet50)")
    print("=" * 45)
    print(f"Antal GPU:er använda: {num_gpus}")
    print(f"Batch size per steg: {batch_size}")
    print(f"Totalt antal bilder: {total_images}")
    print(f"Tid totalt: {total_time:.2f} sekunder")
    print(f"Prestanda: {images_per_sec:.2f} bilder/sekund")
    print("=" * 45)
    # Improvement: return the throughput so callers can consume it programmatically.
    return images_per_sec
if __name__ == "__main__":
    # Launch the FP32 inference benchmark with default settings.
    benchmark_ai()
import torch
import torchvision.models as models
import time
def benchmark_ai_fp16(base_batch_size=1024, iterations=50):
    """Benchmark ResNet50 inference in FP16 (autocast/Tensor Cores) on all GPUs.

    Same flow as the FP32 benchmark, but the forward passes run inside
    ``torch.autocast`` so eligible ops execute in half precision.

    Returns:
        Measured images/second as a float, or ``None`` when CUDA is unavailable.
    """
    if not torch.cuda.is_available():
        print("Fel: Inget CUDA-stöd hittades.")
        return None

    num_gpus = torch.cuda.device_count()
    print(f"Hittade {num_gpus} GPU(er).")

    # 1. Load the AI model.
    print("Laddar ResNet50...")
    model = models.resnet50()

    # Scale the batch with the GPU count to saturate every card under DataParallel.
    batch_size = base_batch_size * num_gpus
    if num_gpus > 1:
        model = torch.nn.DataParallel(model)
    model = model.cuda()
    model.eval()  # inference mode

    # 2. Dummy input batch of 224x224 RGB images.
    print(f"Skapar dummy-data för en batch på {batch_size} bilder...")
    dummy_input = torch.randn(batch_size, 3, 224, 224).cuda()

    # 3. Warm-up under autocast: forces FP16 kernels / Tensor Cores where possible.
    print("Värmer upp grafikkorten och Tensor Cores i FP16...")
    with torch.no_grad():
        with torch.autocast(device_type="cuda", dtype=torch.float16):
            for _ in range(10):
                _ = model(dummy_input)
    torch.cuda.synchronize()

    # 4. Timed benchmark, also under autocast.
    print(f"Kör benchmark ({iterations} iterationer)...")
    start_time = time.time()
    with torch.no_grad():
        with torch.autocast(device_type="cuda", dtype=torch.float16):
            for _ in range(iterations):
                _ = model(dummy_input)
    torch.cuda.synchronize()
    end_time = time.time()

    # 5. Summarize results.
    total_time = end_time - start_time
    total_images = batch_size * iterations
    images_per_sec = total_images / total_time
    print("\n" + "=" * 45)
    print(" AI/ML INFERENS BENCHMARK (ResNet50 - FP16)")
    print("=" * 45)
    print(f"Antal GPU:er använda: {num_gpus}")
    print(f"Batch size per steg: {batch_size}")
    print(f"Totalt antal bilder: {total_images}")
    print(f"Tid totalt: {total_time:.2f} sekunder")
    print(f"Prestanda: {images_per_sec:.2f} bilder/sekund")
    print("=" * 45)
    # Improvement: return the throughput so callers can consume it programmatically.
    return images_per_sec
if __name__ == "__main__":
    # Launch the FP16 inference benchmark with default settings.
    benchmark_ai_fp16()