using System;
using System.Runtime.InteropServices;
using System.Text;
class Program
{
    // NVML status code for success; every NVML entry point returns an int status.
    private const int NVML_SUCCESS = 0;

    // Mirrors nvmlMemory_t from nvml.h: three 64-bit unsigned byte counts.
    [StructLayout(LayoutKind.Sequential)]
    public struct nvmlMemory_t
    {
        public ulong total; // total installed framebuffer memory, in bytes
        public ulong free;  // unallocated framebuffer memory, in bytes
        public ulong used;  // allocated framebuffer memory, in bytes
    }

    // NVML function imports. nvml.dll ships with the NVIDIA display driver on
    // Windows; all entry points use the C (cdecl) calling convention.
    [DllImport("nvml.dll", CallingConvention = CallingConvention.Cdecl)]
    public static extern int nvmlInit_v2();

    [DllImport("nvml.dll", CallingConvention = CallingConvention.Cdecl)]
    public static extern int nvmlShutdown();

    [DllImport("nvml.dll", CallingConvention = CallingConvention.Cdecl)]
    public static extern int nvmlDeviceGetCount_v2(ref int deviceCount);

    [DllImport("nvml.dll", CallingConvention = CallingConvention.Cdecl)]
    public static extern int nvmlDeviceGetHandleByIndex_v2(int index, ref IntPtr device);

    [DllImport("nvml.dll", CallingConvention = CallingConvention.Cdecl)]
    public static extern int nvmlDeviceGetName(IntPtr device, StringBuilder name, int length);

    [DllImport("nvml.dll", CallingConvention = CallingConvention.Cdecl)]
    public static extern int nvmlDeviceGetMemoryInfo(IntPtr device, ref nvmlMemory_t memory);

    /// <summary>
    /// Enumerates all NVIDIA GPUs via NVML and prints each device's name and
    /// total/used/free memory (in MB) to the console.
    /// </summary>
    static void Main(string[] args)
    {
        int result = nvmlInit_v2();
        if (result != NVML_SUCCESS)
        {
            Console.WriteLine("Kunde inte initiera NVML.");
            return;
        }

        // Once init succeeded, guarantee the matching nvmlShutdown() runs
        // even if an unexpected exception escapes the enumeration below.
        try
        {
            int deviceCount = 0;
            result = nvmlDeviceGetCount_v2(ref deviceCount);
            if (result != NVML_SUCCESS)
            {
                Console.WriteLine("Kunde inte hämta antal enheter.");
                return;
            }
            Console.WriteLine($"Antal NVIDIA-GPUs: {deviceCount}");

            for (int i = 0; i < deviceCount; i++)
            {
                // NVML device handles are opaque and do not need releasing.
                IntPtr device = IntPtr.Zero;
                result = nvmlDeviceGetHandleByIndex_v2(i, ref device);
                if (result != NVML_SUCCESS)
                {
                    Console.WriteLine($"Kunde inte hämta enhet {i}.");
                    continue;
                }

                // 64 chars matches NVML_DEVICE_NAME_BUFFER_SIZE.
                StringBuilder name = new StringBuilder(64);
                result = nvmlDeviceGetName(device, name, name.Capacity);
                if (result != NVML_SUCCESS)
                {
                    Console.WriteLine($"Kunde inte hämta namn för enhet {i}.");
                    continue;
                }

                nvmlMemory_t memory = new nvmlMemory_t();
                result = nvmlDeviceGetMemoryInfo(device, ref memory);
                if (result != NVML_SUCCESS)
                {
                    Console.WriteLine($"Kunde inte hämta minnesinfo för enhet {i}.");
                    continue;
                }

                Console.WriteLine($"GPU {i}: {name}");
                Console.WriteLine($" - Total minne: {memory.total / 1024.0 / 1024.0:F2} MB");
                Console.WriteLine($" - Använt minne: {memory.used / 1024.0 / 1024.0:F2} MB");
                Console.WriteLine($" - Ledigt minne: {memory.free / 1024.0 / 1024.0:F2} MB");
            }
        }
        finally
        {
            nvmlShutdown();
        }
    }
}
import pynvml

# Query every NVIDIA GPU via NVML and print its name and memory usage in MB.
pynvml.nvmlInit()
try:
    device_count = pynvml.nvmlDeviceGetCount()
    print(f"Antal NVIDIA-GPUs: {device_count}")
    for i in range(device_count):
        handle = pynvml.nvmlDeviceGetHandleByIndex(i)
        name = pynvml.nvmlDeviceGetName(handle)
        # Older pynvml releases return the name as bytes; normalize to str
        # so the output is not printed as b'...'.
        if isinstance(name, bytes):
            name = name.decode("utf-8", errors="replace")
        memory = pynvml.nvmlDeviceGetMemoryInfo(handle)
        print(f"GPU {i}: {name}")
        print(f" - Total minne: {memory.total / 1024**2:.2f} MB")
        print(f" - Använt minne: {memory.used / 1024**2:.2f} MB")
        print(f" - Ledigt minne: {memory.free / 1024**2:.2f} MB")
finally:
    # Always release NVML, even if a query above raises.
    pynvml.nvmlShutdown()
import torch
def gpu_test():
    """Print CUDA availability; if available, list each device's name,
    total memory (GB) and compute capability. Returns None."""
    print("Kollar GPU-status...\n")
    # Is CUDA available at all?
    if torch.cuda.is_available():
        print("✅ CUDA är tillgängligt!")
        print(f"Antal CUDA-enheter: {torch.cuda.device_count()}")
        for i in range(torch.cuda.device_count()):
            # Fetch the properties struct once per device instead of
            # calling get_device_properties three times.
            props = torch.cuda.get_device_properties(i)
            print(f" - Enhet {i}: {torch.cuda.get_device_name(i)}")
            print(f" - Minne totalt: {props.total_memory / 1024**3:.2f} GB")
            print(f" - Compute Capability: {props.major}.{props.minor}")
    else:
        print("❌ CUDA är INTE tillgängligt på detta system.")

if __name__ == "__main__":
    gpu_test()
Om du kan göra beräkningar så får du något liknande detta:
✅ CUDA är tillgängligt!
Antal CUDA-enheter: 1
- Enhet 0: NVIDIA A2
- Minne totalt: 16.00 GB
- Compute Capability: 8.6
Om du inte kan göra GPU-beräkningar får du i stället:
❌ CUDA är INTE tillgängligt på detta system.
Installera paketet `tqdm` (t.ex. `pip install tqdm`) innan du kör nästa skript:
import torch
import time
from tqdm import tqdm
def do_computation(device, size, log_file):
    """Multiply two random size x size matrices on `device`, print the elapsed
    time and append a timing line to `log_file`.

    Args:
        device: torch device string, 'cpu' or 'cuda'.
        size: matrix dimension; expected to be a positive int.
        log_file: path of the text file the timing line is appended to.
    """
    print(f"Kör beräkning på {device} med matrisstorlek {size}x{size}...")
    a = torch.randn(size, size, device=device)
    b = torch.randn(size, size, device=device)

    # perf_counter is monotonic and the recommended timer for intervals.
    start_time = time.perf_counter()
    # Split the row dimension into chunks so tqdm can show progress.
    # Clamp so a tiny matrix (size < 10) does not produce zero-row chunks.
    steps = min(10, size)
    chunk_size = size // steps
    results = []
    for i in tqdm(range(steps), desc="Beräknar", unit="delar"):
        start_row = i * chunk_size
        # The last chunk absorbs any remainder rows.
        end_row = (i + 1) * chunk_size if i < steps - 1 else size
        results.append(torch.matmul(a[start_row:end_row], b))
    result = torch.cat(results, dim=0)
    # CUDA kernels launch asynchronously; wait for them to finish so the
    # measured time covers the actual computation, not just the launches.
    if str(device).startswith('cuda'):
        torch.cuda.synchronize()
    end_time = time.perf_counter()

    elapsed = end_time - start_time
    print(f"Beräkning klar på {elapsed:.4f} sekunder.\n")
    # Append the timing line to the log file.
    with open(log_file, 'a') as f:
        f.write(f"Enhet: {device.upper()}, Matrisstorlek: {size}x{size}, Tid: {elapsed:.4f} sekunder\n")
def main():
    """Interactive loop: pick cpu/gpu and a matrix size, then run the benchmark."""
    log_file = "logg.txt"
    print("Resultat kommer att sparas i", log_file)
    while True:
        choice = input("Välj enhet (cpu/gpu) eller 'q' för att avsluta: ").strip().lower()
        if choice == 'q':
            print("Avslutar programmet.")
            break
        # Guard clause: anything other than cpu/gpu restarts the prompt.
        if choice not in ('cpu', 'gpu'):
            print("Ogiltigt val, skriv 'cpu', 'gpu' eller 'q'.\n")
            continue
        size_input = input("Ange matrisstorlek (t.ex. 5000) eller tryck Enter för standard (5000): ").strip()
        if not size_input:
            size = 5000
        else:
            try:
                size = int(size_input)
            except ValueError:
                print("Ogiltig storlek, ange ett heltal.\n")
                continue
            if size <= 0:
                print("Storleken måste vara ett positivt heltal.\n")
                continue
        if choice == 'gpu' and not torch.cuda.is_available():
            print("GPU är inte tillgänglig! Kontrollera installationen.\n")
        else:
            do_computation('cuda' if choice == 'gpu' else 'cpu', size, log_file)

if __name__ == "__main__":
    main()