#!/usr/bin/env python3
import torch
def check_gpu_availability():
    """Check GPU/CUDA availability, print details, and run a small compute test.

    Prints a banner, per-GPU details (name, compute capability, total memory),
    and runs a 1000x1000 matrix-multiply on the GPU as a smoke test.

    Returns:
        bool: True if CUDA is available and the GPU compute test succeeds,
        False otherwise.
    """
    print("="*50)
    print("GPU/CUDA 可用性检查")
    print("="*50)

    # 1. Bail out early if PyTorch cannot see CUDA at all (no CUDA build
    # or no NVIDIA driver detected).
    if not torch.cuda.is_available():
        print("❌ 错误: PyTorch未编译CUDA支持或未检测到NVIDIA驱动")
        return False

    # 2. Report how many GPU devices are visible.
    gpu_count = torch.cuda.device_count()
    print(f"✅ 找到 {gpu_count} 个GPU设备")

    # 3. Per-GPU details. current_device() is loop-invariant, so query it once.
    current_device = torch.cuda.current_device()
    for i in range(gpu_count):
        props = torch.cuda.get_device_properties(i)  # single lookup per GPU
        print(f"\nGPU {i} 详细信息:")
        print(f" 名称: {torch.cuda.get_device_name(i)}")
        print(f" CUDA计算能力: {torch.cuda.get_device_capability(i)}")
        print(f" 总显存: {props.total_memory/1024**3:.2f} GB")
        print(f" {'⭐' if i == current_device else ' '} 当前使用设备: {'是' if i == current_device else '否'}")

    # 4. Actual GPU compute test. CUDA errors are reported asynchronously,
    # so synchronize before declaring success — otherwise a failed kernel
    # could surface only after the "success" message.
    try:
        print("\n正在执行GPU计算测试...")
        x = torch.randn(1000, 1000).cuda()
        y = torch.randn(1000, 1000).cuda()
        z = (x @ y).mean()  # matrix multiply exercises the GPU
        torch.cuda.synchronize()  # surface any pending async CUDA error here
        print(f"✅ GPU计算测试成功完成! 结果: {z.item():.4f}")
        return True
    except Exception as e:
        # Broad catch is deliberate at this diagnostic boundary: report the
        # error text and signal failure instead of crashing.
        print("❌ GPU计算测试失败! 错误信息:")  # was a placeholder-less f-string
        print(str(e))
        return False
if __name__ == "__main__":
|
||
if check_gpu_availability():
|
||
print("\n🎉 您的GPU已准备好用于PyTorch训练!")
|
||
else:
|
||
print("\n⚠️ 您的GPU不可用,请检查以上错误信息") |