Add DETR+CLIP based logo detection library and test framework: - DetectLogosDETR class for logo detection and matching - Test script with margin-based and multi-ref matching methods - Data preparation script for test database - Documentation for API usage and test methodology
276 lines
9.4 KiB
Python
Executable File
276 lines
9.4 KiB
Python
Executable File
#!/usr/bin/env python3
"""
CUDA Support Test for Nvidia Jetson Hardware

This script verifies that OpenCV and PyTorch are properly configured
with CUDA support on Jetson devices.

Usage:
    python test_cuda_support.py

Returns:
    Exit code 0 if CUDA is properly configured
    Exit code 1 if CUDA support is missing or misconfigured
"""

import sys
import platform
|
def print_section(title):
    """Print *title* as a banner framed above and below by '=' rules."""
    rule = "=" * 60
    print("\n" + rule)
    print(f" {title}")
    print(rule)
|
def test_pytorch_cuda():
    """Check PyTorch CUDA support and exercise a small GPU computation.

    Prints a diagnostic report covering CUDA/cuDNN versions, per-device
    properties, and a 3x3 GPU matrix multiply.

    Returns:
        bool: True only when CUDA is available AND the GPU tensor test
        succeeds; False otherwise (including when PyTorch itself cannot
        be imported).
    """
    print_section("PyTorch CUDA Support")

    try:
        import torch
        # Plain strings here: these messages have no placeholders,
        # so the f-prefix was unnecessary.
        print("✓ PyTorch imported successfully")
        print(f" Version: {torch.__version__}")

        cuda_available = torch.cuda.is_available()
        print(f"\nCUDA Available: {'✓ YES' if cuda_available else '✗ NO'}")

        if not cuda_available:
            # Guard clause: report why and bail out early instead of
            # nesting the whole success path under an if/else.
            print("\n ⚠ PyTorch CUDA is NOT available")
            print(" Possible reasons:")
            print(" - PyTorch not built with CUDA support")
            print(" - CUDA drivers not installed")
            print(" - Incompatible CUDA version")
            return False

        print(f" CUDA Version: {torch.version.cuda}")
        print(f" cuDNN Version: {torch.backends.cudnn.version()}")
        print(f" cuDNN Enabled: {torch.backends.cudnn.enabled}")

        device_count = torch.cuda.device_count()
        print(f"\n GPU Devices: {device_count}")

        for i in range(device_count):
            props = torch.cuda.get_device_properties(i)
            print(f"\n Device {i}: {props.name}")
            print(f" Compute Capability: {props.major}.{props.minor}")
            print(f" Total Memory: {props.total_memory / 1024**3:.2f} GB")
            print(f" Multi-Processor Count: {props.multi_processor_count}")

        print("\n Testing GPU tensor operations...")
        try:
            x = torch.randn(3, 3).cuda()
            y = torch.randn(3, 3).cuda()
            # Copy the product back to host: CUDA kernels launch
            # asynchronously, so without this synchronization a failing
            # kernel could go unnoticed and "successful" would still
            # be printed.
            z = (x @ y).cpu()
            if z.shape != (3, 3):
                raise RuntimeError(f"unexpected result shape {tuple(z.shape)}")
            print(" ✓ GPU tensor operations successful")

            print(f" Current Device: {torch.cuda.current_device()}")
            print(f" Device Name: {torch.cuda.get_device_name(0)}")
        except Exception as e:
            print(f" ✗ GPU tensor operations failed: {e}")
            return False

        # Reaching this point means CUDA was available and the tensor
        # test passed (the original's trailing `return cuda_available`
        # could only ever be True here).
        return True

    except ImportError as e:
        print(f"✗ Failed to import PyTorch: {e}")
        return False
    except Exception as e:
        print(f"✗ Error testing PyTorch: {e}")
        return False
|
def test_opencv_cuda():
    """Check OpenCV CUDA support and exercise a GPU upload/download round trip.

    Parses ``cv2.getBuildInformation()`` for the CUDA build flag, lists
    CUDA-enabled devices, and verifies a GpuMat upload/download returns
    the original image intact.

    Returns:
        bool: True only when OpenCV was built with CUDA, at least one
        CUDA device is present, and the round-trip test succeeds; False
        otherwise (including when OpenCV itself cannot be imported).
    """
    print_section("OpenCV CUDA Support")

    try:
        import cv2
        print("✓ OpenCV imported successfully")
        print(f" Version: {cv2.__version__}")

        build_info = cv2.getBuildInformation()

        # The build report contains a "CUDA:" line whose value is YES/NO.
        # NOTE(review): this substring check could false-positive if
        # another YES appears on the same report line — acceptable for a
        # diagnostic script.
        cuda_enabled = "CUDA:" in build_info and "YES" in build_info.split("CUDA:")[1].split("\n")[0]

        print(f"\nCUDA Support: {'✓ YES' if cuda_enabled else '✗ NO'}")

        if not cuda_enabled:
            print("\n ⚠ OpenCV CUDA is NOT available")
            print(" Possible reasons:")
            print(" - OpenCV not built with CUDA support")
            print(" - Need to install opencv-contrib-python with CUDA")
            print(" - For Jetson, may need to build from source")
            return False

        # Echo the CUDA-related portion of the build configuration.
        # The original loop tested `'CUDA' in line` three separate times;
        # a single section flag expresses the same scan.
        print("\n CUDA Build Configuration:")
        cuda_section = False
        for line in build_info.split('\n'):
            if 'CUDA' in line:
                cuda_section = True
            if not cuda_section:
                continue
            if any(keyword in line for keyword in ('CUDA', 'cuDNN', 'NVIDIA', 'GPU')):
                print(f" {line.strip()}")
            # Stop at the next top-level (unindented) section header.
            if line.strip() and not line.startswith(' ') and 'CUDA' not in line:
                break

        try:
            cuda_device_count = cv2.cuda.getCudaEnabledDeviceCount()
            print(f"\n CUDA Devices: {cuda_device_count}")

            if cuda_device_count == 0:
                print(" ⚠ No CUDA devices detected")
                return False

            print("\n Testing CUDA operations...")
            try:
                import numpy as np
                test_img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
                # cv2.cuda.GpuMat is the supported spelling; the legacy
                # cv2.cuda_GpuMat alias is absent from newer builds.
                gpu_mat = cv2.cuda.GpuMat()
                gpu_mat.upload(test_img)
                result = gpu_mat.download()
                # Verify the round trip actually preserved the data —
                # the original discarded `result` unchecked.
                if not np.array_equal(result, test_img):
                    raise RuntimeError("GpuMat upload/download round trip corrupted data")
                print(" ✓ CUDA operations successful")
            except Exception as e:
                print(f" ✗ CUDA operations failed: {e}")
                return False

        except AttributeError:
            print(" ⚠ cv2.cuda module not available")
            print(" OpenCV may not be built with CUDA support")
            return False

        return True

    except ImportError as e:
        print(f"✗ Failed to import OpenCV: {e}")
        return False
    except Exception as e:
        print(f"✗ Error testing OpenCV: {e}")
        return False
|
def print_system_info():
    """Report host platform details, Jetson identification, and CUDA toolkit presence."""
    print_section("System Information")

    # Basic host facts, printed as "Label: value" pairs.
    for label, value in (
        ("Platform", platform.platform()),
        ("Python Version", platform.python_version()),
        ("Architecture", platform.machine()),
        ("Processor", platform.processor()),
    ):
        print(f"{label}: {value}")

    # Jetson boards expose their L4T release string in this file.
    try:
        with open('/etc/nv_tegra_release', 'r') as f:
            jetson_version = f.read().strip()
    except FileNotFoundError:
        print("\n Not running on Jetson device (or /etc/nv_tegra_release not found)")
    else:
        print("\n✓ Jetson Device Detected")
        print(f" {jetson_version}")

    # Probe for the CUDA compiler to confirm the toolkit is installed.
    import subprocess
    try:
        nvcc = subprocess.run(['nvcc', '--version'],
                              capture_output=True,
                              text=True,
                              timeout=5)
    except FileNotFoundError:
        print("\n⚠ nvcc not found - CUDA toolkit may not be installed")
    except Exception as e:
        print(f"\n⚠ Error checking CUDA toolkit: {e}")
    else:
        if nvcc.returncode == 0:
            print("\n✓ CUDA Toolkit detected:")
            # Only the "release" line carries the version number.
            for line in nvcc.stdout.split('\n'):
                if 'release' in line.lower():
                    print(f" {line.strip()}")
        else:
            print("\n⚠ nvcc not found - CUDA toolkit may not be installed")
|
def print_memory_info():
    """Report total/allocated/reserved/free memory for the current CUDA device."""
    print_section("GPU Memory Information")

    try:
        import torch

        if not torch.cuda.is_available():
            # Nothing to query without a CUDA runtime.
            print("CUDA not available - cannot query GPU memory")
            return

        device = torch.cuda.current_device()
        total_mem = torch.cuda.get_device_properties(device).total_memory
        allocated = torch.cuda.memory_allocated(device)
        cached = torch.cuda.memory_reserved(device)

        gib = 1024 ** 3
        print(f"Total GPU Memory: {total_mem / gib:.2f} GB")
        print(f"Allocated: {allocated / gib:.2f} GB")
        print(f"Cached: {cached / gib:.2f} GB")
        print(f"Free: {(total_mem - allocated) / gib:.2f} GB")
    except Exception as e:
        print(f"Error querying GPU memory: {e}")
|
def main():
    """Run all CUDA diagnostics and return a process exit code (0 = fully ready)."""
    banner = "=" * 60
    print("\n" + banner)
    print(" CUDA Support Verification for Nvidia Jetson")
    print(banner)

    print_system_info()

    pytorch_cuda = test_pytorch_cuda()
    opencv_cuda = test_opencv_cuda()

    # Memory stats are only meaningful when PyTorch can see a GPU.
    if pytorch_cuda:
        print_memory_info()

    print_section("Summary")
    print(f"PyTorch CUDA Support: {'✓ ENABLED' if pytorch_cuda else '✗ DISABLED'}")
    print(f"OpenCV CUDA Support: {'✓ ENABLED' if opencv_cuda else '✗ DISABLED'}")

    if pytorch_cuda and opencv_cuda:
        print("\n✓ All CUDA checks passed - system ready for GPU-accelerated processing")
        return 0

    # Partial or no support: explain which side is degraded, then exit 1.
    if pytorch_cuda:
        print("\n⚠ PyTorch CUDA enabled, but OpenCV CUDA disabled")
        print(" Some operations will use GPU, but OpenCV operations will use CPU")
    elif opencv_cuda:
        print("\n⚠ OpenCV CUDA enabled, but PyTorch CUDA disabled")
        print(" OpenCV operations will use GPU, but PyTorch models will use CPU")
    else:
        print("\n✗ CUDA support not available - will run in CPU mode")
        print(" Performance will be significantly slower")
    return 1
|
if __name__ == "__main__":
    # Run the full diagnostic suite and propagate its status to the shell.
    status = main()
    print("\n" + "=" * 60 + "\n")
    sys.exit(status)
|