Added cuda_is_available (#4725)

### What problem does this PR solve?

Added cuda_is_available

### Type of change

- [x] Refactoring
This commit is contained in:
Zhichang Yu
2025-02-05 18:01:23 +08:00
committed by GitHub
parent 283d036cba
commit 3411d0a2ce
4 changed files with 24 additions and 7 deletions

View File

@ -27,7 +27,6 @@ from . import operators
import math
import numpy as np
import cv2
import torch
import onnxruntime as ort
from .postprocess import build_post_process
@ -72,6 +71,15 @@ def load_model(model_dir, nm):
raise ValueError("not find model file path {}".format(
model_file_path))
def cuda_is_available():
    """Best-effort probe for a usable CUDA device.

    Returns True only when torch is importable AND reports an available
    CUDA device; any failure (torch missing, broken CUDA setup) yields
    False rather than propagating, so callers can fall back to CPU.
    """
    try:
        import torch
        return bool(torch.cuda.is_available())
    except Exception:
        # torch not installed or CUDA probe blew up — treat as "no GPU".
        return False
options = ort.SessionOptions()
options.enable_cpu_mem_arena = False
options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
@ -81,7 +89,7 @@ def load_model(model_dir, nm):
# https://github.com/microsoft/onnxruntime/issues/9509#issuecomment-951546580
# Shrink GPU memory after execution
run_options = ort.RunOptions()
if torch.cuda.is_available():
if cuda_is_available():
cuda_provider_options = {
"device_id": 0, # Use specific GPU
"gpu_mem_limit": 512 * 1024 * 1024, # Limit gpu memory