我正在尝试使用
YOLOv8
模型进行一些推理,只需使用以下命令:
yolo detect predict source=input.jpg model=yolov8n.pt device=0
但是我收到了与 PyTorch 相关的错误(如下所示,我的 pytorch 版本是
2.3.0+cu118
):
RuntimeError: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=, num_gpus=
我搜索了很多,找到
CUDAContext.cpp
文件并修复它等等,但找不到解决方案。
有什么问题吗?怎么解决?
PyTorch 使用情况和 GPU 可用性的输出如下所示,看起来不错:
Python 3.9.7 | packaged by conda-forge | (default, Sep 2 2021, 17:58:34)
[GCC 9.4.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import torch
>>> torch.zeros(2).cuda(0)
tensor([0., 0.], device='cuda:0')
>>> print(torch.__version__)
2.3.0+cu118
>>> print(f"Is CUDA available?: {torch.cuda.is_available()}")
Is CUDA available?: True
>>> print(f"Number of CUDA devices: {torch.cuda.device_count()}")
Number of CUDA devices: 3
>>> device = torch.device('cuda')
>>> print(f"A torch tensor: {torch.rand(5).to(device)}")
A torch tensor: tensor([0.6085, 0.7618, 0.6855, 0.5276, 0.1606], device='cuda:0')
完整堆栈跟踪:
Traceback (most recent call last):
File "/home/conda/lib/python3.9/site-packages/torch/cuda/__init__.py", line 306, in _lazy_init
queued_call()
File "/home/conda/lib/python3.9/site-packages/torch/cuda/__init__.py", line 174, in _check_capability
capability = get_device_capability(d)
File "/home/conda/lib/python3.9/site-packages/torch/cuda/__init__.py", line 430, in get_device_capability
prop = get_device_properties(device)
File "/home/conda/lib/python3.9/site-packages/torch/cuda/__init__.py", line 448, in get_device_properties
return _get_device_properties(device) # type: ignore[name-defined]
RuntimeError: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=, num_gpus=
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/conda/bin/yolo", line 8, in <module>
sys.exit(entrypoint())
File "/home/conda/lib/python3.9/site-packages/ultralytics/cfg/__init__.py", line 583, in entrypoint
getattr(model, mode)(**overrides) # default args from model
File "/home/conda/lib/python3.9/site-packages/ultralytics/engine/model.py", line 528, in val
validator(model=self.model)
File "/home/conda/lib/python3.9/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/home/conda/lib/python3.9/site-packages/ultralytics/engine/validator.py", line 126, in __call__
device=select_device(self.args.device, self.args.batch),
File "/home/conda/lib/python3.9/site-packages/ultralytics/utils/torch_utils.py", line 156, in select_device
p = torch.cuda.get_device_properties(i)
File "/home/conda/lib/python3.9/site-packages/torch/cuda/__init__.py", line 444, in get_device_properties
_lazy_init() # will define _get_device_properties
File "/home/conda/lib/python3.9/site-packages/torch/cuda/__init__.py", line 312, in _lazy_init
raise DeferredCudaCallError(msg) from e
torch.cuda.DeferredCudaCallError: CUDA call failed lazily at initialization with error: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=, num_gpus=
CUDA call was originally invoked at:
File "/home/conda/bin/yolo", line 5, in <module>
from ultralytics.cfg import entrypoint
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/home/conda/lib/python3.9/site-packages/ultralytics/__init__.py", line 5, in <module>
from ultralytics.data.explorer.explorer import Explorer
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/home/conda/lib/python3.9/site-packages/ultralytics/data/__init__.py", line 3, in <module>
from .base import BaseDataset
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/home/conda/lib/python3.9/site-packages/ultralytics/data/base.py", line 15, in <module>
from torch.utils.data import Dataset
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/home/conda/lib/python3.9/site-packages/torch/__init__.py", line 1478, in <module>
_C._initExtension(manager_path())
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/home/conda/lib/python3.9/site-packages/torch/cuda/__init__.py", line 238, in <module>
_lazy_call(_check_capability)
File "/home/conda/lib/python3.9/site-packages/torch/cuda/__init__.py", line 235, in _lazy_call
_queued_calls.append((callable, traceback.format_stack()))
---
这是 PyTorch 中的一个错误。要解决,找到并转到
python3.xx\site-packages\torch\cuda\__init__.py
并修改此函数:
删除或注释掉这个旧函数:
'''
@lru_cache(maxsize=1)
def device_count() -> int:
    r"""Return the number of GPUs available."""
    if not _is_compiled():
        return 0
    # bypass _device_count_nvml() if rocm (not supported)
    nvml_count = -1 if torch.version.hip else _device_count_nvml()
    return torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count
'''
并将其替换为新代码:
_cached_device_count: Optional[int] = None


def device_count() -> int:
    r"""Return the number of GPUs available.

    Replacement for the ``@lru_cache(maxsize=1)`` version: it caches the
    count manually, and only *after* CUDA has been initialized, so that
    changes to ``CUDA_VISIBLE_DEVICES`` made before initialization are
    still observed on the next call.

    Returns:
        int: number of visible CUDA devices (0 if PyTorch was built
        without CUDA support).
    """
    global _cached_device_count
    if not _is_compiled():
        return 0
    if _cached_device_count is not None:
        return _cached_device_count
    # Bypass _device_count_nvml() on ROCm (NVML is not supported there).
    # NOTE: the original broken snippet split this conditional expression
    # across two lines, which is a SyntaxError; it must be one expression.
    nvml_count = -1 if torch.version.hip else _device_count_nvml()
    r = torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count
    # NB: Do not cache the device count prior to CUDA initialization, because
    # the number of devices can change due to changes to CUDA_VISIBLE_DEVICES
    # setting prior to CUDA initialization.
    if _cached_device_count is None and _initialized:
        _cached_device_count = r
    return r