author | Dmitry Baranchuk <dmitrybaranchuk@gmail.com> | 2022-09-10 19:33:21 -0700
---|---|---
committer | GitHub <noreply@github.com> | 2022-09-10 19:33:21 -0700
commit | 843ad0631c65eabc7f64e80906ecf5482cc1a036 (patch) |
tree | 07ab541ec59ab3474a711c155daa118fc0ae6864 /bitsandbytes/cuda_setup/main.py |
parent | 8d34d36f150b0fd4914cdb56d4e3bda34c029ccc (diff) |
parent | 2e630b55f51d454f3bd723dffda68a07ef93190c (diff) |
Merge pull request #1 from TimDettmers/main
Update main branch
Diffstat (limited to 'bitsandbytes/cuda_setup/main.py')
-rw-r--r-- | bitsandbytes/cuda_setup/main.py | 11 |
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/bitsandbytes/cuda_setup/main.py b/bitsandbytes/cuda_setup/main.py
index 975b772..78a2844 100644
--- a/bitsandbytes/cuda_setup/main.py
+++ b/bitsandbytes/cuda_setup/main.py
@@ -17,9 +17,7 @@ evaluation:
 """

 import ctypes

-from pathlib import Path
-
-from ..utils import execute_and_return
 from .paths import determine_cuda_runtime_lib_path


@@ -28,7 +26,7 @@ def check_cuda_result(cuda, result_val):
     if result_val != 0:
         error_str = ctypes.c_char_p()
         cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
-        raise Exception(f"CUDA exception! Error code: {error_str.value.decode()}")
+        print(f"CUDA exception! Error code: {error_str.value.decode()}")

 def get_cuda_version(cuda, cudart_path):
     # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
@@ -57,7 +55,7 @@ def get_cuda_lib_handle():
         cuda = ctypes.CDLL("libcuda.so")
     except OSError:
         # TODO: shouldn't we error or at least warn here?
-        raise Exception('CUDA SETUP: ERROR! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
+        print('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
         return None

     check_cuda_result(cuda, cuda.cuInit(0))
@@ -80,7 +78,6 @@ def get_compute_capabilities(cuda):
     cc_major = ctypes.c_int()
     cc_minor = ctypes.c_int()

-    result = ctypes.c_int()
     device = ctypes.c_int()

     check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))
@@ -119,6 +116,10 @@ def evaluate_cuda_setup():
     print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')
     print('='*80)
     binary_name = "libbitsandbytes_cpu.so"
+    #if not torch.cuda.is_available():
+        #print('No GPU detected. Loading CPU library...')
+        #return binary_name
+
     cudart_path = determine_cuda_runtime_lib_path()
     if cudart_path is None:
         print(
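The behavioural change in this patch is that a missing CUDA driver or a failing driver call now produces a printed warning instead of a raised exception, so CPU-only machines can continue and fall back to the CPU binary. The snippet below is a minimal, standalone sketch of that ctypes pattern; it is not the bitsandbytes module itself, and the `__main__` usage at the end is added purely for illustration.

```python
import ctypes


def check_cuda_result(cuda, result_val):
    # As in the patched code: report a CUDA driver error instead of raising.
    if result_val != 0:
        error_str = ctypes.c_char_p()
        cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
        print(f"CUDA exception! Error code: {error_str.value.decode()}")


def get_cuda_lib_handle():
    # Load the CUDA driver library; warn (rather than raise) when it is absent.
    try:
        cuda = ctypes.CDLL("libcuda.so")
    except OSError:
        print("CUDA SETUP: WARNING! libcuda.so not found! "
              "Do you have a CUDA driver installed?")
        return None
    check_cuda_result(cuda, cuda.cuInit(0))
    return cuda


if __name__ == "__main__":
    # Hypothetical usage: count devices if the driver is present, otherwise
    # keep going (mirroring the CPU fallback the diff prepares for).
    cuda = get_cuda_lib_handle()
    if cuda is None:
        print("Falling back to the CPU-only library")
    else:
        n_gpus = ctypes.c_int()
        check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(n_gpus)))
        print(f"Detected {n_gpus.value} CUDA device(s)")
```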