def get_supported_ccs():
    """Return the sorted tuple of (major, minor) compute capabilities
    supported by the detected CUDA Toolkit.

    The answer is cached in the module-global ``_supported_cc`` so the
    toolkit probe runs at most once per process.
    """
    global _supported_cc
    if _supported_cc:
        # Already computed for this process; serve the cached tuple.
        return _supported_cc

    try:
        from numba.cuda.cudadrv.runtime import runtime
        major, minor = runtime.get_version()
    except:  # noqa: E722
        # The CUDA Runtime may not be present
        major = 0

    # Compute capabilities known to each supported toolkit major version,
    # in sorted order.
    known_ccs = {
        9: ((3, 0), (3, 5), (5, 0), (5, 2), (5, 3), (6, 0), (6, 1),
            (6, 2), (7, 0)),
        10: ((3, 0), (3, 5), (5, 0), (5, 2), (5, 3), (6, 0), (6, 1),
             (6, 2), (7, 0), (7, 2), (7, 5)),
    }

    if major == 0:
        # No runtime detected at all.
        _supported_cc = ()
    elif major < 9:
        # Toolkit predates the supported range: expose nothing and warn.
        _supported_cc = ()
        warnings.warn(f"CUDA Toolkit {major}.{minor} is unsupported by "
                      "Numba - 9.0 is the minimum required version.")
    elif major in known_ccs:
        # CUDA 9.x / 10.x
        _supported_cc = known_ccs[major]
    else:
        # CUDA 11.0 and later
        _supported_cc = ((3, 5), (5, 0), (5, 2), (5, 3), (6, 0), (6, 1),
                         (6, 2), (7, 0), (7, 2), (7, 5), (8, 0))
    return _supported_cc
def test_get_version(self):
    """The reported CUDA runtime version must be one we recognise."""
    if config.ENABLE_CUDASIM:
        # The simulator reports a (-1, -1) sentinel version.
        versions = ((-1, -1),)
    else:
        versions = ((9, 0), (9, 1), (9, 2), (10, 0), (10, 1),
                    (10, 2), (11, 0))
    self.assertIn(runtime.get_version(), versions)
def get_supported_ccs():
    """Return the sorted tuple of (major, minor) compute capabilities
    supported by the installed CUDA Toolkit, caching the result in the
    module-global ``_supported_cc``.
    """
    global _supported_cc
    if _supported_cc:
        return _supported_cc

    try:
        from numba.cuda.cudadrv.runtime import runtime
        cudart_version_major = runtime.get_version()[0]
    except:  # noqa: E722
        # The CUDA Runtime may not be present
        cudart_version_major = 0

    # List of supported compute capability in sorted order
    if cudart_version_major == 0:
        # Bug fix: this previously read ``_supported_cc = (),`` which builds
        # ``((),)`` — a *truthy* 1-tuple containing an empty tuple.  That both
        # defeated the "no runtime" meaning (callers iterating it saw a bogus
        # empty capability) and poisoned the cache check above.
        _supported_cc = ()
    elif cudart_version_major < 9:
        # CUDA 8.x
        _supported_cc = ((2, 0), (2, 1), (3, 0), (3, 5), (5, 0), (5, 2),
                         (5, 3), (6, 0), (6, 1), (6, 2))
    elif cudart_version_major < 10:
        # CUDA 9.x
        _supported_cc = ((3, 0), (3, 5), (5, 0), (5, 2), (5, 3), (6, 0),
                         (6, 1), (6, 2), (7, 0))
    elif cudart_version_major < 11:
        # CUDA 10.x
        _supported_cc = ((3, 0), (3, 5), (5, 0), (5, 2), (5, 3), (6, 0),
                         (6, 1), (6, 2), (7, 0), (7, 2), (7, 5))
    else:
        # CUDA 11.0 and later
        _supported_cc = ((3, 5), (5, 0), (5, 2), (5, 3), (6, 0), (6, 1),
                         (6, 2), (7, 0), (7, 2), (7, 5), (8, 0))
    return _supported_cc
def get_supported_ccs():
    """Return the sorted tuple of (major, minor) compute capabilities
    supported by the installed CUDA Toolkit, caching the result in the
    module-global ``_supported_cc``.

    Warns (and returns an empty tuple) when the toolkit version is
    detected but unsupported.
    """
    global _supported_cc
    if _supported_cc:
        return _supported_cc

    try:
        from numba.cuda.cudadrv.runtime import runtime
        cudart_version = runtime.get_version()
    except:  # noqa: E722
        # The CUDA Runtime may not be present
        cudart_version = (0, 0)

    ctk_ver = f"{cudart_version[0]}.{cudart_version[1]}"
    unsupported_ver = f"CUDA Toolkit {ctk_ver} is unsupported by Numba - " \
        + "10.2 is the minimum required version."

    # List of supported compute capability in sorted order
    if cudart_version == (0, 0):
        _supported_cc = ()
    elif cudart_version == (10, 2):
        _supported_cc = ((3, 5), (3, 7), (5, 0), (5, 2), (5, 3), (6, 0),
                         (6, 1), (6, 2), (7, 0), (7, 2), (7, 5))
    elif cudart_version == (11, 0):
        _supported_cc = ((3, 5), (3, 7), (5, 0), (5, 2), (5, 3), (6, 0),
                         (6, 1), (6, 2), (7, 0), (7, 2), (7, 5), (8, 0))
    elif cudart_version > (11, 4):
        # Bug fix: this branch must be tested BEFORE ``> (11, 0)``.  In the
        # original ordering ``> (11, 0)`` matched every later version first,
        # so CUDA >= 11.5 never gained the (8, 7) capability.
        _supported_cc = ((3, 5), (3, 7), (5, 0), (5, 2), (5, 3), (6, 0),
                         (6, 1), (6, 2), (7, 0), (7, 2), (7, 5), (8, 0),
                         (8, 6), (8, 7))
    elif cudart_version > (11, 0):
        _supported_cc = ((3, 5), (3, 7), (5, 0), (5, 2), (5, 3), (6, 0),
                         (6, 1), (6, 2), (7, 0), (7, 2), (7, 5), (8, 0),
                         (8, 6))
    else:
        # A detected but out-of-range toolkit (e.g. 10.1 or older).
        _supported_cc = ()
        warnings.warn(unsupported_ver)

    return _supported_cc
def get_sysinfo():
    """Collect a dictionary of host/system diagnostics: platform, CPU,
    Python, CUDA, NumPy, SVML, threading-backend and package information.

    Failures in individual probes are recorded in the module-level
    ``_error_log`` / ``_warning_log`` lists rather than raised, so a
    partial report is always returned.
    """
    # Gather the information that shouldn't raise exceptions
    sys_info = {
        _start: datetime.now(),
        _start_utc: datetime.utcnow(),
        _machine: platform.machine(),
        _cpu_name: llvmbind.get_host_cpu_name(),
        _cpu_count: multiprocessing.cpu_count(),
        _platform_name: platform.platform(aliased=True),
        _platform_release: platform.release(),
        _os_name: platform.system(),
        _os_version: platform.version(),
        _python_comp: platform.python_compiler(),
        _python_impl: platform.python_implementation(),
        _python_version: platform.python_version(),
        _numba_env_vars: {k: v for (k, v) in os.environ.items()
                          if k.startswith('NUMBA_')},
        _numba_version: version_number,
        _llvm_version: '.'.join(str(i) for i in llvmbind.llvm_version_info),
        _llvmlite_version: llvmlite_version,
        _psutil: _psutil_import,
    }

    # CPU features
    try:
        feature_map = llvmbind.get_host_cpu_features()
    except RuntimeError as e:
        _error_log.append(f'Error (CPU features): {e}')
    else:
        features = sorted([key for key, value in feature_map.items()
                           if value])
        sys_info[_cpu_features] = ' '.join(features)

    # Python locale
    # On MacOSX, getdefaultlocale can raise. Check again if Py > 3.7.5
    try:
        # If $LANG is unset, getdefaultlocale() can return (None, None), make
        # sure we can encode this as strings by casting explicitly.
        sys_info[_python_locale] = '.'.join(
            [str(i) for i in locale.getdefaultlocale()])
    except Exception as e:
        _error_log.append(f'Error (locale): {e}')

    # CUDA information
    try:
        cu.list_devices()[0]  # will a device initialise?
    except Exception as e:
        sys_info[_cu_dev_init] = False
        msg_not_found = "CUDA driver library cannot be found"
        msg_disabled_by_user = "******"
        msg_end = " or no CUDA enabled devices are present."
        msg_generic_problem = "CUDA device initialisation problem."
        msg = getattr(e, 'msg', None)
        if msg is not None:
            # Classify the failure from the driver's message text so the
            # report gives an actionable hint rather than a raw traceback.
            if msg_not_found in msg:
                err_msg = msg_not_found + msg_end
            elif msg_disabled_by_user in msg:
                err_msg = msg_disabled_by_user + msg_end
            else:
                err_msg = msg_generic_problem + " Message:" + msg
        else:
            err_msg = msg_generic_problem + " " + str(e)
        # Best effort error report
        _warning_log.append("Warning (cuda): %s\nException class: %s" %
                            (err_msg, str(type(e))))
    else:
        try:
            sys_info[_cu_dev_init] = True
            output = StringIO()
            with redirect_stdout(output):
                cu.detect()
            sys_info[_cu_detect_out] = output.getvalue()
            output.close()
            sys_info[_cu_drv_ver] = '%s.%s' % cudriver.get_version()
            sys_info[_cu_rt_ver] = '%s.%s' % curuntime.get_version()
            output = StringIO()
            with redirect_stdout(output):
                cudadrv.libs.test(sys.platform, print_paths=False)
            sys_info[_cu_lib_test] = output.getvalue()
            output.close()
            try:
                from cuda import cuda  # noqa: F401
                nvidia_bindings_available = True
            except ImportError:
                nvidia_bindings_available = False
            sys_info[_cu_nvidia_bindings] = nvidia_bindings_available
            nv_binding_used = bool(cudadrv.driver.USE_NV_BINDING)
            sys_info[_cu_nvidia_bindings_used] = nv_binding_used
        except Exception as e:
            # A device initialised but further probing failed; report, don't
            # abort the whole sysinfo collection.
            _warning_log.append(
                "Warning (cuda): Probing CUDA failed "
                "(device and driver present, runtime problem?)\n"
                f"(cuda) {type(e)}: {e}")

    # NumPy information
    sys_info[_numpy_version] = np.version.full_version
    try:
        # NOTE: These consts were added in NumPy 1.20
        from numpy.core._multiarray_umath import (
            __cpu_features__,
            __cpu_dispatch__,
            __cpu_baseline__,
        )
    except ImportError:
        sys_info[_numpy_AVX512_SKX_detected] = False
    else:
        feat_filtered = [k for k, v in __cpu_features__.items() if v]
        sys_info[_numpy_supported_simd_features] = feat_filtered
        sys_info[_numpy_supported_simd_dispatch] = __cpu_dispatch__
        sys_info[_numpy_supported_simd_baseline] = __cpu_baseline__
        sys_info[_numpy_AVX512_SKX_detected] = \
            __cpu_features__.get("AVX512_SKX", False)

    # SVML information
    # Replicate some SVML detection logic from numba.__init__ here.
    # If SVML load fails in numba.__init__ the splitting of the logic
    # here will help diagnosing the underlying issue.
    svml_lib_loaded = True
    try:
        if sys.platform.startswith('linux'):
            llvmbind.load_library_permanently("libsvml.so")
        elif sys.platform.startswith('darwin'):
            llvmbind.load_library_permanently("libsvml.dylib")
        elif sys.platform.startswith('win'):
            llvmbind.load_library_permanently("svml_dispmd")
        else:
            svml_lib_loaded = False
    except Exception:
        svml_lib_loaded = False
    func = getattr(llvmbind.targets, "has_svml", None)
    sys_info[_llvm_svml_patched] = func() if func else False
    sys_info[_svml_state] = config.USING_SVML
    sys_info[_svml_loaded] = svml_lib_loaded
    sys_info[_svml_operational] = all((
        sys_info[_svml_state],
        sys_info[_svml_loaded],
        sys_info[_llvm_svml_patched],
    ))

    # Check which threading backends are available.
    def parse_error(e, backend):
        # parses a linux based error message, this is to provide feedback
        # and hide user paths etc
        try:
            path, problem, symbol = [x.strip() for x in e.msg.split(':')]
            extn_dso = os.path.split(path)[1]
            if backend in extn_dso:
                return "%s: %s" % (problem, symbol)
        except Exception:
            pass
        return "Unknown import problem."

    try:
        # check import is ok, this means the DSO linkage is working
        from numba.np.ufunc import tbbpool  # NOQA
        # check that the version is compatible, this is a check performed at
        # runtime (well, compile time), it will also ImportError if there's
        # a problem.
        from numba.np.ufunc.parallel import _check_tbb_version_compatible
        _check_tbb_version_compatible()
        sys_info[_tbb_thread] = True
    except ImportError as e:
        # might be a missing symbol due to e.g. tbb libraries missing
        sys_info[_tbb_thread] = False
        sys_info[_tbb_error] = parse_error(e, 'tbbpool')

    try:
        from numba.np.ufunc import omppool
        sys_info[_openmp_thread] = True
        sys_info[_openmp_vendor] = omppool.openmp_vendor
    except ImportError as e:
        sys_info[_openmp_thread] = False
        sys_info[_openmp_error] = parse_error(e, 'omppool')

    try:
        from numba.np.ufunc import workqueue  # NOQA
        sys_info[_wkq_thread] = True
    except ImportError as e:
        # Bug fix: the failure branch previously set ``_wkq_thread = True``,
        # contradicting both the recorded ``_wkq_error`` and the False-on-
        # failure convention of the tbb/openmp probes above.
        sys_info[_wkq_thread] = False
        sys_info[_wkq_error] = parse_error(e, 'workqueue')

    # Look for conda and installed packages information
    cmd = ('conda', 'info', '--json')
    try:
        conda_out = check_output(cmd)
    except Exception as e:
        _warning_log.append(f'Warning: Conda not available.\n Error was {e}\n')
        # Conda is not available, try pip list to list installed packages
        cmd = (sys.executable, '-m', 'pip', 'list')
        try:
            reqs = check_output(cmd)
        except Exception as e:
            _error_log.append(f'Error (pip): {e}')
        else:
            sys_info[_inst_pkg] = reqs.decode().splitlines()
    else:
        jsond = json.loads(conda_out.decode())
        keys = {
            'conda_build_version': _conda_build_ver,
            'conda_env_version': _conda_env_ver,
            'platform': _conda_platform,
            'python_version': _conda_python_ver,
            'root_writable': _conda_root_writable,
        }
        for conda_k, sysinfo_k in keys.items():
            sys_info[sysinfo_k] = jsond.get(conda_k, 'N/A')

        # Get info about packages in current environment
        cmd = ('conda', 'list')
        try:
            conda_out = check_output(cmd)
        except CalledProcessError as e:
            _error_log.append(f'Error (conda): {e}')
        else:
            data = conda_out.decode().splitlines()
            sys_info[_inst_pkg] = [l for l in data if not l.startswith('#')]

    sys_info.update(get_os_spec_info(sys_info[_os_name]))
    sys_info[_errors] = _error_log
    sys_info[_warnings] = _warning_log
    sys_info[_runtime] = (datetime.now() - sys_info[_start]).total_seconds()
    return sys_info
return '' data_layout = { 32: ('e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-' 'f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64'), 64: ('e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-' 'f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64') } default_data_layout = data_layout[tuple.__itemsize__ * 8] try: from numba.cuda.cudadrv.runtime import runtime cudart_version_major = runtime.get_version()[0] except: # The CUDA Runtime may not be present cudart_version_major = 0 # List of supported compute capability in sorted order if cudart_version_major == 0: SUPPORTED_CC = (), elif cudart_version_major < 9: # CUDA 8.x SUPPORTED_CC = (2, 0), (2, 1), (3, 0), (3, 5), (5, 0), (5, 2), (5, 3), (6, 0), (6, 1), (6,
def test_get_version(self):
    """The reported CUDA runtime version must be a supported one."""
    version = runtime.get_version()
    self.assertIn(version, SUPPORTED_VERSIONS)