def _get_vram(self):
    """ Obtain the total VRAM in Megabytes for each connected GPU.

    Returns
    -------
    list
        List of floats containing the total amount of VRAM in Megabytes
        for each connected GPU as corresponding to the values in
        :attr:`_handles`
    """
    self._initialize()
    if self._device_count == 0:
        # No GPUs detected: nothing to query
        vram = []
    elif self._is_plaidml:
        # plaidML backend reports VRAM itself
        vram = self._plaid.vram
    elif IS_MACOS:
        # pynvx (macOS) reports bytes; convert to Megabytes
        vram = [pynvx.cudaGetMemTotal(handle, ignore=True) / (1024 * 1024)
                for handle in self._handles]
    else:
        # pynvml reports bytes; convert to Megabytes
        vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).total / (1024 * 1024)
                for handle in self._handles]
    self._log("debug", "GPU VRAM: {}".format(vram))
    return vram
def get_vram(self):
    """ Return total vram in megabytes per device """
    self.initialize()
    megabyte = 1024 * 1024
    if IS_MACOS:
        # macOS path goes through pynvx; values are reported in bytes
        vram = [pynvx.cudaGetMemTotal(device, ignore=True) / megabyte
                for device in self.handles]
    else:
        # All other platforms use pynvml; values are reported in bytes
        vram = [pynvml.nvmlDeviceGetMemoryInfo(device).total / megabyte
                for device in self.handles]
    if self.logger:
        self.logger.debug("GPU VRAM: %s", vram)
    return vram
def get_vram(self):
    """ Return total vram in megabytes per device """
    self.initialize()
    scale = 1024 * 1024
    if is_macos:
        # macOS: query via pynvx (byte counts)
        return [pynvx.cudaGetMemTotal(device, ignore=True) / scale
                for device in self.handles]
    # Everywhere else: query via pynvml (byte counts)
    return [pynvml.nvmlDeviceGetMemoryInfo(device).total / scale
            for device in self.handles]
def get_vram(self):
    """ Return total vram in megabytes per device """
    self.initialize()
    divisor = 1024 * 1024
    if self.device_count == 0:
        # No devices: short-circuit with an empty result
        vram = []
    elif IS_MACOS:
        # macOS path via pynvx; bytes converted to Megabytes
        vram = [pynvx.cudaGetMemTotal(device, ignore=True) / divisor
                for device in self.handles]
    else:
        # Standard path via pynvml; bytes converted to Megabytes
        vram = [pynvml.nvmlDeviceGetMemoryInfo(device).total / divisor
                for device in self.handles]
    if self.logger:
        self.logger.debug("GPU VRAM: %s", vram)
    return vram
def _get_vram(self) -> List[float]:
    """ Obtain the VRAM in Megabytes for each connected Nvidia GPU as identified in
    :attr:`_handles`.

    Returns
    -------
    list
        The VRAM in Megabytes for each connected Nvidia GPU
    """
    # cudaGetMemTotal reports bytes. True division by 1024 * 1024 always
    # produces floats, hence the List[float] return annotation (the previous
    # List[int] annotation was incorrect).
    vram = [
        pynvx.cudaGetMemTotal(handle, ignore=True) / (1024 * 1024)  # pylint:disable=no-member
        for handle in self._handles
    ]
    self._log("debug", f"GPU VRAM: {vram}")
    return vram
def print_info():
    # Print CUDA system information and per-device details to stdout via pynvx.
    # Best-effort: if CUDA cannot be initialised, the error is printed and the
    # function returns instead of raising.
    try:
        pynvx.cudaInit()
    except RuntimeError as e:
        # No usable CUDA driver/device: report and bail out
        print(e)
        return

    print('================ CUDA INFO =====================')
    print('Driver Version   : {}'.format(pynvx.cudaSystemGetDriverVersion()))
    print('Runtime Version  : {}'.format(pynvx.cudaSystemGetRuntimeVersion()))
    print('Device Count     : {}'.format(pynvx.cudaDeviceGetCount()))

    # One section per device handle returned by pynvx
    handles = pynvx.cudaDeviceGetHandles()
    for handle in handles:
        print('------------------------------------------------')
        print('Device {}:'.format(handle))
        print('Device Name              : {}'.format(
            pynvx.cudaGetName(handle)))
        # NOTE(review): the CUDA API reports clock rate in kHz, so dividing by
        # 1024 (rather than 1000) slightly understates the MHz value — confirm
        # this conversion is intended.
        print('Device ClockRate         : {} MHz'.format(
            pynvx.cudaGetClockRate(handle) / 1024))
        print('Device ComputeCapability : {}'.format(
            pynvx.cudaGetComputeCapability(handle)))
        print('Device ProcessorCount    : {}'.format(
            pynvx.cudaGetMultiProcessorCount(handle)))
        print('Device PciBusID          : {}'.format(
            pynvx.cudaGetPciBusID(handle)))
        print('Device PciDeviceID       : {}'.format(
            pynvx.cudaGetPciDeviceID(handle)))
        print('Device PciDomainID       : {}'.format(
            pynvx.cudaGetPciDomainID(handle)))
        # Memory figures are reported in bytes; converted to MiB for display
        print('Device MemTotal          : {} MiB'.format(
            pynvx.cudaGetMemTotal(handle) / (1024 * 1024)))
        print('Device MemFree           : {} MiB'.format(
            pynvx.cudaGetMemFree(handle) / (1024 * 1024)))
        print('Device MemUsed           : {} MiB'.format(
            pynvx.cudaGetMemUsed(handle) / (1024 * 1024)))
def test_memory_total():
    """Total device memory should be reported as one of the expected integer types."""
    v = m.cudaGetMemTotal(0, ignore=True)
    # isinstance is the idiomatic type check; it accepts a tuple of types
    # (as integer_types is) and, unlike `type(v) in ...`, also admits subclasses.
    assert isinstance(v, integer_types)