Example #1
0
    def _get_free_vram(self):
        """ Obtain the amount of VRAM that is available, in Megabytes, for each connected GPU.

        Returns
        -------
        list
             List of floats containing the amount of VRAM available, in Megabytes, for each
             connected GPU as corresponding to the values in :attr:`_handles`

        Notes
        -----
        There is no useful way to get free VRAM on PlaidML. OpenCL loads and unloads VRAM as
        required, so this returns the total memory available per card for AMD cards, which is
        not particularly useful.
        """
        self._initialize()
        megabyte = 1024 * 1024
        if self._is_plaidml:
            # PlaidML cannot report free memory; the stored total is used instead
            free_mem = self._plaid.vram
        elif IS_MACOS:
            free_mem = [pynvx.cudaGetMemFree(hnd, ignore=True) / megabyte
                        for hnd in self._handles]
        else:
            free_mem = [pynvml.nvmlDeviceGetMemoryInfo(hnd).free / megabyte
                        for hnd in self._handles]
        self._shutdown()
        self._log("debug", "GPU VRAM free: {}".format(free_mem))
        return free_mem
Example #2
0
 def get_free(self):
     """ Return the amount of free VRAM, in megabytes, for each GPU handle """
     self.initialize()
     to_megabytes = 1024 * 1024
     if IS_MACOS:
         vram = [pynvx.cudaGetMemFree(gpu, ignore=True) / to_megabytes
                 for gpu in self.handles]
     else:
         vram = [pynvml.nvmlDeviceGetMemoryInfo(gpu).free / to_megabytes
                 for gpu in self.handles]
     self.shutdown()
     if self.logger:
         self.logger.debug("GPU VRAM free: %s", vram)
     return vram
Example #3
0
 def get_free(self):
     """ Return the available VRAM, in megabytes, per connected GPU """
     self.initialize()
     vram = list()
     for handle in self.handles:
         # pynvx (macOS) and pynvml expose the free byte count differently
         if IS_MACOS:
             free_bytes = pynvx.cudaGetMemFree(handle, ignore=True)
         else:
             free_bytes = pynvml.nvmlDeviceGetMemoryInfo(handle).free
         vram.append(free_bytes / (1024 * 1024))
     self.shutdown()
     if self.logger:
         self.logger.debug("GPU VRAM free: %s", vram)
     return vram
Example #4
0
 def get_free(self):
     """ Return the free VRAM, in megabytes, for each GPU handle """
     self.initialize()
     mib = 1024 * 1024
     if is_macos:
         free = [pynvx.cudaGetMemFree(dev, ignore=True) / mib
                 for dev in self.handles]
     else:
         free = [pynvml.nvmlDeviceGetMemoryInfo(dev).free / mib
                 for dev in self.handles]
     self.shutdown()
     return free
Example #5
0
    def _get_free_vram(self) -> List[float]:
        """ Obtain the amount of VRAM that is available, in Megabytes, for each connected Nvidia
        GPU.

        Returns
        -------
        list
             List of `float`s containing the amount of VRAM available, in Megabytes, for each
             connected GPU as corresponding to the values in :attr:`_handles`
        """
        # cudaGetMemFree reports bytes, so divide through to Megabytes. True division always
        # yields floats, hence the List[float] return annotation (the previous List[int] was
        # inaccurate). NOTE(review): ``ignore=True`` appears to suppress per-device query
        # errors in pynvx — confirm against the pynvx API if this path is changed.
        vram = [
            pynvx.cudaGetMemFree(handle, ignore=True) / (1024 * 1024)  # pylint:disable=no-member
            for handle in self._handles
        ]
        self._log("debug", f"GPU VRAM free: {vram}")
        return vram
Example #6
0
 def get_free(self):
     """ Return the vram available, in megabytes, for each device """
     self.initialize()
     scale = 1024 * 1024
     if self.plaid:
         # PlaidML/OpenCL has no notion of free VRAM (memory is loaded and unloaded on
         # demand), so the stored total per-card memory is returned instead. Not
         # particularly useful, but the best available.
         vram = self.plaid.vram
     elif IS_MACOS:
         vram = [pynvx.cudaGetMemFree(dev, ignore=True) / scale
                 for dev in self.handles]
     else:
         vram = [pynvml.nvmlDeviceGetMemoryInfo(dev).free / scale
                 for dev in self.handles]
     self.shutdown()
     if self.logger:
         self.logger.debug("GPU VRAM free: %s", vram)
     return vram
Example #7
0
def print_info():
    """ Print CUDA system information and per-device details via pynvx.

    Initializes CUDA first; if initialization fails the error is printed and the
    function returns without querying any devices.
    """
    try:
        pynvx.cudaInit()
    except RuntimeError as e:
        print(e)
        return

    mib = 1024 * 1024
    print('================ CUDA INFO =====================')
    print('Driver Version  : {}'.format(pynvx.cudaSystemGetDriverVersion()))
    print('Runtime Version : {}'.format(pynvx.cudaSystemGetRuntimeVersion()))
    print('Device Count    : {}'.format(pynvx.cudaDeviceGetCount()))

    for handle in pynvx.cudaDeviceGetHandles():
        print('------------------------------------------------')
        print('Device {}:'.format(handle))
        # NOTE(review): ClockRate divides by 1024 for MHz; if pynvx reports kHz the
        # decimal factor 1000 would be correct — verify against the pynvx API.
        rows = (
            ('Device Name              : {}', pynvx.cudaGetName(handle)),
            ('Device ClockRate         : {} MHz', pynvx.cudaGetClockRate(handle) / 1024),
            ('Device ComputeCapability : {}', pynvx.cudaGetComputeCapability(handle)),
            ('Device ProcessorCount    : {}', pynvx.cudaGetMultiProcessorCount(handle)),
            ('Device PciBusID          : {}', pynvx.cudaGetPciBusID(handle)),
            ('Device PciDeviceID       : {}', pynvx.cudaGetPciDeviceID(handle)),
            ('Device PciDomainID       : {}', pynvx.cudaGetPciDomainID(handle)),
            ('Device MemTotal          : {} MiB', pynvx.cudaGetMemTotal(handle) / mib),
            ('Device MemFree           : {} MiB', pynvx.cudaGetMemFree(handle) / mib),
            ('Device MemUsed           : {} MiB', pynvx.cudaGetMemUsed(handle) / mib),
        )
        for template, value in rows:
            print(template.format(value))
Example #8
0
def test_memory_free():
    # Free memory for device 0 must be reported as one of the accepted integer types
    free_mem = m.cudaGetMemFree(0, ignore=True)
    assert type(free_mem) in integer_types