Esempio n. 1
0
    def get_used(self):
        """ Return the VRAM currently in use.

        Returns
        -------
        list
            Used VRAM in MiB, one entry per handle in ``self.handles``
        """
        self.initialize()
        try:
            # pynvx serves macOS; pynvml serves other platforms
            if IS_MACOS:
                vram = [pynvx.cudaGetMemUsed(handle, ignore=True) / (1024 * 1024)
                        for handle in self.handles]
            else:
                vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).used / (1024 * 1024)
                        for handle in self.handles]
        finally:
            # Release the NVML/pynvx session even if a device query raised,
            # otherwise the library handle leaks
            self.shutdown()

        if self.logger:
            self.logger.verbose("GPU VRAM used: %s", vram)
        return vram
Esempio n. 2
0
    def get_used(self):
        """ Return the VRAM currently in use.

        Returns
        -------
        list
            Used VRAM in MiB for each device handle in ``self.handles``
        """
        self.initialize()
        try:
            # Branch on platform: pynvx wraps CUDA on macOS, pynvml elsewhere
            if IS_MACOS:
                vram = [pynvx.cudaGetMemUsed(handle, ignore=True) / (1024 * 1024)
                        for handle in self.handles]
            else:
                vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).used / (1024 * 1024)
                        for handle in self.handles]
        finally:
            # Guarantee shutdown so a failed query cannot leak the session
            self.shutdown()

        if self.logger:
            self.logger.verbose("GPU VRAM used: %s", vram)
        return vram
Esempio n. 3
0
    def get_used(self):
        """ Return the VRAM currently in use.

        Returns
        -------
        list
            Used VRAM in MiB for each device handle in ``self.handles``
        """
        self.initialize()
        try:
            # pynvx is the macOS backend; pynvml is used on other platforms
            if is_macos:
                vram = [
                    pynvx.cudaGetMemUsed(handle, ignore=True) / (1024 * 1024)
                    for handle in self.handles
                ]
            else:
                vram = [
                    pynvml.nvmlDeviceGetMemoryInfo(handle).used / (1024 * 1024)
                    for handle in self.handles
                ]
        finally:
            # Always release the session, even when a device query raises
            self.shutdown()

        if self.verbose:
            print("GPU VRAM used:    {}".format(vram))

        return vram
Esempio n. 4
0
    def get_used(self):
        """ Return the VRAM currently in use.

        Returns
        -------
        list
            Used VRAM in MiB per device. All zeros on PlaidML, where the
            figure cannot be obtained.
        """
        self.initialize()
        try:
            if self.plaid:
                # NB There is no useful way to get allocated VRAM on PlaidML.
                # OpenCL loads and unloads VRAM as required, so this returns 0
                # It's not particularly useful
                vram = [0] * self.device_count
            elif IS_MACOS:
                vram = [pynvx.cudaGetMemUsed(handle, ignore=True) / (1024 * 1024)
                        for handle in self.handles]
            else:
                vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).used / (1024 * 1024)
                        for handle in self.handles]
        finally:
            # Release the backend session even if a device query failed
            self.shutdown()

        if self.logger:
            self.logger.verbose("GPU VRAM used: %s", vram)
        return vram
Esempio n. 5
0
def print_info():
    """ Print CUDA driver/runtime versions and per-device details via pynvx.

    Bails out early (printing the error) if CUDA initialisation fails.
    """
    try:
        pynvx.cudaInit()
    except RuntimeError as e:
        print(e)
        return

    print('================ CUDA INFO =====================')
    print('Driver Version  : {}'.format(pynvx.cudaSystemGetDriverVersion()))
    print('Runtime Version : {}'.format(pynvx.cudaSystemGetRuntimeVersion()))
    print('Device Count    : {}'.format(pynvx.cudaDeviceGetCount()))

    mib = 1024 * 1024
    for handle in pynvx.cudaDeviceGetHandles():
        print('------------------------------------------------')
        print('Device {}:'.format(handle))
        # (template, value) pairs, printed in a fixed order so the report
        # layout matches the header block above
        rows = [
            ('Device Name              : {}',
             pynvx.cudaGetName(handle)),
            ('Device ClockRate         : {} MHz',
             pynvx.cudaGetClockRate(handle) / 1024),
            ('Device ComputeCapability : {}',
             pynvx.cudaGetComputeCapability(handle)),
            ('Device ProcessorCount    : {}',
             pynvx.cudaGetMultiProcessorCount(handle)),
            ('Device PciBusID          : {}',
             pynvx.cudaGetPciBusID(handle)),
            ('Device PciDeviceID       : {}',
             pynvx.cudaGetPciDeviceID(handle)),
            ('Device PciDomainID       : {}',
             pynvx.cudaGetPciDomainID(handle)),
            ('Device MemTotal          : {} MiB',
             pynvx.cudaGetMemTotal(handle) / mib),
            ('Device MemFree           : {} MiB',
             pynvx.cudaGetMemFree(handle) / mib),
            ('Device MemUsed           : {} MiB',
             pynvx.cudaGetMemUsed(handle) / mib),
        ]
        for template, value in rows:
            print(template.format(value))
Esempio n. 6
0
def test_memory_used():
    """ cudaGetMemUsed with ignore=True must yield an integer value. """
    v = m.cudaGetMemUsed(0, ignore=True)
    # isinstance is the idiomatic check and also accepts int subclasses,
    # which `type(v) in integer_types` would wrongly reject; tuple() keeps
    # this working whether integer_types is a tuple or another sequence
    assert isinstance(v, tuple(integer_types))