def collect_system_information(trainer):
    """Record host memory, CUDA device and CPU details on the trainer's job info."""
    import psutil

    virtual_mem = psutil.virtual_memory()
    trainer.set_job_info('memory_total', virtual_mem.total)

    # at this point, theano is already initialised through KerasLogger
    from theano.sandbox import cuda

    trainer.set_job_info('cuda_available', cuda.cuda_available)
    if cuda.cuda_available:
        trainer.on_gpu = cuda.use.device_number is not None
        trainer.set_job_info('cuda_device_number', cuda.active_device_number())
        trainer.set_job_info('cuda_device_name', cuda.active_device_name())
        if cuda.cuda_ndarray.cuda_ndarray.mem_info:
            # mem_info() is indexed as [0]=free bytes, [1]=total bytes here.
            gpu_mem = cuda.cuda_ndarray.cuda_ndarray.mem_info()
            trainer.set_job_info('cuda_device_max_memory', gpu_mem[1])
            free_gb = gpu_mem[0] / 1024 / 1024 / 1024
            total_gb = gpu_mem[1] / 1024 / 1024 / 1024
            used_gb = total_gb - free_gb
            print("%.2fGB GPU memory used of %.2fGB" % (used_gb, total_gb))

    trainer.set_job_info('on_gpu', trainer.on_gpu)

    import cpuinfo
    cpu = cpuinfo.get_cpu_info()
    trainer.set_job_info('cpu_name', cpu['brand'])
    trainer.set_job_info('cpu', [cpu['hz_actual_raw'][0], cpu['count']])
def dnn_available():
    """Return whether cuDNN is usable on the active device.

    The answer is computed once and cached on the function object
    (``dnn_available.avail``); ``dnn_available.msg`` explains the result.
    """
    if dnn_available.avail is not None:
        return dnn_available.avail

    dev = active_device_number()
    if device_properties(dev)['major'] < 3:
        # cuDNN needs compute capability >= 3.0.
        dnn_available.msg = "Device not supported by cuDNN"
        dnn_available.avail = False
        return dnn_available.avail

    # Probe whether the linker can find the cuDNN library.
    dnn_available.msg = "Can not find the cuDNN library"
    dnn_available.avail = theano.gof.cmodule.GCC_compiler.try_flags(
        ["-l", "cudnn"])
    return dnn_available.avail
def test_conv_grads():
    """Yield conv_grad test cases over a grid of convolution parameters."""
    # Old GPUs (compute capability < 3) cannot run the cuDNN op.
    if cuda.device_properties(cuda.active_device_number())['major'] < 3:
        ops = [gemm_op]
    else:
        ops = [gemm_op, dnn_op]

    grid = (
        (mode, bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsample, op)
        for mode in ('valid', 'full')
        for bs in (1, 5)
        for ch in (4,)
        for nf in (3,)
        for rImg1 in (2, 5)
        for rImg2 in (2, 8)
        for rFlt1 in (1, 2)
        for rFlt2 in (1, 2)
        for subsample in ((1, 1), (1, 2), (2, 2))
        for op in ops
    )
    for case in grid:
        yield (conv_grad,) + case
def test_conv_grads():
    """Yield conv_grad test cases over a grid of convolution parameters."""
    # Without cuDNN (missing library or compute capability < 3) only the
    # GEMM-based implementation can be exercised.
    if (not cuda.dnn.dnn_available() or
            cuda.device_properties(cuda.active_device_number())['major'] < 3):
        ops = [gemm_op]
    else:
        ops = [gemm_op, dnn_op]

    grid = (
        (mode, bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsample, op)
        for mode in ('valid', 'full')
        for bs in (1, 5)
        for ch in (4,)
        for nf in (3,)
        for rImg1 in (2, 5)
        for rImg2 in (2, 8)
        for rFlt1 in (1, 2)
        for rFlt2 in (1, 2)
        for subsample in ((1, 1), (1, 2), (2, 2))
        for op in ops
    )
    for case in grid:
        yield (conv_grad,) + case
def configure(self, flags):
    """Set THEANO_FLAGS and import Theano, logging which device is in use.

    Must run before Theano is first imported, since THEANO_FLAGS is only
    read at import time. Safe to call more than once; later calls no-op.
    """
    if self.configured is True:
        return
    self.configured = True

    if 'theano' in sys.modules:
        # Too late to influence Theano's configuration.
        self.log.warning('Theano was already imported and cannot be reconfigured.')
        return

    # A user-provided THEANO_FLAGS in the environment takes precedence.
    os.environ.setdefault('THEANO_FLAGS', flags + ',print_active_device=False')

    # Suppress the cuda sandbox's import-time chatter, then restore it.
    cuda_logger = logging.getLogger('theano.sandbox.cuda')
    cuda_logger.setLevel(logging.CRITICAL)
    import theano
    cuda_logger.setLevel(logging.WARNING)

    try:
        import theano.sandbox.cuda as cd
        self.log.info('Using device gpu%i: %s',
                      cd.active_device_number(), cd.active_device_name())
    except AttributeError:
        # No active GPU device attributes -> running on CPU.
        self.log.info('Using device cpu0, with %r.', theano.config.floatX)
def dnn_available():
    """Return whether cuDNN can be compiled against and run on this device.

    The verdict is cached on the function object (``dnn_available.avail``)
    with a human-readable explanation in ``dnn_available.msg``.
    """
    if dnn_available.avail is not None:
        return dnn_available.avail

    dev = active_device_number()
    if device_properties(dev)['major'] < 3:
        # cuDNN requires compute capability >= 3.0.
        dnn_available.msg = "Device not supported by cuDNN"
        dnn_available.avail = False
        return dnn_available.avail

    # Minimal probe program: creating a cuDNN handle proves the headers
    # and library are present and loadable at run time.
    preambule = """
#include <stdio.h>
#include <cuda.h>
#include <cudnn.h>
#include <cudnn_helper.h>
"""
    body = """
cudnnHandle_t _handle = NULL;
cudnnStatus_t err;
if ((err = cudnnCreate(&_handle)) != CUDNN_STATUS_SUCCESS) {
  fprintf(stderr, "could not create cuDNN handle: %s",
          cudnnGetErrorString(err));
  return 1;
}
"""
    comp, run, out, err = gof.cmodule.GCC_compiler.try_flags(
        ["-l", "cudnn",
         "-I" + os.path.dirname(__file__),
         "-I" + os.path.join(theano.config.cuda.root, 'include'),
         "-L" + os.path.join(theano.config.cuda.root, 'lib64')],
        preambule=preambule, body=body, try_run=True, output=True)

    dnn_available.avail = comp and run
    if dnn_available.avail:
        dnn_available.msg = "cuDNN should work"
    else:
        dnn_available.msg = (
            "Theano is not able to use cuDNN. We got this error: \n" +
            str(err))
    return dnn_available.avail
def dnn_available():
    """Return whether cuDNN can be compiled against and run on this device.

    The verdict is cached on the function object (``dnn_available.avail``)
    with a human-readable explanation in ``dnn_available.msg``.
    """
    if dnn_available.avail is None:
        dev = active_device_number()
        if device_properties(dev)['major'] < 3:
            # cuDNN requires compute capability >= 3.0.
            dnn_available.msg = "Device not supported by cuDNN"
            dnn_available.avail = False
        else:
            # Minimal probe program: creating a cuDNN handle proves the
            # headers and library are present and loadable at run time.
            preambule = """
#include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <cudnn_helper.h>
"""
            body = """
cudnnHandle_t _handle = NULL;
cudnnStatus_t err;
if ((err = cudnnCreate(&_handle)) != CUDNN_STATUS_SUCCESS) {
  fprintf(stderr, "could not create cuDNN handle: %s",
          cudnnGetErrorString(err));
  return 1;
}
"""
            comp, run, out, err = gof.cmodule.GCC_compiler.try_flags(
                ["-l", "cudnn", "-I" + os.path.dirname(__file__)],
                preambule=preambule, body=body, try_run=True, output=True)
            dnn_available.avail = comp and run
            if dnn_available.avail:
                dnn_available.msg = "cuDNN should work"
            else:
                # BUGFIX: wrap err in str() — try_flags can hand back
                # compiler output as bytes (or None), and concatenating
                # that to a str raises TypeError. This also matches the
                # other dnn_available variant in this file.
                dnn_available.msg = (
                    "Theano is not able to use cuDNN. We got this error: \n" +
                    str(err))
    return dnn_available.avail
def test_dnn_subsample():
    """Run the subsample test suite against the cuDNN convolution op."""
    # cuDNN requires compute capability >= 3.0; skip on older GPUs.
    compute_major = cuda.device_properties(cuda.active_device_number())['major']
    if compute_major < 3:
        raise SkipTest('Current GPU too old')
    for t in _test_subsample(GpuDnnConv, theano_mode.including('cudnn')):
        yield t
def test_dnn_full():
    """Run the full-mode convolution test suite against the cuDNN op."""
    # cuDNN requires compute capability >= 3.0; skip on older GPUs.
    compute_major = cuda.device_properties(cuda.active_device_number())['major']
    if compute_major < 3:
        raise SkipTest('Current GPU too old')
    for t in _test_full(GpuDnnConv, mode=theano_mode.including("cudnn")):
        yield t