def do_register_gpu_opt(f):
    """Decorator: turn *f* into a local optimizer and register it for GPU runs.

    Wraps ``f`` with :func:`local_optimizer` so it tracks ``base_op_class``
    (a module-level name defined elsewhere in this file — TODO confirm), and
    registers the result in the CUDA optimizer database, but only when a CUDA
    backend is actually available.

    Parameters
    ----------
    f : callable
        A local-optimization function of the usual Theano signature.

    Returns
    -------
    callable
        The wrapped (and possibly registered) optimizer, so this can be used
        as a plain ``@do_register_gpu_opt`` decorator.
    """
    # Imports kept function-local: theano.sandbox.cuda has import-time side
    # effects (CUDA probing) that should not run at module import.
    from theano.sandbox.cuda import cuda_available, register_opt
    from theano.gof.opt import local_optimizer

    # Make f a local optimizer that only fires on nodes of base_op_class.
    f = local_optimizer([base_op_class])(f)
    if cuda_available:
        # Register in the GPU optimization database only when CUDA is usable;
        # on CPU-only installs the optimizer is created but never registered.
        f = register_opt()(f)
    return f
def f(local_opt):
    """Register *local_opt* in ``gpu_optimizer2`` and return it unchanged.

    NOTE(review): this is an inner closure — ``kwargs``, ``tracks``, ``tags``,
    ``DB``, ``local_optimizer`` and ``gpu_optimizer2`` are free variables from
    the enclosing registration decorator, not visible in this chunk.

    Parameters
    ----------
    local_opt : callable or DB
        Either a local-optimization function or an already-built optimizer
        database; a DB is registered as-is, a function is first wrapped.

    Returns
    -------
    callable or DB
        The *original* ``local_opt``, so the enclosing decorator is
        transparent to the decorated object.
    """
    # Explicit name from the decorator's kwargs wins; otherwise fall back to
    # the optimizer's own __name__.
    name = (kwargs and kwargs.pop("name")) or local_opt.__name__
    if isinstance(local_opt, DB):
        # Already an optimizer database — register it directly.
        opt = local_opt
    else:
        # Wrap the plain function so it tracks the ops given by `tracks`.
        opt = local_optimizer(tracks)(local_opt)
    gpu_optimizer2.register(name, opt, "fast_run", "gpuarray", *tags)
    # Return the undecorated input so stacked decorators still see it.
    return local_opt
mode=mode) return unpad_dims(ret_padded, img, 2, nd) pool_db.register( "local_gpua_pool_dnn_alternative", op_lifter([Pool])(local_gpua_pool_dnn_alternative), "gpuarray", "fast_compile", "fast_run", "cudnn", position=0, ) pool_db2.register( "local_gpua_pool_dnn_alternative", local_optimizer([Pool])(local_gpua_pool_dnn_alternative), "gpuarray", "fast_compile", "fast_run", "cudnn", position=0, ) def local_gpua_pool_dnn_grad_stride(op, ctx_name, inputs, outputs): if not dnn_available(ctx_name): return if not op.ignore_border: return inp, out, out_grad, ws, stride, pad = inputs nd = op.ndim