def local_to_gpu(node):
    """
    op(host_from_gpu()) -> host_from_gpu(op)
    gpu_from_host(op) -> op(gpu_from_host)
    """
    if isinstance(node.op, op):
        # op(host_from_gpu()) -> host_from_gpu(op)
        # If any of the inputs that go on the GPU are already on the
        # GPU, move the op to the GPU.
        if any(node.inputs[idx].owner and
               isinstance(node.inputs[idx].owner.op, cuda.HostFromGpu)
               for idx in to_gpu):
            new_inp = list(node.inputs)
            for idx in to_gpu:
                new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
            return [cuda.host_from_gpu(op()(*new_inp))]
    if node.op == cuda.gpu_from_host:
        # gpu_from_host(op) -> op(gpu_from_host)
        host_input = node.inputs[0]
        if host_input.owner and isinstance(host_input.owner.op, op):
            op_node = host_input.owner
            new_inp = list(op_node.inputs)
            for idx in to_gpu:
                new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
            return [op()(*new_inp)]
    return False
def local_to_gpu(node):
    """
    op(host_from_gpu()) -> host_from_gpu(op)
    gpu_from_host(op) -> op(gpu_from_host)
    """
    if isinstance(node.op, op):
        # op(host_from_gpu()) -> host_from_gpu(op)
        # If any of the inputs that go on the GPU are already on the
        # GPU, move the op to the GPU.
        if any(node.inputs[idx].owner and
               isinstance(node.inputs[idx].owner.op, cuda.HostFromGpu)
               for idx in to_gpu):
            new_inp = list(node.inputs)
            for idx in to_gpu:
                new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
            result_node = op()(*new_inp)
            copy_stack_trace(node.outputs[0], result_node)
            transfer_node = result_node.transfer('cpu')
            copy_stack_trace(node.outputs[0], transfer_node)
            return [transfer_node]
    if node.op == cuda.gpu_from_host:
        # gpu_from_host(op) -> op(gpu_from_host)
        host_input = node.inputs[0]
        if host_input.owner and isinstance(host_input.owner.op, op):
            op_node = host_input.owner
            new_inp = list(op_node.inputs)
            for idx in to_gpu:
                new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
            new_node = op()(*new_inp)
            copy_stack_trace(host_input, new_node)
            return [new_node]
    return False
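# A minimal sketch (an assumption, not part of the original source) of how a
# rewrite like ``local_to_gpu`` is typically produced and registered. ``op``
# and ``to_gpu`` appear free above, which suggests an enclosing factory;
# ``theano.gof.local_optimizer`` is the standard decorator for node-level
# rewrites, while the factory itself and its name are illustrative.
def make_to_gpu_optimizer(op, to_gpu):
    @theano.gof.local_optimizer([op, cuda.gpu_from_host])
    def local_to_gpu(node):
        # ... body exactly as defined above ...
        return False
    local_to_gpu.__name__ = 'local_to_gpu_' + op.__name__
    return local_to_gpu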
def test_gpu_opt():
    if not cuda.cuda_available:
        # Skip test if cuda_ndarray is not available.
        from nose.plugins.skip import SkipTest
        raise SkipTest('Optional package cuda not available')

    # We test the case where we put the op on the gpu when the output
    # is moved to the gpu.
    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform('auto')(p, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([p, u], m_gpu, allow_input_downcast=True,
                 mode=get_mode(True))
    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()])
    pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval = f(pval, uval)

    # Test with a row; it was failing in the past.
    r = tensor.frow()
    m = multinomial.MultinomialFromUniform('auto')(r, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([r, u], m_gpu, allow_input_downcast=True,
                 mode=get_mode(True))
    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()])
    pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval2 = f(pval, uval)
def test_reject_bad_filt_number():
    for cls in (FilterActs, ImageActs):
        # Tests that running FilterActs with a # of filters per
        # group that is not 16 is an error
        rng = np.random.RandomState([2012, 10, 9])
        batch_size = 5
        rows = 10
        cols = 9
        channels = 3
        filter_rows = 4
        filter_cols = filter_rows
        num_filters = 6

        images = shared(rng.uniform(
            -1., 1., (channels, rows, cols, batch_size)).astype('float32'),
            name='images')
        filters = shared(rng.uniform(
            -1., 1., (channels, filter_rows, filter_cols,
                      num_filters)).astype('float32'), name='filters')

        gpu_images = gpu_from_host(images)
        gpu_filters = gpu_from_host(filters)

        if cls is ImageActs:
            output = cls()(gpu_images, gpu_filters,
                           as_tensor_variable((rows, cols)))
        else:
            output = cls()(gpu_images, gpu_filters)
        f = function([], output)
        try:
            output = f()
        except ValueError:
            continue
        assert False
def test_reject_rect():
    for cls in (FilterActs, ImageActs):
        # Tests that running FilterActs with a non-square
        # kernel is an error
        rng = np.random.RandomState([2012, 10, 9])
        batch_size = 5
        rows = 10
        cols = 9
        channels = 3
        filter_rows = 4
        filter_cols = filter_rows + 1
        num_filters = 6

        images = shared(rng.uniform(
            -1., 1., (channels, rows, cols, batch_size)).astype('float32'),
            name='images')
        filters = shared(rng.uniform(
            -1., 1., (channels, filter_rows, filter_cols,
                      num_filters)).astype('float32'), name='filters')

        gpu_images = gpu_from_host(images)
        gpu_filters = gpu_from_host(filters)

        if cls is ImageActs:
            output = cls()(gpu_images, gpu_filters,
                           as_tensor_variable((rows, cols)))
        else:
            output = cls()(gpu_images, gpu_filters)
        f = function([], output)
        try:
            output = f()
        except ValueError:
            continue
        assert False
def test_match_valid_conv():
    # Tests that running FilterActs with no padding is the same as running
    # theano's conv2D in valid mode
    rng = np.random.RandomState([2012, 10, 9])
    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 16

    images = shared(rng.uniform(
        -1., 1., (channels, rows, cols, batch_size)).astype('float32'),
        name='images')
    filters = shared(rng.uniform(
        -1., 1., (channels, filter_rows, filter_cols,
                  num_filters)).astype('float32'), name='filters')

    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)

    output = FilterActs()(gpu_images, gpu_filters)
    output = host_from_gpu(output)

    images_bc01 = images.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters_bc01[:, :, ::-1, ::-1]

    output_conv2d = conv2d(images_bc01, filters_bc01, border_mode='valid')
    output_conv2d = output_conv2d.dimshuffle(1, 2, 3, 0)

    f = function([], [output, output_conv2d])

    output, output_conv2d = f()

    warnings.warn("""test_match_valid_conv success criterion is not very
        strict. Can we verify that this is OK? One possibility is that
        theano is numerically unstable and Alex's code is better.
        Probably theano CPU 64 bit is OK but it's worth checking the
        others.""")

    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print 'cuda-convnet shape: ', output.shape
            print 'theano shape: ', output_conv2d.shape
            assert False
        err = np.abs(output - output_conv2d)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (output.min(), output.max())
        print 'theano value range: ', (output_conv2d.min(),
                                       output_conv2d.max())
        assert False
def test_alloc_memset_0():
    i = tensor.iscalar()
    z = numpy.zeros((1,), dtype='float32')
    o = numpy.ones((1,), dtype='float32')
    ones = numpy.ones((2,), dtype='float32')

    # Test with 0
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(z)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc) and topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 0).all()

    # Test with 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(o)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 1).all()

    # Test with 1, 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(ones)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(2)) == 1).all()
def test_grad():
    rng = np.random.RandomState([2012, 10, 9])

    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 16

    images = shared(rng.uniform(-1.0, 1.0, (channels, rows, cols,
                    batch_size)).astype("float32"), name="images")
    filters = shared(rng.uniform(-1.0, 1.0, (channels, filter_rows,
                     filter_cols, num_filters)).astype("float32"),
                     name="filters")

    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)

    output = FilterActs()(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # XXX: use verify_grad
    output_grad = grad(output.sum(), images)

    images_bc01 = images.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters_bc01[:, :, ::-1, ::-1]

    output_conv2d = conv2d(images_bc01, filters_bc01, border_mode="valid")
    output_conv2d = output_conv2d.dimshuffle(1, 2, 3, 0)
    # XXX: use verify_grad
    output_conv2d_grad = grad(output_conv2d.sum(), images)

    f = function([], [output_grad, output_conv2d_grad])

    output_grad, output_conv2d_grad = f()

    warnings.warn("""test_match_valid_conv success criterion is not very
        strict. Can we verify that this is OK? One possibility is that
        theano is numerically unstable and Alex's code is better.
        Probably theano CPU 64 bit is OK but it's worth checking the
        others.""")

    if np.abs(output_grad - output_conv2d_grad).max() > 7.7e-6:
        assert type(output_grad) == type(output_conv2d_grad)
        assert output_grad.dtype == output_conv2d_grad.dtype
        if output_grad.shape != output_conv2d_grad.shape:
            print "cuda-convnet shape: ", output_grad.shape
            print "theano shape: ", output_conv2d_grad.shape
            assert False
        err = np.abs(output_grad - output_conv2d_grad)
        print "absolute error range: ", (err.min(), err.max())
        print "mean absolute error: ", err.mean()
        print "cuda-convnet value range: ", (output_grad.min(),
                                             output_grad.max())
        print "theano value range: ", (output_conv2d_grad.min(),
                                       output_conv2d_grad.max())
        assert False
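# A hedged sketch of the ``verify_grad`` route the XXX comments above point
# at. ``theano.tests.unittest_tools.verify_grad`` (a seeded wrapper around
# ``theano.gradient.verify_grad``) is real API; treating the graph as a
# function of the images alone, with ``filters`` closed over, is an
# illustrative assumption rather than the test as it was actually written.
import theano.tests.unittest_tools as utt

def _filter_acts_of_images(images_var):
    # images_var is a symbolic variable; verify_grad evaluates the graph
    # numerically at the point(s) passed below and checks the gradient
    # against a finite-difference estimate.
    return host_from_gpu(FilterActs()(gpu_from_host(images_var),
                                      gpu_from_host(filters)))

utt.verify_grad(_filter_acts_of_images, [images.get_value()])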
def test_match_valid_conv_strided():
    # Tests that running FilterActs with stride is the same as running
    # theano's conv2D in valid mode and then downsampling
    rng = np.random.RandomState([2012, 10, 9])
    batch_size = 5
    rows = 9
    cols = 9
    channels = 3
    filter_rows = 3
    filter_cols = filter_rows
    stride = 3
    num_filters = 16

    images = shared(rng.uniform(
        -1., 1., (channels, rows, cols, batch_size)).astype('float32'),
        name='images')
    filters = shared(rng.uniform(
        -1., 1., (channels, filter_rows, filter_cols,
                  num_filters)).astype('float32'), name='filters')

    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)

    output = FilterActs(stride=stride)(gpu_images, gpu_filters)
    output = host_from_gpu(output)

    images_bc01 = images.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters_bc01[:, :, ::-1, ::-1]

    output_conv2d = conv2d(images_bc01, filters_bc01,
                           border_mode='valid', subsample=(stride, stride))
    output_conv2d_orig = output_conv2d.dimshuffle(1, 2, 3, 0)
    output_conv2d = output_conv2d_orig  # [:, ::stride, ::stride, :]
    f = function([], [output, output_conv2d, output_conv2d_orig])

    output, output_conv2d, output_conv2d_orig = f()

    warnings.warn("""test_match_valid_conv success criterion is not very
        strict. Can we verify that this is OK? One possibility is that
        theano is numerically unstable and Alex's code is better.
        Probably theano CPU 64 bit is OK but it's worth checking the
        others.""")

    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print 'cuda-convnet shape: ', output.shape
            print 'theano shape: ', output_conv2d.shape
            assert False
        err = np.abs(output - output_conv2d)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (output.min(), output.max())
        print 'theano value range: ', (output_conv2d.min(),
                                       output_conv2d.max())
        assert False
def insert_gpu_filter_acts(node):
    if isinstance(node.op, FilterActs):
        images, filters = node.inputs
        if any_from_gpu(images, filters) or any_gpu_client(*node.outputs):
            gpu_filter_acts = GpuFilterActs(
                module_stride=node.op.module_stride,
                partial_sum=1)
            return [host_from_gpu(gpu_filter_acts(
                gpu_from_host(images),
                gpu_from_host(filters)))]
def insert_gpu_weight_acts(node):
    if isinstance(node.op, WeightActs):
        images, hidacts, frows, fcols = node.inputs
        if any_from_gpu(images, hidacts) or any_gpu_client(*node.outputs):
            gpu_weight_acts = GpuWeightActs(
                module_stride=node.op.module_stride,
                partial_sum=1)
            return [host_from_gpu(gpu_weight_acts(
                gpu_from_host(images),
                gpu_from_host(hidacts),
                frows,
                fcols,
            ))]
def insert_gpu_img_acts(node):
    if isinstance(node.op, ImgActs):
        filters, hidacts, irows, icols = node.inputs
        if any_from_gpu(filters, hidacts) or any_gpu_client(*node.outputs):
            gpu_img_acts = GpuImgActs(
                module_stride=node.op.module_stride,
                partial_sum=1)
            return [host_from_gpu(gpu_img_acts(
                gpu_from_host(filters),
                gpu_from_host(hidacts),
                irows,
                icols,
            ))]
def local_gpu_fft_conv(node):
    """
    gpu_conv -> gpu_fft_conv_op
    """
    if not isinstance(node.op, GpuConv):
        return
    if (node.op.border_mode == 'full' and
            node.op.subsample == (1, 1)):
        img, kern = node.inputs
        img = gpu_contiguous(img)
        kern = gpu_contiguous(kern)
        gpu_fft_conv = GpuFFTConvOp(node.op.border_mode,
                                    check=node.op.verbose)
        return [gpu_fft_conv(img, kern)]
    if (config.GpuFFTConvOp.valid and
            node.op.border_mode == 'valid' and
            node.op.subsample == (1, 1) and
            node.op.kshp and node.op.imshp):
        kshp = node.op.kshp
        ishp = node.op.imshp[1:]
        pad_up = kshp[0] - 1
        pad_left = kshp[1] - 1
        size_height = ishp[0] - kshp[0] + 1
        size_width = ishp[1] - kshp[1] + 1
        img = gpu_contiguous(node.inputs[0])
        kern = gpu_contiguous(node.inputs[1])
        gpu_fft_conv = GpuFFTConvOp("full", check=node.op.verbose)(
            img, kern)[:, :, pad_up:pad_up + size_height,
                       pad_left:pad_left + size_width]
        gpu_fft_conv = cuda.gpu_from_host(gpu_fft_conv)
        return [gpu_fft_conv]
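# Worked example (a standalone sketch, not part of the rewrite above) of the
# valid-via-full cropping arithmetic it uses: for a 5x5 image and a 3x3
# kernel, the full convolution yields 7x7, and the valid 3x3 result is the
# centre slice [pad_up:pad_up + size_height, pad_left:pad_left + size_width].
kshp = (3, 3)
ishp = (5, 5)
pad_up = kshp[0] - 1                     # 2
pad_left = kshp[1] - 1                   # 2
size_height = ishp[0] - kshp[0] + 1      # 3
size_width = ishp[1] - kshp[1] + 1       # 3
full_shape = (ishp[0] + kshp[0] - 1, ishp[1] + kshp[1] - 1)  # (7, 7)
assert (pad_up, pad_left, size_height, size_width) == (2, 2, 3, 3)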
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")

    cpu = 'Cuda' not in str(type(x))
    assert cpu
    if cpu:
        x = gpu_from_host(x)
    assert x.ndim == 5

    x_axes = self.input_axes
    assert len(x_axes) == 5

    op_axes = ('b', 0, 1, 't', 'c')
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    rval = self.conv3d_op(x, self._filters, self.b, self.kernel_stride)

    # Output reshuffle currently disabled:
    #assert len(rval_axes) == 5
    #op_axes = self.output_axes
    #if tuple(rval_axes) != op_axes:
    #    rval = rval.dimshuffle(*[op_axes.index(axis) for axis in rval_axes])

    return rval
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")

    cpu = 'Cuda' not in str(type(x))
    if cpu:
        x = gpu_from_host(x)
    assert x.ndim == 5

    x_axes = self.input_axes
    assert len(x_axes) == 5

    # Input reshuffle to the op's expected layout is currently disabled:
    op_axes = ('b', 'c', 0, 1, 't')
    #if tuple(x_axes) != op_axes:
    #    x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    rval = cuda.blas.GpuCorr3dMM(
        border_mode='valid',
        subsample=tuple(self.kernel_stride),
        pad=tuple(self.pad))(x, self._filters)

    return rval
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")

    cpu = 'Cuda' not in str(type(x))
    assert cpu
    if cpu:
        x = gpu_from_host(x)
    assert x.ndim == 5

    x_axes = self.input_axes
    assert len(x_axes) == 5

    op_axes = ('b', 0, 1, 't', 'c')
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    im = x.dimshuffle(0, 3, 4, 1, 2)
    filt = self._filters.dimshuffle(0, 3, 4, 1, 2)

    rval = conv3d(im, filt, None, None,
                  (self.kernel_stride[0], self.kernel_stride[1]))
    rval = rval.dimshuffle(0, 3, 4, 1, 2)

    return rval
def test_gemv2(self):
    '''
    test vector2 + dot(vector1, matrix)
    '''
    v1 = theano.shared(numpy.array(numpy.random.rand(5), dtype='float32'))
    v2 = tensor._shared(numpy.array(numpy.random.rand(2), dtype='float32'))
    m = theano.shared(numpy.array(numpy.random.rand(5, 2),
                                  dtype='float32'))

    no_gpu_f = theano.function([], v2 + theano.dot(v1, m),
                               mode=mode_without_gpu)
    gpu_f = theano.function([], v2 + theano.dot(v1, m), mode=mode_with_gpu)
    # gpu_f2 is needed to test the case when the input is not on the gpu
    # but the output is moved to the gpu.
    gpu_f2 = theano.function([], tcn.gpu_from_host(v2 + theano.dot(v1, m)),
                             mode=mode_with_gpu)

    # Assert they produce the same output
    assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
    assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
    # Assert that the gpu version actually uses gpu
    assert sum([node.op is gpu_gemv_inplace
                for node in gpu_f2.maker.fgraph.toposort()]) == 1
    assert sum([node.op is gpu_gemv_inplace
                for node in gpu_f.maker.fgraph.toposort()]) == 1
def test_dot_vm(self):
    '''
    Test vector dot matrix
    '''
    v = theano.shared(numpy.array(numpy.random.rand(2), dtype='float32'))
    m = theano.shared(numpy.array(numpy.random.rand(2, 5),
                                  dtype='float32'))
    no_gpu_f = theano.function([], theano.dot(v, m), mode=mode_without_gpu)
    gpu_f = theano.function([], theano.dot(v, m), mode=mode_with_gpu)
    # gpu_f2 is needed to test the case when the input is not on the gpu
    # but the output is moved to the gpu.
    gpu_f2 = theano.function([], tcn.gpu_from_host(theano.dot(v, m)),
                             mode=mode_with_gpu)

    # Assert they produce the same output
    assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
    assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
    # Assert that the gpu version actually uses gpu
    assert sum([node.op is gpu_gemv_inplace
                for node in gpu_f.maker.fgraph.toposort()]) == 1
    assert sum([node.op is gpu_gemv_inplace
                for node in gpu_f2.maker.fgraph.toposort()]) == 1

    # Check double-strided m
    m.set_value(
        m.get_value(borrow=True, return_internal_type=True)[::-1, ::-1],
        borrow=True)
    assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
    assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
def traverse(out, x, x_copy, d, visited=None):
    '''
    Function used by scan to parse the tree and figure out which nodes
    it needs to replace.

    There are two options:
        1) x and x_copy are on the host, then you would replace x with x_copy
        2) x is on the gpu, x_copy on the host, then you need to replace
           host_from_gpu(x) with x_copy

    This happens because initially shared variables are on the GPU... which
    is fine for the main computational graph but confuses things a bit for
    the inner graph of scan.
    '''
    # ``visited`` is a set of nodes that are already known and don't need to
    # be checked again, speeding up the traversal of multiply-connected
    # graphs. If a ``visited`` set is given, it will be updated in-place so
    # the caller knows which nodes we have seen.
    if visited is None:
        visited = set()
    if out in visited:
        return d
    visited.add(out)
    import theano.sandbox.cuda as cuda
    if out == x:
        d[out] = cuda.gpu_from_host(x_copy)
        return d
    elif out.owner is None:
        return d
    elif (cuda.cuda_available and
          out.owner.op == cuda.host_from_gpu and
          out.owner.inputs == [x]):
        d[out] = tensor.as_tensor_variable(x_copy)
        return d
    else:
        for inp in out.owner.inputs:
            d = traverse(inp, x, x_copy, d, visited)
        return d
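# A hedged usage sketch of ``traverse``: scan accumulates a replacement dict
# for each (gpu shared variable, host copy) pair and then clones the inner
# graph with it. ``theano.clone`` and its ``replace`` argument are real API;
# the surrounding variable names are illustrative assumptions.
replacements = {}
for shared_var, host_copy in zip(gpu_shared_vars, host_copies):
    for out in inner_outputs:
        replacements = traverse(out, shared_var, host_copy, replacements)
new_inner_outputs = [theano.clone(o, replace=replacements)
                     for o in inner_outputs]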
def test_gemv1(self):
    '''
    test vector2 + dot(matrix, vector1)
    '''
    v1 = theano.tensor._shared(numpy.array(numpy.random.rand(2),
                                           dtype='float32'))
    v2 = theano.tensor._shared(numpy.array(numpy.random.rand(5),
                                           dtype='float32'))
    m = theano.tensor._shared(numpy.array(numpy.random.rand(5, 2),
                                          dtype='float32'))

    no_gpu_f = theano.function([], v2 + theano.dot(m, v1),
                               mode=mode_without_gpu)
    gpu_f = theano.function([], v2 + theano.dot(m, v1), mode=mode_with_gpu)
    # gpu_f2 is needed to test the case when the input is not on the gpu
    # but the output is moved to the gpu.
    gpu_f2 = theano.function([], tcn.gpu_from_host(v2 + theano.dot(m, v1)),
                             mode=mode_with_gpu)

    # Assert they produce the same output
    assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
    assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
    # Assert that the gpu version actually uses gpu
    assert sum([node.op is gpu_gemv_inplace
                for node in gpu_f2.maker.fgraph.toposort()]) == 1
    assert sum([node.op is gpu_gemv_inplace
                for node in gpu_f.maker.fgraph.toposort()]) == 1
def test_local_gpu_contiguous():
    a = tensor.fmatrix()
    o = tensor.extra_ops.cpu_contiguous(a)
    for o in [o, cuda.gpu_from_host(o)]:
        f = theano.function([a], o, mode=mode_with_gpu)
        assert 1 == len([node for node in f.maker.fgraph.toposort()
                         if isinstance(node.op, basic_ops.GpuContiguous)])
        f([[2.]])
def test_reject_rect():
    # Tests that running FilterActs with a non-square
    # kernel is an error
    rng = np.random.RandomState([2012, 10, 9])
    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows + 1
    num_filters = 6

    images = shared(rng.uniform(
        -1., 1., (channels, rows, cols, batch_size)).astype('float32'),
        name='images')
    filters = shared(rng.uniform(
        -1., 1., (channels, filter_rows, filter_cols,
                  num_filters)).astype('float32'), name='filters')

    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)

    output = FilterActs()(gpu_images, gpu_filters)

    images_bc01 = images.dimshuffle(3, 0, 1, 2)
    # The reference graph must shuffle the filters, not the images again.
    filters_bc01 = filters.dimshuffle(3, 0, 1, 2)

    output_conv2d = conv2d(images_bc01, filters_bc01, border_mode='valid')

    try:
        f = function([], [output, output_conv2d])
    except:
        raise KnownFailureTest(
            "cuda-convnet code depends on an unmerged theano feature.")

    try:
        output, output_conv2d = f()
    except ValueError:
        return

    assert False
def test_1(self):
    data = numpy.float32([1, 2, 3, 4])
    x = f32sc(data)
    y = x ** 2
    f = theano.function([], y, updates=[(x, x + 1)])
    f()

    # Test that we can update with a CudaVariable
    f = theano.function([], y, updates=[(x, cuda.gpu_from_host(x + 1))])
    f()
def test_viewop_gpu():
    from theano.sandbox import cuda
    if not cuda.cuda_available:
        raise SkipTest('Optional package cuda disabled')
    _x = theano.tensor.fvector('x')
    x = cuda.gpu_from_host(_x)
    _out = theano.compile.ViewOp()(x)
    out = cuda.host_from_gpu(_out)
    f = theano.function([x], out, mode=mode_with_gpu)
    data = numpy.array([1, 2, 3], dtype='float32')
    assert numpy.allclose(f(data), data)
def test_cross_map_norm_grad_simple():
    rng = numpy.random.RandomState([2013, 2, 10])
    op = CrossMapNorm(16, 15 / 16., 1, True)
    make_graph = lambda inp: op(gpu_from_host(inp))[0]
    verify = lambda array: verify_grad(make_graph, [array])
    inputs = [numpy.ones((16, 1, 1, 1), dtype='float32'),
              rng.normal(size=(32, 5, 5, 10)).astype('float32')]
    for arr in inputs:
        yield verify, arr
def test_many_arg_elemwise():
    """This test checks whether the + and * elemwise ops can handle
    extremely large numbers of arguments on gpu.
    I.e., it is a test of the optimization
    theano/sandbox/cuda/opt.py:local_gpu_huge_add_or_mul
    """
    rng = numpy.random.RandomState([1, 2, 3])

    for num_args in [25]:
        for op_to_test in [theano.tensor.add, theano.tensor.mul]:
            for nb_dim in [2, 3, 4, 5]:
                shapes = [rng.randint(1, 5) for i in range(nb_dim)]
                args = [numpy.cast['float32'](rng.randn(*shapes))
                        for arg in xrange(0, num_args)]

                symb_args = [theano.tensor.TensorType('float32',
                                                      (False,) * nb_dim)()
                             for arg in xrange(0, num_args)]

                outputs = []
                for mode in [mode_with_gpu, mode_without_gpu]:
                    # test the optimization local_gpu_elemwise_0
                    f = theano.function(
                        symb_args, op_to_test(*symb_args),
                        mode=mode.excluding("local_gpu_elemwise_1"))
                    outputs.append(f(*args))
                    # assert that the test was done on the gpu.
                    if mode is mode_with_gpu:
                        assert any([isinstance(node.op, cuda.GpuElemwise)
                                    for node in f.maker.env.nodes])

                    # test the optimization local_gpu_elemwise_1
                    f = theano.function(
                        symb_args,
                        cuda.gpu_from_host(op_to_test(*symb_args)),
                        mode=mode.excluding("local_gpu_elemwise_0"))
                    out = f(*args)
                    # assert that the test was done on the gpu.
                    if mode is mode_with_gpu:
                        assert any([isinstance(node.op, cuda.GpuElemwise)
                                    for node in f.maker.env.nodes])
                    assert numpy.allclose(out, outputs[-1])

                results_gpu, results_cpu = outputs

                assert numpy.allclose(results_gpu, results_cpu)
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")
    # TODO: figure out why x sometimes arrives on the CPU here.
    cpu = 'Cuda' not in str(type(x))
    if cpu:
        x = gpu_from_host(x)
    assert x.ndim == 5
    x_axes = self.input_axes
    assert len(x_axes) == 5

    op_axes = ('c', 0, 1, 't', 'b')
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    # Collapse the last two axes so FilterActs (a 2D op) can be applied;
    # the 5D structure is rebuilt below.
    _x_4d_shape = (self.signal_shape[0],
                   self.signal_shape[1],
                   self.signal_shape[2],
                   self.signal_shape[3] * self.signal_shape[4])

    x = x.reshape(_x_4d_shape)

    x = gpu_contiguous(x)

    rval = FilterActs(self.pad, self.partial_sum,
                      self.kernel_stride[0])(x, self._filters)

    if cpu:
        rval = host_from_gpu(rval)

    rval = rval.reshape((self.filter_shape[3],
                         self.filter_shape[4],
                         rval.shape[1],
                         rval.shape[2],
                         self.signal_shape[3],
                         self.signal_shape[4]))

    rval = diagonal_subtensor(rval, 4, 0).sum(axis=0)

    # Format the output based on the output space
    rval_axes = self.output_axes
    assert len(rval_axes) == 5

    if tuple(rval_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(axis)
                                 for axis in rval_axes])

    return rval
def local_gpu_alloc_diagonal(node):
    if (isinstance(node.op, AllocDiag) and
            isinstance(node.inputs[0].type, theano.tensor.TensorType)):
        inp = node.inputs[0]
        if inp.owner and isinstance(inp.owner.op, cuda.HostFromGpu):
            diag = inp.owner.inputs[0]
            y = cuda.gpu_from_host(
                tensor.alloc(numpy.asarray(0, dtype=diag.dtype),
                             diag.shape[0], diag.shape[0]))
            y = theano.tensor.nnet.conv3d2d.IncDiagonalSubtensor()(
                y, 0, 1, diag)
            return [cuda.host_from_gpu(y)]
        else:
            return False
    return False
def test_reject_bad_filt_number():
    # Tests that running FilterActs with a # of filters per
    # group that is not 16 is an error
    rng = np.random.RandomState([2012, 10, 9])
    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 6

    images = shared(rng.uniform(
        -1., 1., (channels, rows, cols, batch_size)).astype('float32'),
        name='images')
    filters = shared(rng.uniform(
        -1., 1., (channels, filter_rows, filter_cols,
                  num_filters)).astype('float32'), name='filters')

    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)

    output = FilterActs()(gpu_images, gpu_filters)

    images_bc01 = images.dimshuffle(3, 0, 1, 2)
    # The reference graph must shuffle the filters, not the images again.
    filters_bc01 = filters.dimshuffle(3, 0, 1, 2)

    output_conv2d = conv2d(images_bc01, filters_bc01, border_mode='valid')
    f = function([], [output, output_conv2d])

    try:
        output, output_conv2d = f()
    except ValueError:
        return

    assert False
def lmul(self, x):
    """
    .. todo::

        WRITEME properly

    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")

    cpu = 'Cuda' not in str(type(x))
    if cpu:
        x = gpu_from_host(x)

    # x must be formatted as channel, topo dim 0, topo dim 1, batch_index
    # for use with FilterActs
    assert x.ndim == 4
    x_axes = self.input_axes
    assert len(x_axes) == 4

    op_axes = ('c', 0, 1, 'b')
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    x = gpu_contiguous(x)

    # Patch old pickle files.
    if not hasattr(self, 'kernel_stride'):
        self.kernel_stride = (1, 1)
    rval = FilterActs(self.pad, self.partial_sum, self.kernel_stride[0])(
        x, self._filters)

    # Format the output based on the output space
    rval_axes = self.output_axes
    assert len(rval_axes) == 4

    if cpu:
        rval = host_from_gpu(rval)

    if tuple(rval_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(axis)
                                 for axis in rval_axes])

    return rval
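# A small, self-contained numpy illustration of the dimshuffle permutation
# pattern used above: the permutation maps each axis FilterActs expects to
# its position in the incoming layout. The example layout ('b', 'c', 0, 1)
# is an assumption chosen for demonstration only.
import numpy as np
x_axes = ('b', 'c', 0, 1)           # hypothetical incoming layout
op_axes = ('c', 0, 1, 'b')          # layout FilterActs expects
perm = [x_axes.index(axis) for axis in op_axes]
assert perm == [1, 2, 3, 0]
x = np.zeros((5, 3, 10, 9))         # (b, c, 0, 1)
assert x.transpose(perm).shape == (3, 10, 9, 5)   # (c, 0, 1, b)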
def traverse(out, x, x_copy, d, visited=None):
    """
    Function used by scan to parse the tree and figure out which nodes
    it needs to replace.

    There are two options:
        1) x and x_copy are on the host, then you would replace x with x_copy
        2) x is on the gpu, x_copy on the host, then you need to replace
           host_from_gpu(x) with x_copy

    This happens because initially shared variables are on the GPU... which
    is fine for the main computational graph but confuses things a bit for
    the inner graph of scan.
    """
    # ``visited`` is a set of nodes that are already known and don't need to
    # be checked again, speeding up the traversal of multiply-connected
    # graphs. If a ``visited`` set is given, it will be updated in-place so
    # the caller knows which nodes we have seen.
    if visited is None:
        visited = set()
    if out in visited:
        return d
    visited.add(out)
    from theano.sandbox import cuda
    from theano.gpuarray.basic_ops import gpu_from_host, host_from_gpu
    from theano.gpuarray import pygpu_activated
    from theano.gpuarray.type import GpuArrayType
    if out == x:
        if isinstance(x.type, cuda.CudaNdarrayType):
            d[out] = cuda.gpu_from_host(x_copy)
        else:
            assert isinstance(x.type, GpuArrayType)
            d[out] = gpu_from_host(x.type.context_name)(x_copy)
        return d
    elif out.owner is None:
        return d
    elif (cuda.cuda_available and
          out.owner.op == cuda.host_from_gpu and
          out.owner.inputs == [x]):
        d[out] = tensor.as_tensor_variable(x_copy)
        return d
    elif (pygpu_activated and
          out.owner.op == host_from_gpu and
          out.owner.inputs == [x]):
        d[out] = tensor.as_tensor_variable(x_copy)
        return d
    else:
        for inp in out.owner.inputs:
            d = traverse(inp, x, x_copy, d, visited)
        return d
def test_local_gpu_split():
    """ Test that the GpuSplit op is being applied and works """
    # Construct symbolic split
    x = tensor.fvector()
    splits = tensor.lvector()
    ra, rb, rc = tensor.split(x, splits, n_splits=3, axis=0)
    # Compile function to use CPU
    f = theano.function([x, splits], [ra, rb, rc], mode=mode_without_gpu)
    # Get values for CPU version
    cpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
    l = f.maker.fgraph.toposort()
    # Ensure that one op is theano.tensor.Split
    assert any([isinstance(o.op, theano.tensor.Split) for o in l])
    # GPU version
    f = theano.function([x, splits], [ra, rb, rc], mode=mode_with_gpu)
    gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
    l = f.maker.fgraph.toposort()
    assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
    # Check equality
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])

    # Test the other path of the optimizer, when it is the output that
    # is moved to the GPU.
    ra = cuda.gpu_from_host(ra)
    f = theano.function([x, splits], [ra, rb, rc],
                        mode=mode_with_gpu.excluding("InputToGpuOptimizer"))
    gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
    l = f.maker.fgraph.toposort()
    assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
    # Check equality
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])

    # Test that split with only 1 output works
    ra = tensor.split(x, splits, n_splits=1, axis=0)
    f = theano.function([x, splits], [ra], mode=mode_without_gpu)
    cpu_res = f([0, 1, 2, 3, 4, 5], [6])
    l = f.maker.fgraph.toposort()
    # Ensure that no op is theano.tensor.Split or GpuSplit; they get
    # optimized away.
    assert not any([isinstance(o.op, (theano.tensor.Split, cuda.GpuSplit))
                    for o in l])
    # GPU version
    f = theano.function([x, splits], [ra], mode=mode_with_gpu)
    gpu_res = f([0, 1, 2, 3, 4, 5], [6])
    l = f.maker.fgraph.toposort()
    assert not any([isinstance(o.op, (theano.tensor.Split, cuda.GpuSplit))
                    for o in l])
    # Check equality
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
def test_gpu_out_multiple_clients(self):
    # Test that when the output of gpu_from_host is used by more
    # than one Op, the gradient still works.
    # A problem used to be that GpuFromHost.grad expected the output
    # gradient to be on GPU, but the summation of the different
    # incoming gradients was done on CPU.
    x = tensor.fmatrix('x')
    z = cuda.gpu_from_host(x)

    n1 = tensor.nnet.sigmoid(z)
    n2 = tensor.dot(z, z.T)

    s1 = n1.sum()
    s2 = n2.sum()

    c = s1 + s2

    dc_dx = theano.grad(c, x)
    if self.verbose:
        theano.printing.debugprint(c, print_type=True)
        theano.printing.debugprint(dc_dx, print_type=True)
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    cpu = 'Cuda' not in str(type(x))

    if cpu:
        x = gpu_from_host(x)

    # x must be formatted as channel, topo dim 0, topo dim 1, batch_index
    # for use with FilterActs
    assert x.ndim == 4
    x_axes = self.input_axes
    assert len(x_axes) == 4

    op_axes = ('c', 0, 1, 'b')

    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    x = gpu_contiguous(x)

    rval = FilterActs(self.pad, self.partial_sum)(x, self._filters)

    # Format the output based on the output space
    rval_axes = self.output_axes
    assert len(rval_axes) == 4

    if tuple(rval_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(axis)
                                 for axis in rval_axes])

    if cpu:
        rval = host_from_gpu(rval)

    return rval
def traverse(out, x, x_copy, d):
    '''
    Function used by scan to parse the tree and figure out which nodes
    it needs to replace.

    There are two options:
        1) x and x_copy are on the host, then you would replace x with x_copy
        2) x is on the gpu, x_copy on the host, then you need to replace
           host_from_gpu(x) with x_copy

    This happens because initially shared variables are on the GPU... which
    is fine for the main computational graph but confuses things a bit for
    the inner graph of scan.
    '''
    import theano.sandbox.cuda as cuda
    if out == x:
        d[out] = cuda.gpu_from_host(x_copy)
        return d
    elif out.owner is None:
        return d
    elif (cuda.cuda_available and
          out.owner.op == cuda.host_from_gpu and
          out.owner.inputs == [x]):
        d[out] = tensor.as_tensor_variable(x_copy)
        return d
    else:
        for inp in out.owner.inputs:
            d = traverse(inp, x, x_copy, d)
        return d
def lmul(self, x, b):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")

    cpu = 'Cuda' not in str(type(x))
    assert cpu
    if cpu:
        x = gpu_from_host(x)
    assert x.ndim == 5

    x_axes = self.input_axes
    assert len(x_axes) == 5

    # Input reshuffle currently disabled:
    #op_axes = ('b', 0, 1, 't', 'c')
    #if tuple(x_axes) != op_axes:
    #    x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    rval = self.conv3d_op(x, self._filters, b, (1, 1, 1))

    # Alternative implementations kept for reference:
    #rval = conv.Conv3DFFT(self.signal_shape, self.filter_shape)(
    #    x, self._filters)
    #rval = conv.conv3d_fft(x,
    #                       self._filters,
    #                       image_shape=x.shape,
    #                       filter_shape=self.filter_shape)

    rval_axes = self.output_axes
    assert len(rval_axes) == 5

    # Output reshuffle currently disabled:
    #op_axes = ('b', 'c', 't', 0, 1)
    #if tuple(rval_axes) != op_axes:
    #    rval = rval.dimshuffle(*[op_axes.index(axis) for axis in rval_axes])

    return rval
def test_dot_vm():
    '''
    Test vector dot matrix
    '''
    v = theano.shared(numpy.array(numpy.random.rand(2), dtype='float32'))
    m = theano.shared(numpy.array(numpy.random.rand(2, 5),
                                  dtype='float32'))
    no_gpu_f = theano.function([], theano.dot(v, m), mode=mode_without_gpu)
    gpu_f = theano.function([], theano.dot(v, m), mode=mode_with_gpu)
    # gpu_f2 is needed to test the case when the input is not on the gpu
    # but the output is moved to the gpu.
    gpu_f2 = theano.function([], cuda.gpu_from_host(theano.dot(v, m)),
                             mode=mode_with_gpu)

    # Assert they produce the same output
    assert numpy.allclose(no_gpu_f(), gpu_f(), atol=atol)
    assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=atol)
    # Assert that the gpu version actually uses gpu
    assert sum([isinstance(node.op, blasop.GpuDot22)
                for node in gpu_f.maker.env.toposort()]) == 1
    assert sum([isinstance(node.op, blasop.GpuDot22)
                for node in gpu_f2.maker.env.toposort()]) == 1
def test_dot_mv(self):
    '''
    Test matrix dot vector
    '''
    v = theano.shared(numpy.array(numpy.random.rand(2), dtype='float32'))
    m = theano.shared(numpy.array(numpy.random.rand(5, 2),
                                  dtype='float32'))
    no_gpu_f = theano.function([], theano.dot(m, v), mode=mode_without_gpu)
    gpu_f = theano.function([], theano.dot(m, v), mode=mode_with_gpu)
    # gpu_f2 is needed to test the case when the input is not on the gpu
    # but the output is moved to the gpu.
    gpu_f2 = theano.function([], tcn.gpu_from_host(theano.dot(m, v)),
                             mode=mode_with_gpu)

    # Assert they produce the same output
    assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
    assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
    # Assert that the gpu version actually uses gpu
    assert sum([node.op is gpu_gemv_inplace
                for node in gpu_f.maker.env.toposort()]) == 1
    assert sum([node.op is gpu_gemv_inplace
                for node in gpu_f2.maker.env.toposort()]) == 1