def test_mkl_lrn_forward():
    if theano.config.floatX == 'float32':
        x = tensor.ftensor4()
    else:
        x = tensor.dtensor4()

    y = lrn(x)
    f = theano.function([x], y, mode=mode_with_mkl)

    topo = f.maker.fgraph.toposort()
    inputs = f.maker.fgraph.inputs
    outputs = f.maker.fgraph.outputs
    assert len(inputs) == 1
    assert len(outputs) == 1
    assert len(topo) == 3
    assert isinstance(topo[0].op, U2ILRN)
    assert isinstance(topo[1].op, mkl.mkl_lrn.LRN)
    assert isinstance(topo[2].op, I2U)
    assert outputs[0].owner == topo[2]

    imval = numpy.random.rand(4, 2, 4, 4).astype(theano.config.floatX)
    f(imval)
    print('test_mkl_lrn_forward() pass..')
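
# Hedged extra check (an assumption, not part of the original suite): compiling
# the same graph without mode_with_mkl should yield a pure-Theano LRN that can
# serve as a numerical reference for the MKL path asserted on above.
def check_mkl_lrn_against_reference():
    if theano.config.floatX == 'float32':
        x = tensor.ftensor4()
    else:
        x = tensor.dtensor4()

    y = lrn(x)
    f_mkl = theano.function([x], y, mode=mode_with_mkl)
    f_ref = theano.function([x], y)  # default mode, no MKL ops

    imval = numpy.random.rand(4, 2, 4, 4).astype(theano.config.floatX)
    numpy.testing.assert_allclose(f_mkl(imval), f_ref(imval),
                                  rtol=1e-4, atol=1e-6)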
def __init__(self, input, image_shape, filter_shape, convstride, padsize,
             group, poolsize, poolstride, bias_init, lrn=False):
    '''Conv, ReLU, optional LRN and pooling layer.'''
    self.filter_size = filter_shape
    self.convstride = convstride
    self.padsize = padsize
    self.poolsize = poolsize
    self.poolstride = poolstride
    self.channel = image_shape[1]
    self.lrn = lrn
    assert group in [1, 2]

    self.filter_shape = list(filter_shape)
    self.image_shape = list(image_shape)

    if group == 1:
        self.W = Weight(self.filter_shape)
        self.b = Weight(self.filter_shape[0], bias_init, std=0)
        new_filter_shape = self.filter_shape
    else:
        # When group > 1, reshape the filters into a 5D tensor:
        # (group, out_channels // group, in_channels // group, kh, kw).
        new_filter_shape = (group,
                            self.filter_shape[0] // group,
                            self.filter_shape[1] // group,
                            self.filter_shape[2],
                            self.filter_shape[3])
        self.W = Weight(new_filter_shape)
        self.b = Weight(self.filter_shape[0], bias_init, std=0)

    conv_out = AbstractConvGroup(imshp=image_shape,
                                 kshp=new_filter_shape,
                                 subsample=(convstride, convstride),
                                 border_mode=(padsize, padsize),
                                 filter_flip=False,
                                 group=group)(input, self.W.val, self.b.val)

    # ReLU
    self.output = T.nnet.relu(conv_out, 0)

    # LRN
    if self.lrn:
        self.output = LRN.lrn(self.output, alpha=1e-4, beta=0.75, k=1, n=5)

    # Pooling
    if self.poolsize != 1:
        self.output = pool.pool_2d(input=self.output,
                                   ds=(poolsize, poolsize),
                                   ignore_border=False,
                                   st=(poolstride, poolstride),
                                   mode='max')

    self.params = [self.W.val, self.b.val]
    self.weight_type = ['W', 'b']
    print('conv layer with shape_in: {}'.format(str(image_shape)))
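
# Hedged usage sketch (assumptions: the enclosing class is called ConvPoolLayer
# here purely for illustration; Weight, AbstractConvGroup, LRN and pool come
# from the surrounding repo, as used in the constructor above). An AlexNet-style
# first layer: 96 11x11 filters, stride 4, 3x3 max pooling with stride 2, LRN on.
#
#     layer = ConvPoolLayer(input=x,
#                           image_shape=(256, 3, 227, 227),  # (batch, channels, h, w)
#                           filter_shape=(96, 3, 11, 11),    # (n_filters, channels, kh, kw)
#                           convstride=4, padsize=0, group=1,
#                           poolsize=3, poolstride=2,
#                           bias_init=0.0, lrn=True)
#     out = layer.output  # symbolic conv + ReLU + LRN + pool result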
def test_mkl_lrn_backward():
    predefineOps = [U2ILRN, mkl.mkl_lrn.LRN, I2U,
                    Shape_i, Shape_i, Shape_i, Shape_i,
                    Alloc, I2UGrad, mkl.mkl_lrn.LRNGrad, U2IGrad]

    if theano.config.floatX == 'float32':
        x = tensor.ftensor4()
    else:
        x = tensor.dtensor4()

    y = lrn(x)
    z = tensor.grad(tensor.sum(y), [x])
    f = theano.function([x], z, mode=mode_with_mkl)

    topo = f.maker.fgraph.toposort()
    inputs = f.maker.fgraph.inputs
    outputs = f.maker.fgraph.outputs
    assert len(inputs) == 1
    assert len(outputs) == 1
    assert len(topo) == 11
    for i, node in enumerate(topo):
        assert isinstance(node.op, predefineOps[i])

    # U2ILRN
    assert len(topo[0].inputs) == 1
    assert topo[0].inputs[0] == inputs[0]
    # LRN
    assert len(topo[1].inputs) == 1
    assert topo[1].inputs[0].owner == topo[0]
    # I2UGrad
    assert len(topo[8].inputs) == 2
    assert topo[8].inputs[0].owner == topo[1]
    assert topo[8].inputs[1].owner == topo[7]
    # LRNGrad
    assert len(topo[9].inputs) == 2
    assert topo[9].inputs[0].owner == topo[0]
    assert topo[9].inputs[1].owner == topo[8]
    # U2IGrad
    assert len(topo[10].inputs) == 2
    assert topo[10].inputs[0] == inputs[0]
    assert topo[10].inputs[1].owner == topo[9]
    # Output
    assert outputs[0].owner == topo[10]

    imval = numpy.random.rand(4, 2, 4, 4).astype(theano.config.floatX)
    f(imval)
    print('test_mkl_lrn_backward() pass..')
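
# Reading of the 11-node graph above (this follows the assertions in the test):
# tensor.grad(tensor.sum(y), [x]) introduces the four Shape_i nodes and the
# Alloc to build the ones-like upstream gradient of sum(y); I2UGrad moves that
# gradient into MKL's internal layout, LRNGrad runs the MKL backward primitive
# on the internal-layout input from U2ILRN, and U2IGrad converts the result
# back to a user-layout tensor, which is the function output.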
import theano
from theano import tensor as T
from theano.sandbox import mkl
from theano.tensor.nnet.lrn import lrn
from nose.plugins.skip import SkipTest

if not mkl.mkl_available:
    raise SkipTest('Optional package MKL disabled')

x = T.ftensor4()
y = lrn(x)
theano.printing.pydotprint(y, outfile='lrn_fwd_before.png', var_with_name_simple=True)
f = theano.function([x], y)
theano.printing.pydotprint(f, outfile='lrn_fwd_after.png', var_with_name_simple=True)

z = T.grad(T.sum(y), [x])
theano.printing.pydotprint(z, outfile='lrn_bwd_before.png', var_with_name_simple=True)
f1 = theano.function([x], z)
theano.printing.pydotprint(f1, outfile='lrn_bwd_after.png', var_with_name_simple=True)
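
# The four PNGs contrast the symbolic graph before compilation with the
# optimized one. Assuming the MKL optimizer is active in the default
# compilation mode used here, the "after" images should show the plain LRN op
# rewritten into the U2ILRN -> mkl_lrn.LRN -> I2U chain (plus the matching
# *Grad ops in the backward graph) that the tests above assert on.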