Code Example #1
File: theano_backend.py Project: luogongning/keras
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == 'max':
        pool_out = downsample.max_pool_2d(x, ds=pool_size, st=strides,
                                          ignore_border=ignore_border,
                                          padding=padding,
                                          mode='max')
    elif pool_mode == 'avg':
        pool_out = downsample.max_pool_2d(x, ds=pool_size, st=strides,
                                          ignore_border=ignore_border,
                                          padding=padding,
                                          mode='average_exc_pad')
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
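
A minimal usage sketch for pool2d (illustrative shapes; assumes the function above is in scope and dim_ordering='th', i.e. channels-first):

import numpy
import theano
import theano.tensor as T

x = T.tensor4('x')  # (batch, channels, rows, cols) under dim_ordering='th'
y = pool2d(x, pool_size=(2, 2), strides=(2, 2), pool_mode='max')
f = theano.function([x], y)
out = f(numpy.random.rand(2, 3, 8, 8).astype(theano.config.floatX))
print(out.shape)  # (2, 3, 4, 4), since border_mode='valid' implies ignore_border=True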
Code Example #2
File: convnet.py Project: youralien/smarterboard-nn
def testmodel(X, w, w2, w3, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='valid'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2b = max_pool_2d(l2a, (2, 2))
    l2 = T.flatten(l2b, outdim=2)
    l2 = dropout(l2, p_drop_conv)

    l3 = rectify(T.dot(l2, w3))
    l3 = dropout(l3, p_drop_hidden)
    
    pyx = softmax(T.dot(l3, w_o))
    # l3a = rectify(conv2d(l2, w3))
    # l3b = max_pool_2d(l3a, (2, 2))
    # l3 = T.flatten(l3b, outdim=2)
    # l3 = dropout(l3, p_drop_conv)

    # problem happening here
    # l4 = rectify(T.dot(l3, w4))
    # l4 = dropout(l4, p_drop_hidden)

    # pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, pyx   
Code Example #3
File: conv_net.py Project: deccs/ndsb_theano
def model3(X, w, w2, w22, w222, w3, w4, p_drop_conv, p_drop_hidden):
  l1a = rectify(conv2d(X, w, border_mode='full'))
  l1 = max_pool_2d(l1a, (2, 2))
  l1 = dropout(l1, p_drop_conv)

  l2a = rectify(conv2d(l1, w2))
  l2 = max_pool_2d(l2a, (2, 2))
  l2 = dropout(l2, p_drop_conv)

  l22a = rectify(conv2d(l2, w22))
  l22 = max_pool_2d(l22a, (2, 2))
  l22 = dropout(l22, p_drop_conv)

  l222a = rectify(conv2d(l22, w222))
  l222 = max_pool_2d(l222a, (2, 2))
  l222 = dropout(l222, p_drop_conv)

  l3a = rectify(conv2d(l222, w3))
  l3b = max_pool_2d(l3a, (2, 2))
  l3 = T.flatten(l3b, outdim=2)
  l3 = dropout(l3, p_drop_conv)

  l4 = rectify(T.dot(l3, w4))
  l4 = dropout(l4, p_drop_hidden)

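  # w_o is assumed to be defined in the enclosing scope; it is not among model3's parameters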
  pyx = softmax(T.dot(l4, w_o))
  return l1, l2, l22, l222, l3, l4, pyx
Code Example #4
def pool2d(x, pool_size, strides=(1, 1), border_mode="valid", dim_ordering="th", pool_mode="max"):
    if border_mode == "same":
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == "valid":
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception("Invalid border mode: " + str(border_mode))

    if dim_ordering not in {"th", "tf"}:
        raise Exception("Unknown dim_ordering " + str(dim_ordering))

    if dim_ordering == "tf":
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == "max":
        pool_out = downsample.max_pool_2d(
            x, ds=pool_size, st=strides, ignore_border=ignore_border, padding=padding, mode="max"
        )
    elif pool_mode == "avg":
        pool_out = downsample.max_pool_2d(
            x, ds=pool_size, st=strides, ignore_border=ignore_border, padding=padding, mode="average_exc_pad"
        )
    else:
        raise Exception("Invalid pooling mode: " + str(pool_mode))

    if dim_ordering == "tf":
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Code Example #5
    def output(self, input, mask=None):
        if mask is None:
            drop_in = input * self.drop
        else:
            drop_in = input * mask

        conv_out1 = conv.conv2d(input=drop_in, filters=self.W1, filter_shape=self.filter_shape1,
                                image_shape=self.shape_in)
        linout1 = T.nnet.relu(conv_out1 + self.b1.dimshuffle('x', 0, 'x', 'x'))
        output1 = (
            linout1 if self.activation is None
            else self.activation(linout1)
        )
        pooled_out1 = downsample.max_pool_2d(input=output1, ds=self.poolsize1, ignore_border=True)

        conv_out2 = conv.conv2d(input=drop_in, filters=self.W2, filter_shape=self.filter_shape2,
                                image_shape=self.shape_in)
        linout2 = T.nnet.relu(conv_out2 + self.b2.dimshuffle('x', 0, 'x', 'x'))
        output2 = (
            linout2 if self.activation is None
            else self.activation(linout2)
        )
        pooled_out2 = downsample.max_pool_2d(input=output2, ds=self.poolsize2, ignore_border=True)

        conv_out3 = conv.conv2d(input=drop_in, filters=self.W3, filter_shape=self.filter_shape3,
                                image_shape=self.shape_in)
        linout3 = T.nnet.relu(conv_out3 + self.b3.dimshuffle('x', 0, 'x', 'x'))
        output3 = (
            linout3 if self.activation is None
            else self.activation(linout3)
        )
        pooled_out3 = downsample.max_pool_2d(input=output3, ds=self.poolsize3, ignore_border=True)

        output = T.concatenate([pooled_out1, pooled_out2, pooled_out3], axis=1)
        return output
Code Example #6
def bench_ConvMed(batchsize):
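    # data_x, sx, sy, si, nsi, lr, n_examples and outputs are assumed to be defined at module level in the benchmark script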
    data_x.value = randn(n_examples, 1, 96, 96)
    w0 = shared(rand(6, 1, 7, 7) * numpy.sqrt(6 / (25.)))
    b0 = shared(zeros(6))
    w1 = shared(rand(16, 6, 7, 7) * numpy.sqrt(6 / (25.)))
    b1 = shared(zeros(16))
    vv = shared(rand(16*8*8, 120) * numpy.sqrt(6.0/16./25))
    cc = shared(zeros(120))
    v = shared(zeros((120, outputs)))
    c = shared(zeros(outputs))
    params = [w0, b0, w1, b1, v, c, vv, cc]

    c0 = tanh(conv2d(sx, w0, image_shape=(batchsize, 1, 96, 96), filter_shape=(6,1,7,7)) + b0.dimshuffle(0, 'x', 'x'))
    s0 = tanh(max_pool_2d(c0, (3,3))) # this is not the correct leNet5 model, but it's closer to

    c1 = tanh(conv2d(s0, w1, image_shape=(batchsize, 6, 30, 30), filter_shape=(16,6,7,7)) + b1.dimshuffle(0, 'x', 'x'))
    s1 = tanh(max_pool_2d(c1, (3,3)))

    p_y_given_x = softmax(dot(tanh(dot(s1.flatten(2), vv)+cc), v)+c)
    nll = -log(p_y_given_x)[arange(sy.shape[0]), sy]
    cost = nll.mean()

    gparams = grad(cost, params)

    train = function([si, nsi], cost,
            updates=[(p,p-lr*gp) for p,gp in zip(params, gparams)])
    eval_and_report(train, "ConvMed", [batchsize], N=120)
Code Example #7
    def get_output(self, train):
        X = self.get_input(train)
        if theano.config.device == 'gpu':
            # max_pool_2d X and Z
            output = downsample.max_pool_2d(input=X.dimshuffle(0, 4, 2, 3, 1),
                                            ds=(self.pool_size[1], self.pool_size[2]),
                                            ignore_border=self.ignore_border)

            # max_pool_2d X and Y (with X constant)
            output = downsample.max_pool_2d(input=output.dimshuffle(0, 4, 2, 3, 1),
                                            ds=(1, self.pool_size[0]),
                                            ignore_border=self.ignore_border)
        else:  # CPU order: (batch, row, column, time, inchannel) from CPU convolution
            # max_pool_2d X and Z
            output = downsample.max_pool_2d(input=X.dimshuffle(0, 4, 1, 2, 3),
                                            ds=(self.pool_size[1], self.pool_size[2]),
                                            ignore_border=self.ignore_border)

            # max_pool_2d X and Y (with X constant)
            output = downsample.max_pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                            ds=(1, self.pool_size[0]),
                                            ignore_border=self.ignore_border)
            output = output.dimshuffle(0, 4, 3, 2, 1)

        return output
Code Example #8
    def __init__(self, rng, input, filter_shape, image_shape, 
                poolsize=(2, 2), poolmode="max", non_linear="tanh"):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height,filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.poolsize = poolsize
        self.non_linear = non_linear
        self.poolmode = poolmode
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /numpy.prod(poolsize))
        # initialize weights with random weights
        if self.non_linear=="none" or self.non_linear=="relu":
            self.W = theano.shared(numpy.asarray(rng.uniform(low=-0.01,high=0.01,size=filter_shape), 
                                                dtype=theano.config.floatX),borrow=True,name="W_conv")
        else:
            W_bound = numpy.sqrt(6. / (fan_in + fan_out))
            self.W = theano.shared(numpy.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX),borrow=True,name="W_conv")   
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True, name="b_conv")
        
        # convolve input feature maps with filters
        conv_out = conv.conv2d(input=input, filters=self.W,filter_shape=self.filter_shape, image_shape=self.image_shape)
        if self.non_linear=="tanh":
            conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            self.output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True, mode=self.poolmode)
        elif self.non_linear=="relu":
            conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            self.output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True, mode=self.poolmode)
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True, mode=self.poolmode)
            self.output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        self.params = [self.W, self.b]
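
A minimal instantiation sketch for this layer (illustrative shapes; assumes numpy, theano, T, conv, downsample and ReLU are in scope as in the snippet above):

rng = numpy.random.RandomState(1234)
x = T.tensor4('x')
layer = LeNetConvPoolLayer(rng, input=x,
                           filter_shape=(20, 1, 5, 5),    # 20 filters over 1 input feature map
                           image_shape=(50, 1, 28, 28),   # batch of 50 grayscale 28x28 images
                           poolsize=(2, 2), poolmode="max", non_linear="tanh")
f = theano.function([x], layer.output)  # output shape: (50, 20, 12, 12)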
Code Example #9
File: test_dnn.py Project: ballasn/Theano
def test_pooling_opt():
    if not dnn.dnn_available():
        raise SkipTest(dnn.dnn_available.msg)

    x = T.fmatrix()

    f = theano.function(
        [x],
        max_pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                    ignore_border=True),
        mode=mode_with_gpu)

    assert any([isinstance(n.op, dnn.GpuDnnPool)
                for n in f.maker.fgraph.toposort()])

    f(numpy.zeros((10, 10), dtype='float32'))

    f = theano.function(
        [x],
        T.grad(max_pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                           ignore_border=True).sum(),
               x),
        mode=mode_with_gpu.including("cudnn"))

    assert any([isinstance(n.op, dnn.GpuDnnPoolGrad)
                for n in f.maker.fgraph.toposort()])

    f(numpy.zeros((10, 10), dtype='float32'))
Code Example #10
File: lfw_cnn.py Project: PiscesDream/Lab_Models
    def __init__(self, rng, input_A, input_B, filter_shape, image_shape, poolsize=(2, 2)):

        print image_shape
        print filter_shape
        assert image_shape[1] == filter_shape[1]

        #calc the W_bound and init the W
        fan_in = numpy.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(numpy.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype = theano.config.floatX),
                        borrow = True)

        b_value = numpy.zeros((filter_shape[0],), 
                              dtype = theano.config.floatX)
        self.b = theano.shared(value = b_value, borrow = True)


        conv_out_A = conv.conv2d(input = input_A, filters = self.W, 
                filter_shape = filter_shape, image_shape = image_shape)
        conv_out_B = conv.conv2d(input = input_B, filters = self.W, 
                filter_shape = filter_shape, image_shape = image_shape)
        pooled_out_A = downsample.max_pool_2d(input = conv_out_A,
                                ds = poolsize, ignore_border = True)
        pooled_out_B = downsample.max_pool_2d(input = conv_out_B,
                                ds = poolsize, ignore_border = True)


        self.output_A = T.tanh(pooled_out_A + self.b.dimshuffle('x',0,'x','x'))
        self.output_B = T.tanh(pooled_out_B + self.b.dimshuffle('x',0,'x','x'))

        self.params = [self.W, self.b]
Code Example #11
def model(X, w, w2, w3, w4, w5, w_o, b_h1, b_h2, b_o, p_drop_conv, p_drop_hidden):
  
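    # b_c1..b_c3 and the alpha_* mixing coefficients are assumed to be defined in the enclosing scope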
    l1_lin  = conv2d(X, w, border_mode='full')+b_c1.dimshuffle('x', 0, 'x', 'x')
    l1a     = alpha_c1 * rectify(l1_lin) + (1.- alpha_c1) * T.tanh(l1_lin)
    l1      = max_pool_2d(l1a, (2, 2))
    l1      = dropout(l1, p_drop_conv)

    l2_lin = conv2d(l1, w2) + b_c2.dimshuffle('x', 0, 'x', 'x')
    l2a    = alpha_c2 * rectify(l2_lin) + (1. - alpha_c2) * T.tanh(l2_lin)
    l2     = max_pool_2d(l2a, (2, 2))
    l2     = dropout(l2, p_drop_conv)

    l3_lin = conv2d(l2, w3) + b_c3.dimshuffle('x', 0, 'x', 'x')
    l3a    = alpha_c3 * rectify(l3_lin) + ( 1 - alpha_c3) * T.tanh(l3_lin)
    l3b    = max_pool_2d(l3a, (2, 2))
    l3     = T.flatten(l3b, outdim=2)
    l3     = dropout(l3, p_drop_conv)

    l4_lin = T.dot(l3, w4) + b_h1 
    l4 = alpha_h1 * rectify(l4_lin) + (1.-alpha_h1) * T.tanh(l4_lin)
    l4 = dropout(l4, p_drop_hidden)

    l5_lin = T.dot(l4, w5) + b_h2
    l5 = alpha_h2 * rectify(l5_lin) + (1.-alpha_h2) * T.tanh(l5_lin)
    l5 = dropout(l5, p_drop_hidden)

    pyx = softmax(T.dot(l5, w_o) + b_o )
    return l1, l2, l3, l4, l5, pyx
Code Example #12
	def model(self, X, w1, w2, w3, w4, wo, p_drop_conv, p_drop_hidden):
		nin1 = self.init_weights((32, 3, 1, 1))
		nin2 = self.init_weights((64, 3, 1, 1))
		nin3 = self.init_weights((128, 3, 1, 1))

		l1a = self.rectify(conv2d(X, w1, border_mode = "full"))
		l1 = max_pool_2d(l1a, (2, 2))
		l1 = conv2d(l1, nin1)
		l1 = self.dropout(l1, p_drop_conv)

		l2a = self.rectify(conv2d(l1, w2))
		l2 = max_pool_2d(l2a, (2, 2))
		l2 = conv2d(l2, nin2)
		l2 = self.dropout(l2, p_drop_conv)

		l3a = self.rectify(conv2d(l2, w3))
		l3b = max_pool_2d(l3a, (2, 2))
		l3b = conv2d(l3b, nin3)
		l3 = T.flatten(l3b, outdim = 2)
		l3 = self.dropout(l3, p_drop_conv)

		l4 = self.rectify(T.dot(l3, w4))
		l4 = self.dropout(l4, p_drop_hidden)

		pyx = self.softmax(T.dot(l4, wo))
		return l1, l2, l3, l4, pyx
Code Example #13
def bench_ConvLarge(batchsize, variant=True):
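    # data_x, sx, ssx, sy, ssy, si, nsi, ssi, snsi, lr, n_examples, outputs and GlobalBenchReporter are assumed to be defined at module level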
    name = "ConvLarge_b" + str(GlobalBenchReporter.batch_size)
    name += "_" + config.linker

    # Image shape 256x256
    GlobalBenchReporter.batch_size = batchsize
    data_x.set_value(randn(n_examples, 1, 256, 256))
    w0 = shared(rand(6, 1, 7, 7) * numpy.sqrt(6 / (25.)))
    b0 = shared(zeros(6))
    w1 = shared(rand(16, 6, 7, 7) * numpy.sqrt(6 / (25.)))
    b1 = shared(zeros(16))
    vv = shared(rand(16 * 11 * 11, 120) * numpy.sqrt(6.0 / 16. / 25))
    cc = shared(zeros(120))
    v = shared(zeros((120, outputs)))
    c = shared(zeros(outputs))
    params = [w0, b0, w1, b1, v, c, vv, cc]

    c0 = tanh(conv2d(sx, w0, image_shape=(batchsize, 1, 256, 256),
                     filter_shape=(6, 1, 7, 7)) + b0.dimshuffle(0, 'x', 'x'))
    # this is not the correct leNet5 model, but it's closer to
    s0 = tanh(max_pool_2d(c0, (5, 5)))

    c1 = tanh(conv2d(s0, w1, image_shape=(batchsize, 6, 50, 50),
                     filter_shape=(16, 6, 7, 7)) + b1.dimshuffle(0, 'x', 'x'))
    s1 = tanh(max_pool_2d(c1, (4, 4)))

    p_y_given_x = softmax(dot(tanh(dot(s1.flatten(2), vv) + cc), v) + c)
    nll = -log(p_y_given_x)[arange(sy.shape[0]), sy]
    cost = nll.mean()

    gparams = grad(cost, params)

    train = function([si, nsi], cost,
                     updates=[(p, p - lr * gp) for p, gp in zip(params, gparams)],
                     name=name)
    GlobalBenchReporter.eval_model(train, name)
    if not variant:
        return

    # Versions with no inputs
    snsi.set_value(GlobalBenchReporter.batch_size)
    c0 = tanh(conv2d(ssx, w0, image_shape=(batchsize, 1, 256, 256),
                     filter_shape=(6, 1, 7, 7)) + b0.dimshuffle(0, 'x', 'x'))
    # this is not the correct leNet5 model, but it's closer to
    s0 = tanh(max_pool_2d(c0, (5, 5)))

    c1 = tanh(conv2d(s0, w1, image_shape=(batchsize, 6, 50, 50),
                     filter_shape=(16, 6, 7, 7)) + b1.dimshuffle(0, 'x', 'x'))
    s1 = tanh(max_pool_2d(c1, (4, 4)))

    p_y_given_x = softmax(dot(tanh(dot(s1.flatten(2), vv) + cc), v) + c)
    nll = -log(p_y_given_x)[arange(ssy.shape[0]), ssy]
    cost = nll.mean()

    gparams = grad(cost, params)

    train2 = function([], cost,
                      updates=[(p, p - lr * gp) for p, gp in zip(params, gparams)] + [(ssi, ssi + snsi)],
                      name=name)
    GlobalBenchReporter.bypass_eval_model(train2, name, init_to_zero=ssi)
Code Example #14
	def model(self, X, w1, w2, w3, w4, wo, p_drop_conv, p_drop_hidden):
		
		# print X
		l1a = self.rectify(conv2d(X, w1, border_mode = "full"))
		l1 = max_pool_2d(l1a, (2, 2))
		l1 = self.dropout(l1, p_drop_conv)
		# print np.mean(l1)
		
		l2a = self.rectify(conv2d(l1, w2))
		l2 = max_pool_2d(l2a, (2, 2))
		l2 = self.dropout(l2, p_drop_conv)
		# print np.mean(l2)

		l3a = self.rectify(conv2d(l2, w3))
		l3b = max_pool_2d(l3a, (2, 2))
		l3 = T.flatten(l3b, outdim = 2)
		l3 = self.dropout(l3, p_drop_conv)
		# print np.mean(l3)
		
		l4 = self.rectify(T.dot(l3, w4))
		l4 = self.dropout(l4, p_drop_hidden)
		# print np.mean(l4)
		# l4 = T.dot(l4, wo)
		sig = T.dot(l4, wo)
		# pyx = self.softmax(T.dot(l4, wo))
		return l1, l2, l3, l4, sig
Code Example #15
def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):

    # conv + ReLU + pool
    # border_mode='full' zero-pads the input; the default is 'valid'
    l1a = rectify(conv2d(X, w, border_mode='full'))
    # pool with a 2x2 kernel, keeping the largest value in each window
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    # conv + ReLU + pool
    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    # conv + ReLU + pool
    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    # convert an n-dim array to 2 dims; if l3b has more than 2 dims the trailing dims are collapsed
    # flatten before entering the FC layer
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    # FC + ReLU
    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)


    # output layer + softmax
    pyx = softmax(T.dot(l4, w_o))

    return l1, l2, l3, l4, pyx
Code Example #16
def convolutional_model(X, w_1, w_2, w_3, w_4, w_5, w_6, p_1, p_2, p_3, p_4, p_5):
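    # b_1, b_2 and b_3 are assumed to be defined in the enclosing scope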
    l1 = dropout(T.tanh(max_pool_2d(T.maximum(conv2d(X, w_1, border_mode='full'), 0.),
                                    (2, 2), ignore_border=True)
                        + b_1.dimshuffle('x', 0, 'x', 'x')), p_1)
    l2 = dropout(T.tanh(max_pool_2d(T.maximum(conv2d(l1, w_2), 0.),
                                    (2, 2), ignore_border=True)
                        + b_2.dimshuffle('x', 0, 'x', 'x')), p_2)
    # flatten to switch back to 1d layers
    l3 = dropout(T.flatten(T.tanh(max_pool_2d(T.maximum(conv2d(l2, w_3), 0.),
                                              (2, 2), ignore_border=True)
                                  + b_3.dimshuffle('x', 0, 'x', 'x')), outdim=2), p_3)
    l4 = dropout(T.maximum(T.dot(l3, w_4), 0.), p_4)
    l5 = dropout(T.maximum(T.dot(l4, w_5), 0.), p_5)
    return T.dot(l5, w_6)
Code Example #17
File: cov_net_enhancer.py Project: r3fang/foo
def model(X, w1, w2, w3, Max_Pooling_Shape, p_drop_conv):
    # Max_Pooling_Shape must be large enough that the pooling leaves only one element per feature map
    l1 = dropout(max_pool_2d(rectify(conv2d(X, w1, border_mode='valid')), (2,2)), p_drop_conv)
    l2 = T.flatten(dropout(max_pool_2d(rectify(conv2d(l1, w1, border_mode='valid')), Max_Pooling_Shape), p_drop_conv), outdim=2)

    # p_drop_hidden is assumed to be defined in the enclosing scope
    l2 = dropout(rectify(T.dot(l2, w2)), p_drop_hidden)
    pyx = softmax(T.dot(l2, w3))
    return pyx
Code Example #18
def model(X, w_1, w_2, w_3, w_4, w_5, p_1, p_2):
    # T.maximum is the rectify activation
    l1 = dropout(max_pool_2d(T.maximum(conv2d(X, w_1, border_mode='full'), 0.), (2, 2)), p_1)
    l2 = dropout(max_pool_2d(T.maximum(conv2d(l1, w_2), 0.), (2, 2)), p_1)
    # flatten to switch back to 1d layers - with "outdim = 2" (2d) output
    l3 = dropout(T.flatten(max_pool_2d(T.maximum(conv2d(l2, w_3), 0.), (2, 2)), outdim=2), p_1)
    l4 = dropout(T.maximum(T.dot(l3, w_4), 0.), p_2)
    return T.dot(l4, w_5) #T.nnet.softmax(T.dot(l4, w_5))
Code Example #19
 def model(X, w_1, w_2, w_3, w_4, w_5, w_6, w_7, p_1, p_2):
   l1 = T.maximum(conv2d(X, w_1, border_mode='full'),0.)
   l2 = dropout(max_pool_2d(T.maximum(conv2d(l1, w_2), 0.), (2, 2)), p_1)
   l3 = dropout(max_pool_2d(T.maximum(conv2d(l2, w_3), 0.), (2, 2)), p_1)
   l4 = dropout(max_pool_2d(T.maximum(conv2d(l3, w_4), 0.), (2, 2)), p_1)
   l5 = dropout(T.flatten(max_pool_2d(T.maximum(conv2d(l4, w_5), 0.), (2, 2)), outdim=2), p_1)
   l6 = dropout(T.maximum(T.dot(l5, w_6), 0.), p_2)
   return T.nnet.softmax(T.dot(l6, w_7))
Code Example #20
File: better_conv_net.py Project: JBed/Simple_Theano
def model(x, w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o):
    c1 = T.maximum(0, conv2d(x, w_c1) + b_c1.dimshuffle('x', 0, 'x', 'x'))
    p1 = max_pool_2d(c1, (3, 3))
    c2 = T.maximum(0, conv2d(p1, w_c2) + b_c2.dimshuffle('x', 0, 'x', 'x'))
    p2 = max_pool_2d(c2, (2, 2))
    p2_flat = p2.flatten(2)
    h3 = T.maximum(0, T.dot(p2_flat, w_h3) + b_h3)
    p_y_given_x = T.nnet.softmax(T.dot(h3, w_o) + b_o)
    return p_y_given_x
Code Example #21
File: cnn.py Project: haoyue92/theano-deep-learning
def model(x, w1, b1, w2, b2, w3, b3, w, b):
    cov1 = T.maximum(0, conv2d(x, w1)+b1.dimshuffle('x', 0, 'x', 'x'))
    pool1 = max_pool_2d(cov1, (2, 2))
    cov2 = T.maximum(0, conv2d(pool1, w2)+b2.dimshuffle('x', 0, 'x', 'x'))
    pool2 = max_pool_2d(cov2, (2, 2))
    pool2_flat = pool2.flatten(2)
    h3 = T.maximum(0, T.dot(pool2_flat, w3) + b3)
    predict_y = T.nnet.softmax(T.dot(h3, w) + b)
    return predict_y
Code Example #22
File: cvae.py Project: gemoku/Theano-Lights
 def conv_enc(X, p):
     h1 = rectify(max_pool_2d(conv(X, p.w1e), (2, 2)))
     h2 = rectify(max_pool_2d(conv(h1, p.w2e), (2, 2)))
     h3 = rectify(max_pool_2d(conv(h2, p.w3e), (2, 2)))
     h3 = T.flatten(h3, outdim=2)
     h4 = T.tanh(T.dot(h3, p.w4e) + p.b4e)
     mu = T.dot(h4, p.wmu) + p.bmu
     log_sigma = 0.5 * (T.dot(h4, p.wsigma) + p.bsigma)
     return mu, log_sigma
Code Example #23
def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 4, 1, 2, 3))

    if pool_mode == 'max':
        # pooling over conv_dim2, conv_dim1 (the last two dimensions after the dimshuffle)
        output = downsample.max_pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
                                        ds=(pool_size[1], pool_size[0]),
                                        st=(strides[1], strides[0]),
                                        ignore_border=ignore_border,
                                        padding=padding,
                                        mode='max')

        # pooling over conv_dim3
        pool_out = downsample.max_pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                          ds=(1, pool_size[2]),
                                          st=(1, strides[2]),
                                          ignore_border=ignore_border,
                                          padding=padding,
                                          mode='max')

    elif pool_mode == 'avg':
        # pooling over conv_dim2, conv_dim1 (the last two dimensions after the dimshuffle)
        output = downsample.max_pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
                                        ds=(pool_size[1], pool_size[0]),
                                        st=(strides[1], strides[0]),
                                        ignore_border=ignore_border,
                                        padding=padding,
                                        mode='average_exc_pad')

        # pooling over conv_dim3
        pool_out = downsample.max_pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                          ds=(1, pool_size[2]),
                                          st=(1, strides[2]),
                                          ignore_border=ignore_border,
                                          padding=padding,
                                          mode='average_exc_pad')
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
    return pool_out
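
A usage sketch for pool3d (illustrative shapes; assumes the function above is in scope, with a 5D input in 'th' ordering, i.e. (batch, channels, conv_dim1, conv_dim2, conv_dim3)):

import numpy
import theano
import theano.tensor as T

x = T.TensorType(theano.config.floatX, (False,) * 5)('x')
y = pool3d(x, pool_size=(2, 2, 2), strides=(2, 2, 2), pool_mode='max')
f = theano.function([x], y)
out = f(numpy.random.rand(1, 3, 8, 8, 8).astype(theano.config.floatX))
print(out.shape)  # (1, 3, 4, 4, 4)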
Code Example #24
File: layers.py Project: tfjgeorge/ift6268
def avgpool(X, X_test, input_shape, size):
	"""
	An average-pooling layer
	"""

	pooled = max_pool_2d(input=X, ds=size, ignore_border=True,  mode='average_exc_pad')
	pooled_test = max_pool_2d(input=X_test, ds=size, ignore_border=True, mode='average_exc_pad')
	output_shape = (input_shape[0], input_shape[1], input_shape[2]/size[0], input_shape[3]/size[1])

	return pooled, pooled_test, [], output_shape
Code Example #25
File: layers.py Project: tfjgeorge/ift6268
def maxpool(X, X_test, input_shape, size):
	"""
	A maxpool layer
	"""

	pooled = max_pool_2d(input=X, ds=size, ignore_border=True)
	pooled_test = max_pool_2d(input=X_test, ds=size, ignore_border=True)
	output_shape = (input_shape[0], input_shape[1], input_shape[2]/size[0], input_shape[3]/size[1])

	return pooled, pooled_test, [], output_shape
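
A usage sketch for the two helpers above (illustrative shapes; assumes max_pool_2d is imported from theano.tensor.signal.downsample as elsewhere in this file):

import theano.tensor as T

X = T.tensor4('X')
X_test = T.tensor4('X_test')
pooled, pooled_test, params, out_shape = maxpool(X, X_test, (64, 32, 28, 28), (2, 2))
# out_shape == (64, 32, 14, 14); params is empty because pooling has no learnable weights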
Code Example #26
File: CNNraw.py Project: ifenghao/myDeepLearning
def model(X, wconv1, bconv1, wconv2, bconv2, wconv3, bconv3, wfull, bfull, wout, bout):
    lconv1 = T.nnet.sigmoid(conv2d(X, wconv1, border_mode='full') + bconv1.dimshuffle('x', 0, 'x', 'x'))
    lds1 = max_pool_2d(lconv1, (2, 2))
    lconv2 = T.nnet.sigmoid(conv2d(lds1, wconv2) + bconv2.dimshuffle('x', 0, 'x', 'x'))
    lds2 = max_pool_2d(lconv2, (2, 2))
    lconv3 = T.nnet.sigmoid(conv2d(lds2, wconv3) + bconv3.dimshuffle('x', 0, 'x', 'x'))
    lds3 = max_pool_2d(lconv3, (2, 2))
    lflat = T.flatten(lds3, outdim=2)
    lfull = T.nnet.sigmoid(T.dot(lflat, wfull) + bfull)
    return softmax(T.dot(lfull, wout) + bout)
Code Example #27
File: cnn.py Project: xwj95/Airwriting
def model(X, w1, w2, w, b):
    l11 = relu(conv2d(X, w1, border_mode="valid"))
    l1 = max_pool_2d(l11, (2, 2))

    l21 = relu(conv2d(l1, w2, border_mode="valid"))
    l22 = max_pool_2d(l21, (2, 2))
    l2 = T.flatten(l22, outdim=2)

    l3 = T.dot(l2, w) + b
    l = softmax(l3)
    return l
Code Example #28
File: thconv.py Project: Sasikanth3/DeepNetTheano
def model(x, w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o, p=0.0):
    c1 = T.maximum(0, conv2d(x, w_c1) + b_c1.dimshuffle('x', 0, 'x', 'x'))
    p1 = max_pool_2d(c1, (3, 3),ignore_border=False)

    c2 = T.maximum(0, conv2d(p1, w_c2) + b_c2.dimshuffle('x', 0, 'x', 'x'))
    p2 = max_pool_2d(c2, (2, 2),ignore_border=False)

    p2_flat = p2.flatten(2)
    p2_flat = dropout(p2_flat, p=p)
    h3 = T.maximum(0, T.dot(p2_flat, w_h3) + b_h3)
    p_y_given_x = T.nnet.sigmoid(T.dot(h3, w_o) + b_o)
    return p_y_given_x
Code Example #29
def predict_custom_image(params, testImgFilename='own_0.png', activation= activation_convmlp, testImgFilenameDir = '../data/custom/'):

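    # fli.processImg and activation_convmlp are assumed to be provided by the surrounding module;
    # params is assumed to hold the trained weights ordered from the output layer backwards (see the indices below)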
    test_img_value = filter(str.isdigit, testImgFilename)

    test_img = fli.processImg(testImgFilenameDir, testImgFilename)

    nkerns = [20, 50]
    batch_size = 1
    poolsize = (2, 2)

    layer0_input = test_img.reshape((batch_size, 1, 28, 28)).astype(numpy.float32)

    conv_out_0 = conv2d(
        input=layer0_input,
        filters=params[6],
        input_shape=(batch_size, 1, 28, 28),
        filter_shape=(nkerns[0], 1, 5, 5)
    )

    # downsample each feature map individually, using maxpooling
    pooled_out_0 = downsample.max_pool_2d(
        input=conv_out_0,
        ds=poolsize,
        ignore_border=True
    )

    output_0 = activation(pooled_out_0 + params[7].dimshuffle('x', 0, 'x', 'x'))

    conv_out_1 = conv2d(
        input=output_0,
        filters=params[4],
        input_shape=(batch_size, nkerns[0], 12, 12),
        filter_shape=(nkerns[1], nkerns[0], 5, 5),
    )

    # downsample each feature map individually, using maxpooling
    pooled_out_1 = downsample.max_pool_2d(
        input=conv_out_1,
        ds=poolsize,
        ignore_border=True
    )

    output_1 = activation(pooled_out_1 + params[5].dimshuffle('x', 0, 'x', 'x'))
    output_2 = activation(T.dot(output_1.flatten(2), params[2]) + params[3])

    final_output = T.dot(output_2, params[0]) + params[1]
    p_y_given_x = T.nnet.softmax(final_output)
    y_pred = T.argmax(p_y_given_x, axis=1)
    testfunc = theano.function([], [y_pred[0]])
    prediction = testfunc()[0]
    correct = (int(test_img_value) == prediction)
    print('The prediction ' + str(testfunc()[0]) + ' for ' + testImgFilename + '  is ' + str(correct) + '.')
    return correct
Code Example #30
 def predict(self, new_data, batch_size):
     img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
     conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
     if self.non_linear=="tanh":
         conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
         output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
     elif self.non_linear=="relu":
         conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
         output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
     else:
         pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
         output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
     return output
Code Example #31
def cnn_layer(tparams, input, options, prefix='cnn'):
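    # _p is assumed to be the usual key-prefixing helper, e.g. _p('cnn', 'W') -> 'cnn_W'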
    filter_shape = options[_p(prefix, 'filter')]
    poolsize = options[_p(prefix, 'pool')]
    image_shape = options[_p(prefix, 'image')]
    assert image_shape[1] == filter_shape[1]
    # convolve input feature maps with filters
    conv_out = conv.conv2d(input=input,
                           filters=tparams[_p(prefix, 'W')],
                           filter_shape=filter_shape,
                           image_shape=image_shape)

    # downsample each feature map individually, using maxpooling
    pooled_out = downsample.max_pool_2d(input=conv_out,
                                        ds=poolsize,
                                        ignore_border=False)

    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will
    # thus be broadcasted across mini-batches and feature map
    # width & height
    output = tensor.tanh(pooled_out +
                         tparams[_p(prefix, 'b')].dimshuffle('x', 0, 'x', 'x'))
    return output
Code Example #32
File: layers.py Project: yyuzhong/DL-Benchmarks
    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2),
                 stride=(1, 1)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input
        fan_in = np.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
                   np.prod(poolsize))
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            np.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape,
            subsample=stride
        )

        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Code Example #33
    def __init__(self, input, filter_shape, image_shape, poolsize=(2, 2)):

        assert image_shape[1] == filter_shape[1]
        self.input = input

        fan_in = numpy.prod(filter_shape[1:])

        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))

        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(numpy.asarray(rng.uniform(low=-W_bound,
                                                         high=W_bound,
                                                         size=filter_shape),
                                             dtype=theano.config.floatX),
                               name='W_conv',
                               borrow=True)

        b_values = numpy.zeros((filter_shape[0], ), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, name='b_conv', borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(input=input,
                               filters=self.W,
                               filter_shape=filter_shape,
                               image_shape=image_shape)

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(input=conv_out,
                                            ds=poolsize,
                                            ignore_border=True)

        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        self.params = [self.W, self.b]

        self.input = input
Code Example #34
    def __init__(self,
                 input,
                 params_W,
                 params_b,
                 filter_shape,
                 image_shape,
                 poolsize=(2, 2)):
        assert image_shape[1] == filter_shape[1]
        self.input = input

        self.W = params_W

        self.b = params_b
        # conv
        conv_out = conv.conv2d(input=self.input,
                               filters=self.W,
                               filter_shape=filter_shape,
                               image_shape=image_shape)
        # maxpooling
        pooled_out = downsample.max_pool_2d(input=conv_out,
                                            ds=poolsize,
                                            ignore_border=True)
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.params = [self.W, self.b]
Code Example #35
File: test_downsample.py Project: yanyan-cas/Theano
    def test_DownsampleFactorMax(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        # generate random images
        maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3))
        imval = rng.rand(4, 2, 16, 16)
        images = tensor.dtensor4()
        for maxpoolshp, ignore_border, mode in product(maxpoolshps,
                                                       [True, False],
                                                       ['max',
                                                        'sum',
                                                        'average_inc_pad',
                                                        'average_exc_pad']):
                # print 'maxpoolshp =', maxpoolshp
                # print 'ignore_border =', ignore_border

                # Pure Numpy computation
                numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,
                                                          ignore_border,
                                                          mode=mode)
                output = max_pool_2d(images, maxpoolshp, ignore_border,
                                     mode=mode)
                f = function([images, ], [output, ])
                output_val = f(imval)
                utt.assert_allclose(output_val, numpy_output_val)

                # DownsampleFactorMax op
                maxpool_op = DownsampleFactorMax(maxpoolshp,
                                                 ignore_border=ignore_border,
                                                 mode=mode)(images)

                output_shape = DownsampleFactorMax.out_shape(imval.shape, maxpoolshp,
                                                             ignore_border=ignore_border)
                utt.assert_allclose(numpy.asarray(output_shape), numpy_output_val.shape)
                f = function([images], maxpool_op)
                output_val = f(imval)
                utt.assert_allclose(output_val, numpy_output_val)
Code Example #36
    def construct(self, X, n_input_kernels, image_shape):
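        # ut.init_weights_conv and ACTIVATION_FUNCTIONS are assumed to be provided by the surrounding module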

        self.filter_shape = tuple(list([self.n_kernels]) + list([n_input_kernels]) \
                                  + list(self.single_filter_shape))

        W, b = ut.init_weights_conv(self.filter_shape)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(input=X,
                               filters=W,
                               filter_shape=self.filter_shape,
                               image_shape=image_shape)

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(input=conv_out,
                                            ds=self.pool_size,
                                            ignore_border=True)
        self.output = T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))

        if self.activation:
            self.output = ACTIVATION_FUNCTIONS[self.activation](self.output)

        self.n_outputs = self.n_kernels * 4
        self.params = [W, b]
Code Example #37
	def process(self,data,batchSize):
		'''
		>>>process newly input data

		>>>type data: T.tensor4
		>>>para data: newly input data
		>>>type batchSize: int
		>>>para batchSize: minibatch size
		'''
		shape=(batchSize,self.shape[1],self.shape[2],self.shape[3])

		conv_out=conv.conv2d(
			input=data,
			filters=self.w,
			filter_shape=self.filters,
			image_shape=shape
		)
		pool_out=downsample.max_pool_2d(
			input=conv_out,
			ds=self.pool,
			ignore_border=True
		)
		output=ReLU(pool_out+self.b.dimshuffle('x',0,'x','x'))
		return output
Code Example #38
 def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
     # assert condition: if the condition is True execution continues; if it is False the program aborts
     # image_shape[1] and filter_shape[1] are both the number of input feature maps, so they must match
     assert image_shape[1] == filter_shape[1]
     self.input = input
     # each hidden unit (i.e. pixel) has num input feature maps * filter height * filter width
     # connections to the layer below, which can be computed as numpy.prod(filter_shape[1:])
     fan_in = numpy.prod(filter_shape[1:])
     # each unit in the lower layer receives a gradient from: "num output feature maps * filter height * filter width" / pooling size
     fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                numpy.prod(poolsize))
     # given fan_in and fan_out above, plug them into the formula to randomly initialize W, the linear convolution kernel
     W_bound = numpy.sqrt(6. / (fan_in + fan_out))
     self.W = theano.shared(numpy.asarray(rng.uniform(low=-W_bound,
                                                      high=W_bound,
                                                      size=filter_shape),
                                          dtype=theano.config.floatX),
                            borrow=True)
     # the bias b is a 1D vector, one bias per output feature map;
     # the number of output feature maps equals the number of filters, so it is initialized from filter_shape[0] (number of filters)
     b_values = numpy.zeros((filter_shape[0], ), dtype=theano.config.floatX)
     self.b = theano.shared(value=b_values, borrow=True)
     # convolve the input image with the filters via conv.conv2d
     # the bias is not added (and no sigmoid is applied) right after the convolution; this is a simplification
     conv_out = conv.conv2d(input=input,
                            filters=self.W,
                            filter_shape=filter_shape,
                            image_shape=image_shape)
     # maxpooling: the max-subsampling step
     pooled_out = downsample.max_pool_2d(input=conv_out,
                                         ds=poolsize,
                                         ignore_border=True)
     # add the bias, then map through tanh to get the final output of the convolution + subsampling layer
     self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
     # parameters of the convolution + subsampling layer
     self.params = [self.W, self.b]
Code Example #39
    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):

        assert image_shape[1] == filter_shape[1]
        self.input = input

        fan_in = numpy.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))

        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(numpy.asarray(rng.uniform(low=-W_bound,
                                                         high=W_bound,
                                                         size=filter_shape),
                                             dtype=theano.config.floatX),
                               borrow=True)

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0], ), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolution
        conv_out = conv.conv2d(input=input,
                               filters=self.W,
                               filter_shape=filter_shape,
                               image_shape=image_shape)

        # downsample
        pooled_out = downsample.max_pool_2d(input=conv_out,
                                            ds=poolsize,
                                            ignore_border=True)

        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]
Code Example #40
    def __init__(self, rng, input, filter_w, filter_h, filter_num, img_w,
                 img_h, input_feature, batch_size, poolsize):

        self.input = input

        fan_in = filter_w * filter_h * input_feature
        fan_out = (filter_num * filter_h * filter_w) / numpy.prod(poolsize)

        # initialize weights with random weights
        W_shape = (filter_num, input_feature, filter_w, filter_h)
        W_bound = numpy.sqrt(1. / (fan_in + fan_out))
        self.W = theano.shared(numpy.asarray(rng.uniform(low=-W_bound,
                                                         high=W_bound,
                                                         size=W_shape),
                                             dtype=theano.config.floatX),
                               borrow=True)

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_num, ), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        conv_out = conv.conv2d(input=input,
                               filters=self.W,
                               filter_shape=(filter_num, input_feature,
                                             filter_h, filter_w),
                               image_shape=(batch_size, input_feature, img_h,
                                            img_w))

        pooled_out = downsample.max_pool_2d(input=conv_out,
                                            ds=poolsize,
                                            ignore_border=True)

        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]
Code Example #41
    def __dealWithOneDoc(self, DocSentenceCount0, oneDocSentenceCount1, docs, oneDocSentenceWordCount, docW, docB, sentenceW, sentenceB):
#         t = T.and_((shareRandge < oneDocSentenceCount1 + 1),  (shareRandge >= DocSentenceCount0)).nonzero()
        oneDocSentenceWordCount = oneDocSentenceWordCount[DocSentenceCount0:oneDocSentenceCount1 + 1]
        
        sentenceResults, _ = theano.scan(fn=self.__dealWithSentence,
                            non_sequences=[docs, sentenceW, sentenceB],
                             sequences=[dict(input=oneDocSentenceWordCount, taps=[-1, -0])],
                             strict=True)
        
#         p = printing.Print('docPool')
#         docPool = p(docPool)
#         p = printing.Print('sentenceResults')
#         sentenceResults = p(sentenceResults)
#         p = printing.Print('doc_out')
#         doc_out = p(doc_out)
        doc_out = conv.conv2d(input=sentenceResults, filters=docW)
        docPool = downsample.max_pool_2d(doc_out, (self.__MAXDIM, 1), mode= self.__pooling_mode, ignore_border=False)
        docOutput = T.tanh(docPool + docB.dimshuffle([0, 'x', 'x']))
        doc_embedding = docOutput.flatten(1)
        
#         p = printing.Print('doc_embedding')
#         doc_embedding = p(doc_embedding)
        
        return doc_embedding
Code Example #42
File: downsample.py Project: shaoxuan92/treeano
    def compute_output(self, network, in_vw):
        mode = network.find_hyperparameter(["mode"])
        pool_size = network.find_hyperparameter(["pool_size"])
        stride = network.find_hyperparameter(["pool_stride", "stride"], None)
        pad = network.find_hyperparameter(["pool_pad", "pad"], (0, 0))
        ignore_border = network.find_hyperparameter(["ignore_border"], True)
        if ((stride is not None) and (stride != pool_size)
                and (not ignore_border)):
            # as of 20150813
            # for more information, see:
            # https://groups.google.com/forum/#!topic/lasagne-users/t_rMTLAtpZo
            msg = ("Setting stride not equal to pool size and not ignoring"
                   " border results in using a slower (cpu-based)"
                   " implementation")
            # making this an assertion instead of a warning to make sure it
            # is done
            assert False, msg

        out_shape = pool_output_shape(input_shape=in_vw.shape,
                                      axes=(2, 3),
                                      pool_shape=pool_size,
                                      strides=stride,
                                      pads=pad)
        out_var = max_pool_2d(input=in_vw.variable,
                              ds=pool_size,
                              st=stride,
                              ignore_border=ignore_border,
                              padding=pad,
                              mode=mode)

        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        )
Code Example #43
    def __init__(self,
                 rng,
                 filter_shape,
                 image_shape,
                 poolsize=(2, 2),
                 xin=None):

        assert image_shape[1] == filter_shape[1]
        self.image_shape = theano.shared(value=np.asarray(image_shape,
                                                          dtype='int16'),
                                         borrow=True)
        self.poolsize = poolsize
        #self.input = input
        if xin:
            self.x = xin
        else:
            self.x = T.matrix(name='input')
        self.x1 = self.x.reshape(self.image_shape, ndim=4)
        self.filter_shape = filter_shape

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = old_div(filter_shape[0] * np.prod(filter_shape[2:]),
                          np.prod(poolsize))
        # initialize weights with random weights
        W_bound = np.sqrt(old_div(6., (fan_in + fan_out)))
        self.W = theano.shared(np.asarray(rng.uniform(low=-W_bound,
                                                      high=W_bound,
                                                      size=filter_shape),
                                          dtype=theano.config.floatX),
                               borrow=True)
        self.W_prime = self.W[:, :, ::-1, ::-1]
        self.W_prime = self.W_prime.dimshuffle(1, 0, 2, 3)
        #self.W_prime=self.W_prime[:,::-1]
        #print self.W.get_value()
        #print self.W_prime.eval()
        # the bias is a 1D tensor -- one bias per output feature map
        b_values = np.zeros((filter_shape[0], ), dtype=theano.config.floatX)
        bp_values = np.zeros((filter_shape[1], ), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)
        self.b_prime = theano.shared(value=bp_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(
            input=self.x1,
            filters=self.W,
            filter_shape=filter_shape,
            #image_shape=self.image_shape.eval(),
            border_mode='full')
        bp = old_div((filter_shape[2] - 1), 2)

        conv_out = conv_out[:, :, bp:-bp, bp:-bp]

        # downsample each feature map individually, using maxpooling
        self.pooled_out = downsample.max_pool_2d(input=conv_out,
                                                 ds=poolsize,
                                                 ignore_border=True)
        #shp=conv_out.shape
        #y = T.nnet.neighbours.images2neibs(conv_out, poolsize,mode='ignore_borders')
        #pooled_out=y.mean(axis=-1).reshape((shp[0],shp[1],shp[2]/poolsize[0],shp[3]/poolsize[1]))

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        #self.hidden = T.tanh(self.pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.hidden = T.maximum(
            0, (self.pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')))

        # store parameters of this layer
        self.params = [self.W, self.b]
Code Example #44
File: qa_cnn.py Project: starrybubble/WikiQA-CNN
    def __init__(self, rng, linp, rinp, filter_shape, poolsize):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type linp: theano.tensor.TensorType
        :param linp: symbolic variable that describes the left input of the
        architecture (one minibatch)
        
        :type rinp: theano.tensor.TensorType
        :param rinp: symbolic variable that describes the right input of the
        architecture (one minibatch)

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, 1,
                              filter height,filter width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
        """

        self.linp = linp
        self.rinp = rinp
        self.filter_shape = filter_shape
        self.poolsize = poolsize


        # each hidden unit (i.e. pixel) has num input feature maps * filter height * filter width
        # connections to the layer below, which can be computed as np.prod(filter_shape[1:])
        fan_in = np.prod(filter_shape[1:])
        

        # each unit in the lower layer receives a gradient from: "num output feature maps * filter height * filter width" / pooling size
        fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /np.prod(poolsize))
       

        # given fan_in and fan_out above, plug them into the formula to randomly initialize W, the linear convolution kernel
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX),borrow=True,name="W_conv")
            
        # the bias b is a 1D vector, one bias per output feature map;
        # the number of output feature maps equals the number of filters, so it is initialized from filter_shape[0] (number of filters)
        b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        # create the shared variable self.b from b_values
        self.b = theano.shared(value=b_values, borrow=True, name="b_conv")
        

        # convolve the input features with the filters via conv.conv2d
        lconv_out = conv.conv2d(input=linp, filters=self.W)
        rconv_out = conv.conv2d(input=rinp, filters=self.W)
        
        # self.b.dimshuffle('x', 0, 'x', 'x') turns the 1D vector self.b into shape (1, filter_shape[0], 1, 1)
        # activation function (each group of features is summed, weighted, and offset by the bias)
        lconv_out_tanh = T.tanh(lconv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        rconv_out_tanh = T.tanh(rconv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        
        # pooling operation
        self.loutput = downsample.max_pool_2d(input=lconv_out_tanh, ds=self.poolsize, ignore_border=True, mode="average_exc_pad")
        self.routput = downsample.max_pool_2d(input=rconv_out_tanh, ds=self.poolsize, ignore_border=True, mode="average_exc_pad")
        self.params = [self.W, self.b]
Code Example #45
File: qa_cnn.py Project: shamilcm/selektor
    def __init__(self, rng, linp, rinp, filter_shape, poolsize):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type linp: theano.tensor.TensorType
        :param linp: symbolic variable that describes the left input of the
        architecture (one minibatch)
        
        :type rinp: theano.tensor.TensorType
        :param rinp: symbolic variable that describes the right input of the
        architecture (one minibatch)

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, 1,
                              filter height,filter width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
        """

        self.linp = linp
        self.rinp = rinp
        self.filter_shape = filter_shape
        self.poolsize = poolsize

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
                   np.prod(poolsize))
        # initialize weights with random weights
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(np.asarray(rng.uniform(low=-W_bound,
                                                      high=W_bound,
                                                      size=filter_shape),
                                          dtype=theano.config.floatX),
                               borrow=True,
                               name="W_conv")
        b_values = np.zeros((filter_shape[0], ), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True, name="b_conv")

        # convolve input feature maps with filters

        lconv_out = conv.conv2d(input=linp, filters=self.W)
        rconv_out = conv.conv2d(input=rinp, filters=self.W)

        lconv_out_tanh = T.tanh(lconv_out +
                                self.b.dimshuffle('x', 0, 'x', 'x'))
        rconv_out_tanh = T.tanh(rconv_out +
                                self.b.dimshuffle('x', 0, 'x', 'x'))
        self.loutput = downsample.max_pool_2d(input=lconv_out_tanh,
                                              ds=self.poolsize,
                                              ignore_border=True,
                                              mode="average_exc_pad")
        self.routput = downsample.max_pool_2d(input=rconv_out_tanh,
                                              ds=self.poolsize,
                                              ignore_border=True,
                                              mode="average_exc_pad")
        self.params = [self.W, self.b]
Code example #46
0
    def __init__(self,
                 rng,
                 input,
                 filter_shape,
                 image_shape,
                 poolsize=(2, 2),
                 non_linear="tanh"):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height,filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.poolsize = poolsize
        self.non_linear = non_linear
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        if self.non_linear == "none" or self.non_linear == "relu":
            self.W = theano.shared(numpy.asarray(rng.uniform(
                low=-0.01, high=0.01, size=filter_shape),
                                                 dtype=theano.config.floatX),
                                   borrow=True,
                                   name="W_conv")
        else:
            W_bound = numpy.sqrt(6. / (fan_in + fan_out))
            self.W = theano.shared(numpy.asarray(rng.uniform(
                low=-W_bound, high=W_bound, size=filter_shape),
                                                 dtype=theano.config.floatX),
                                   borrow=True,
                                   name="W_conv")
        b_values = numpy.zeros((filter_shape[0], ), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True, name="b_conv")

        # convolve input feature maps with filters
        conv_out = conv.conv2d(input=input,
                               filters=self.W,
                               filter_shape=self.filter_shape,
                               image_shape=self.image_shape)
        if self.non_linear == "tanh":
            conv_out_tanh = T.tanh(conv_out +
                                   self.b.dimshuffle('x', 0, 'x', 'x'))
            self.output = downsample.max_pool_2d(input=conv_out_tanh,
                                                 ds=self.poolsize,
                                                 ignore_border=True)
        elif self.non_linear == "relu":
            conv_out_tanh = ReLU(conv_out +
                                 self.b.dimshuffle('x', 0, 'x', 'x'))
            self.output = downsample.max_pool_2d(input=conv_out_tanh,
                                                 ds=self.poolsize,
                                                 ignore_border=True)
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out,
                                                ds=self.poolsize,
                                                ignore_border=True)
            self.output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        self.params = [self.W, self.b]
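A minimal usage sketch of the layer above, assuming it is named LeNetConvPoolLayer (as its docstring suggests) and that a ReLU helper such as T.maximum(0., x) is visible to it; the shapes are hypothetical:

import numpy
import theano
import theano.tensor as T

def ReLU(x):
    # the nonlinearity the layer expects to find in scope
    return T.maximum(0., x)

rng = numpy.random.RandomState(1234)
x = T.tensor4('x')
layer = LeNetConvPoolLayer(rng, input=x,
                           filter_shape=(4, 1, 5, 5),   # 4 filters over 1 input map
                           image_shape=(8, 1, 28, 28),  # batch of 8 28x28 images
                           poolsize=(2, 2),
                           non_linear="relu")
f = theano.function([x], layer.output)
out = f(numpy.zeros((8, 1, 28, 28), dtype=theano.config.floatX))
print(out.shape)  # (8, 4, 12, 12): 'valid' conv gives 28-5+1=24, pooling halves it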
Code example #47
0
pylab.imshow(filtered_img[0, 0, :, :])  # 0: minibatch index; 0: 1st filter
pylab.title('convolution 1')

pylab.subplot(2, 3, 3)
pylab.axis("off")
pylab.imshow(filtered_img[0, 1, :, :])  # 0: minibatch index; 1: 2nd filter
pylab.title('convolution 2')

#pylab.show()

# maxpooling
from theano.tensor.signal import downsample

input = T.tensor4('input')
maxpool_shape = (2, 2)
pooled_img = downsample.max_pool_2d(input, maxpool_shape, ignore_border=False)

maxpool = theano.function(inputs=[input], outputs=[pooled_img])

pooled_res = numpy.squeeze(maxpool(filtered_img))
#pylab.figure(2)
pylab.subplot(235)
pylab.axis('off')
pylab.imshow(pooled_res[0, :, :])
pylab.title('down sampled 1')

pylab.subplot(236)
pylab.axis('off')
pylab.imshow(pooled_res[1, :, :])
pylab.title('down sampled 2')
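Note that ignore_border=False above keeps the ragged border pool, so odd-sized maps do not lose their last row/column. A small shape check (my own sketch; downsample is deprecated in later Theano releases in favor of theano.tensor.signal.pool.pool_2d):

import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample

x = T.tensor4('x')
keep = theano.function([x], downsample.max_pool_2d(x, (2, 2), ignore_border=False))
drop = theano.function([x], downsample.max_pool_2d(x, (2, 2), ignore_border=True))

img = numpy.arange(25, dtype=theano.config.floatX).reshape(1, 1, 5, 5)
print(keep(img).shape)  # (1, 1, 3, 3): the partial border pool is kept
print(drop(img).shape)  # (1, 1, 2, 2): the fifth row/column is discarded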
Code example #48
0
 def __call__(self, X):
     return downsample.max_pool_2d(X, (self.px, self.py),
                                   ignore_border=True)
Code example #49
0
File: conv_maxpool_.py Project: fulQuan/ift6266h15
 def __call__(self, inp, mode=None):
     return max_pool_2d(inp, self.downsample_sz, ignore_border=True)
Code example #50
0
File: base.py Project: ballasn/sklearn-theano
 def _build_expression(self):
     self.input_ = T.tensor4(dtype=self.input_dtype)
     self.expression_ = max_pool_2d(self.input_,
                                    self.max_pool_stride,
                                    ignore_border=True)
Code example #51
0
File: vae.py Project: lizhangzhan/conv-vae
def conv_and_pool(X, w, b=None, activation=rectify):
    return max_pool_2d(conv(X, w, b, activation=activation), (2, 2))
Code example #52
0
 def forward(self, X):
     conv_out = conv2d(input=X, filters=self.W)
     pooled_out = downsample.max_pool_2d(input=conv_out,
                                         ds=self.poolsz,
                                         ignore_border=True)
     return T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Code example #53
0
    def __init__(self, rng, input, shape, filters, rfilter, alpha, beta, N,
                 time, pool):
        '''
        >>>type rng: numpy.random.RandomState
        >>>para rng: random seed

        >>>type input: T.tensor4
        >>>para input: input data

        >>>type shape: tuple or list of length 4
        >>>para shape: (batch size, num of input feature maps, image height, image width)

        >>>type filters: tuple or list of length 4
        >>>para filters: (num of filters, num of input feature maps, filter height, filter width)

        >>>type rfilter: tuple or list of length 4
        >>>para rfilter: (num of filters, num of filters, recurrent filter height, recurrent filter width)

        >>>type alpha,beta,N: int or float
        >>>para alpha,beta,N: used in the formulation of the recurrent state

        >>>type time: int
        >>>para time: the number of iterations in the recurrent layer

        >>>type pool: tuple or list of length 2
        >>>para pool: pooling size
        '''

        assert shape[1] == filters[1]
        assert filters[0] == rfilter[0]
        assert rfilter[0] == rfilter[1]
        self.input = input
        self.filters = filters
        self.rfilter = rfilter
        self.shape = shape
        self.time = time
        self.pool = pool
        self.alpha = alpha
        self.beta = beta
        self.N = N
        layer_size = (shape[0], filters[0], shape[2] - filters[2] + 1,
                      shape[3] - filters[3] + 1)

        inflow = np.prod(filters[1:])
        outflow = filters[0] * np.prod(filters[2:]) / np.prod(pool)

        w_bound = np.sqrt(6. / (inflow + outflow))
        rw_bound = np.sqrt(3. / np.prod(rfilter))

        w_in_init = np.asarray(rng.uniform(low=-w_bound,
                                           high=w_bound,
                                           size=filters),
                               dtype=theano.config.floatX)
        self.w_in = theano.shared(value=w_in_init, name='w_in')
        w_r_init = np.asarray(rng.uniform(low=-rw_bound,
                                          high=rw_bound,
                                          size=rfilter),
                              dtype=theano.config.floatX)
        self.w_r = theano.shared(value=w_r_init, name='w_r')

        b_init = np.zeros(shape=filters[0], dtype=theano.config.floatX)
        self.b = theano.shared(value=b_init, name='b_in')
        b_r_init = np.zeros(shape=rfilter[0], dtype=theano.config.floatX)
        self.b_r = theano.shared(value=b_r_init, name='b_r')

        conv_input = conv.conv2d(input=input,
                                 filters=self.w_in,
                                 filter_shape=filters,
                                 image_shape=shape)

        print 'initialize the weight'

        # initial recurrent state: the feed-forward convolution plus the
        # recurrent bias b_r
        state = conv_input + self.b_r.dimshuffle('x', 0, 'x', 'x')
        # crop offsets that cut the 'full' recurrent convolution back down to
        # the layer's own spatial size (see the sketch after this example)
        axis2Padleft = rfilter[2] / 2
        axis2Padright = (rfilter[2] - 1) / 2
        axis3Padleft = rfilter[3] / 2
        axis3Padright = (rfilter[3] - 1) / 2
        axis2Padright = (layer_size[2] + rfilter[2] - 1
                         if axis2Padright == 0 else -axis2Padright)
        axis3Padright = (layer_size[3] + rfilter[3] - 1
                         if axis3Padright == 0 else -axis3Padright)
        for i in xrange(time):
            conv_recurrent = conv.conv2d(input=state,
                                         filters=self.w_r,
                                         filter_shape=rfilter,
                                         image_shape=layer_size,
                                         border_mode='full')
            state = ReLU(conv_input +
                         conv_recurrent[:, :, axis2Padleft:axis2Padright,
                                        axis3Padleft:axis3Padright])
            # alternative: pad the state explicitly and convolve in the default
            # 'valid' mode instead of cropping a 'full' convolution:
            # padded_input = TensorPadding(TensorPadding(input=state, width=rfilter[2]-1, axis=2),
            #                              width=rfilter[3]-1, axis=3)
            # conv_recurrent = conv.conv2d(
            #     input=padded_input,
            #     filters=self.w_r,
            #     filter_shape=rfilter,
            #     image_shape=[layer_size[0], layer_size[1],
            #                  layer_size[2]+rfilter[2]-1, layer_size[3]+rfilter[3]-1]
            # )
            # state = ReLU(conv_input + conv_recurrent)
            norm = NormLayer(input=state,
                             shape=layer_size,
                             alpha=alpha,
                             beta=beta,
                             N=N)
            state = norm.output

        pool_out = downsample.max_pool_2d(input=state,
                                          ds=pool,
                                          ignore_border=True)
        self.output = pool_out + self.b.dimshuffle('x', 0, 'x', 'x')
        self.param = [self.w_in, self.w_r, self.b, self.b_r]

        print 'recurrentconvlayer constructed!'
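The cropping arithmetic for the recurrent convolution deserves a closer look: a 'full' convolution of an L-long axis with an f-long filter produces L + f - 1 positions, and the [padleft:padright] slice recovers exactly L of them. A quick check of that arithmetic (my own sketch, plain Python):

def crop_indices(L, f):
    padleft = f // 2
    padright = (f - 1) // 2
    # when f is even, padright is 0 and a -0 slice bound would be empty,
    # so the layer substitutes the full length L + f - 1 instead
    padright = L + f - 1 if padright == 0 else -padright
    return padleft, padright

for L, f in [(10, 3), (10, 5), (10, 2)]:
    lo, hi = crop_indices(L, f)
    cropped = range(L + f - 1)[lo:hi]
    print("L=%d f=%d cropped length=%d" % (L, f, len(cropped)))  # always L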
Code example #54
0
def maxpool_layer(shared_params, x, maxpool_shape, options):
    return downsample.max_pool_2d(x, maxpool_shape, ignore_border=False)
Code example #55
0
    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape
        )

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        act = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        self.output = T.switch(act < 0, 0, act)  # ReLU: zero out negative activations

        # store parameters of this layer
        self.params = [self.W, self.b]
Code example #56
0
 def forward(self, inData):
     fmap = DWS.max_pool_2d(inData, (self.knlSize, self.knlSize), ignore_border=True)
     
     return fmap
Code example #57
0
 def mp(input):
     return max_pool_2d(input, maxpoolshp, ignore_border, mode=mode)
Code example #58
0
    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4 (a 4D tensor)
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)
                              the number of filters equals the number of
                              feature maps in the convolution output layer;
                              this parameter is exactly the shape of the
                              convolution weights W

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)
                             this parameter is exactly the shape of the
                             convolution input X

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        every #rows x #cols block of the pooling input contributes one
        maximum to the pooling output; e.g. an input of shape (3, 2, 6, 6)
        with a (2, 2) factor gives a max-pooled output of shape (3, 2, 3, 3)

        This class thus defines a convolution layer followed by a pooling layer:
        input (conv input) ---> conv output, aka hidden / pooling input ---> output (pooling output)
        """

        assert image_shape[1] == filter_shape[1]  # number of input feature maps must agree
        self.input = input

        # there are "num input feature maps * filter height * filter width" inputs to each hidden unit
        # 看到是对每个 hidden unit 也就是卷积输出层的每个 feature map 计算的,
        # 故此第一维度也就是卷积输出层 feature map 数是不需要投入计算的
        # 用于初始化 卷积层 W 参数 (Notes: pooling 层并不需要 W 参数)
        fan_in = np.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        # every input unit is convolved into each feature map of the conv layer,
        # affecting filter height * filter width units per map; the conv output
        # (the pooling input) is then shrunk by a factor of poolsize
        fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
                   np.prod(poolsize))
        # initialize weights with random weights (see the MLP chapter for the
        # math); W is a 4D tensor
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            np.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        # the bias is a 1D tensor -- one bias per output feature map
        # each feature map of the conv output layer has one fixed bias,
        # regardless of which input feature map contributed to it;
        # b is 1D with length filter_shape[0], i.e. filter_shape[0] == len(b)
        b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        # the conv.conv2d call hides the details of how the convolution is done
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape
        )

        # downsample each feature map individually, using maxpooling
        # the max_pool_2d call hides the pooling details: taking the maximum
        # over each poolsize block and returning a result of the right shape
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        # after the conv + pooling, add the bias and apply the tanh activation;
        # the feature-map axis is axis 1 of the conv output, so b is dimshuffled
        # into a 1 x len(b) x 1 x 1 4D tensor to broadcast along that axis
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input
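Putting the docstring's shape rules together, a small helper (my own sketch) predicts what this layer produces: a 'valid' convolution shrinks H x W to (H - fh + 1) x (W - fw + 1), and (ph, pw) max-pooling with ignore_border=True then floor-divides each axis:

def conv_pool_output_shape(image_shape, filter_shape, poolsize):
    batch, in_maps, H, W = image_shape
    n_filters, _, fh, fw = filter_shape
    ph, pw = poolsize
    return (batch, n_filters, (H - fh + 1) // ph, (W - fw + 1) // pw)

# e.g. an MNIST-like batch through 20 5x5 filters and 2x2 pooling
print(conv_pool_output_shape((500, 1, 28, 28), (20, 1, 5, 5), (2, 2)))
# -> (500, 20, 12, 12)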
Code example #59
0
res = T.nnet.sigmoid(res)
res_p = T.nnet.sigmoid(res_p)


# compute the average activation of the convolution outputs, per feature map
sprs = 0
if sparsity:
    sparse = T.mean(res, axis=(0, 2, 3))
    epsilon = 1e-20
    sparse = T.clip(sparse, epsilon, 1 - epsilon)
    KL = T.sum(sparsityParam * T.log(sparsityParam / sparse) +
               (1 - sparsityParam) * T.log((1 - sparsityParam) / (1 - sparse)))
    sprs = KL * beta

#Pooling
res = downsample.max_pool_2d(res, pool_shape, ignore_border=True)
res_p = downsample.max_pool_2d(res_p, pool_shape, ignore_border=True)

# separate function, if you want to inspect the output right after pooling
#cnn = theano.function(inputs=[X], outputs=res, allow_input_downcast=True)

#------#

CV_size = 6000
dataSize = DATA.shape[0]
batchSize = 512

#------#

modelName = 'Conv+SM_autosave.txt'
print 'data: ' + str(DATA.shape)
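The KL term built above is the standard sparse-autoencoder penalty, KL(rho || rho_hat) = rho*log(rho/rho_hat) + (1 - rho)*log((1 - rho)/(1 - rho_hat)), summed over feature maps and scaled by beta. A NumPy mirror of that expression (my own sketch, with hypothetical activation values):

import numpy as np

def sparsity_penalty(activations, rho, beta, eps=1e-20):
    # activations: (batch, maps, height, width), in (0, 1) after the sigmoid
    rho_hat = activations.mean(axis=(0, 2, 3))   # average activation per map
    rho_hat = np.clip(rho_hat, eps, 1 - eps)
    kl = np.sum(rho * np.log(rho / rho_hat) +
                (1 - rho) * np.log((1 - rho) / (1 - rho_hat)))
    return beta * kl

acts = np.random.uniform(0.01, 0.2, size=(4, 8, 10, 10))
print(sparsity_penalty(acts, rho=0.05, beta=3.0))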
Code example #60
0
    def __init__(self, rng, input, filter_shape, image_shape,
                 pad = 0, poolsize=(2, 2), activation = T.tanh, poolstride=(2, 2),
                 init_type="tanh",
                 W=None, b=None):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

	# there are "num input feature maps * filter height * filter width"
	# inputs to each hidden unit
	fan_in = numpy.prod(filter_shape[1:])
	# each unit in the lower layer receives a gradient from:
	# "num output feature maps * filter height * filter width" /
	#   pooling size
	
	if init_type=="ReLU":
	    print "ConvPoolLayer with He init"
	    std = numpy.sqrt(2.0/fan_in)
	    self.W = theano.shared(
		numpy.asarray(
		    rng.normal(0, std, size=filter_shape),
		    dtype=theano.config.floatX
		),
		borrow=True
	    )
	else:
	    print "ConvPoolLayer with Xavier init"
	    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
		       numpy.prod(poolsize))
	    # initialize weights with random weights
	    W_bound = numpy.sqrt(6. / (fan_in + fan_out))    
	    self.W = theano.shared(
		numpy.asarray(
		    rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
		    dtype=theano.config.floatX
		),
		borrow=True
	    )
        if W!=None:
            self.W.set_value(W)

	# the bias is a 1D tensor -- one bias per output feature map
	b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
	self.b = theano.shared(value=b_values, borrow=True)
        if b!=None:
            self.b.set_value(b)
            
        # convolve input feature maps with filters
        #conv_out = conv.conv2d(
        #    input=input,
        #    filters=self.W,
        #    filter_shape=filter_shape,
        #    image_shape=image_shape,
        #    border_mode='full'
        #)
        #input_shuffled = input.dimshuffle(1, 2, 3, 0) # bc01 to c01b
        #filters_shuffled = self.W.dimshuffle(1, 2, 3, 0) # bc01 to c01b
        #conv_op = FilterActs(stride=1, partial_sum=1, pad=pad)
        #contiguous_input = gpu_contiguous(input_shuffled)
        #contiguous_filters = gpu_contiguous(filters_shuffled)
        #conv_out_shuffled = conv_op(contiguous_input, contiguous_filters)
	
	conv_out = T.nnet.conv2d(input, self.W, border_mode=pad, filter_flip=False) 

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            st=poolstride,
            ignore_border=False
        )
        #pool_op = MaxPool(ds=poolsize[0], stride=poolstride[0])
        #pooled_out_shuffled = pool_op(conv_out_shuffled)
        #pooled_out = pooled_out_shuffled.dimshuffle(3, 0, 1, 2) # c01b to bc01
    
	
    
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        #self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        #self.output = relu(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output = activation(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        stride = 1# not used
        assert (image_shape[2]-filter_shape[2]+2*pad)%stride==0
        output_im_size = (image_shape[2]-filter_shape[2]+2*pad)/stride+1
        assert output_im_size%poolsize[0]==0
        output_im_size = output_im_size//poolsize[0]
        self.output_shape = [image_shape[0],
                            filter_shape[0],
                            output_im_size,
                            output_im_size]
                            
        # store parameters of this layer
        self.params = [self.W, self.b]
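A quick numeric check (my own sketch) of the output_shape arithmetic above, using the usual formula (H - F + 2*pad) // stride + 1 for the convolution followed by the pooling division:

def conv_pool_size(H, F, pad, stride, pool):
    conv = (H - F + 2 * pad) // stride + 1
    assert conv % pool == 0
    return conv // pool

# e.g. a 32x32 input, 5x5 filter, pad=2, stride=1 keeps 32x32; 2x2 pooling gives 16x16
print(conv_pool_size(32, 5, pad=2, stride=1, pool=2))  # 16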