Code example #1
File: train_ced_mse_d_dd.py (project: igotyooo/dcgan)
def discrim( t, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5 ):
    h1 = lrelu( dnn_conv( t, w1, subsample=( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    yd = sigmoid( T.dot( T.flatten( h4, 2 ), w5 ) )
    return yd
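The discriminators in these examples share one convolution recipe: filters applied with subsample=(2, 2) and border_mode=(2, 2), which halves each spatial dimension per layer. A minimal sketch of the standard convolution output-size formula behind that choice (pure Python; the concrete sizes are illustrative, and 5x5 kernels are assumed as in the usual DCGAN setup):

def conv_output_size(in_size, kernel, stride, pad):
    # Standard (cross-)correlation output size: floor((in + 2*pad - kernel) / stride) + 1
    return (in_size + 2 * pad - kernel) // stride + 1

# With 5x5 kernels, stride 2 and padding 2, each layer halves the spatial size:
assert conv_output_size(64, kernel=5, stride=2, pad=2) == 32
assert conv_output_size(32, kernel=5, stride=2, pad=2) == 16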
Code example #2
def discrim(X, w, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
    h0 = dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))
    if args.db1:
        h0 += b.dimshuffle('x', 0, 'x', 'x')
    h1 = lrelu(h0)
    h1 = dropout(h1, args.dropout)
    h1 = dnn_conv(h1, w2, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h1 = batchnorm(h1, g=g2, b=b2)
    else:
        h1 += b2.dimshuffle('x', 0, 'x', 'x')
    h2 = lrelu(h1)
    h2 = dropout(h2, args.dropout)
    h2 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h2 = batchnorm(h2, g=g3, b=b3)
    else:
        h2 += b3.dimshuffle('x', 0, 'x', 'x')
    h3 = lrelu(h2)
    h3 = dropout(h3, args.dropout)
    h3 = dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h3 = batchnorm(h3, g=g4, b=b4)
    else:
        h3 += b4.dimshuffle('x', 0, 'x', 'x')
    h4 = lrelu(h3)
    h4 = dropout(h4, args.dropout)
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    y1 = sigmoid(T.dot(h4, wy1))
    return y, y1
Code example #3
    def get_output(self, train=False):
        X = self.get_input(train)
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            # CPU fallback when cuDNN is unavailable: emulate 'same' with a
            # 'full' convolution, then crop the borders.
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    def get_config(self):
        return {"name": self.__class__.__name__,
                "nb_filter": self.nb_filter,
                "stack_size": self.stack_size,
                "nb_row": self.nb_row,
                "nb_col": self.nb_col,
                "init": self.init.__name__,
                "activation": self.activation.__name__,
                "border_mode": self.border_mode,
                "subsample": self.subsample,
                "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
                "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
                "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
                "W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
                "b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
Code example #4
    def get_output(self, train=False):
        X = self.get_input(train)
        X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)

        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                pad_x = (self.filter_length - self.subsample_length) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, 0))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.filter_length - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
        return output
Code example #5
File: train_ced_mse_d_dd.py (project: igotyooo/dcgan)
def encoder( s, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5 ):
    h1 = lrelu( dnn_conv( s, w1, subsample=( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    z = lrelu( batchnorm( dnn_conv( h4, w5, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5, b = b5 ) )
    return T.flatten( z, 2 )
Code example #6
File: generate.py (project: gray0302/lrn2)
 def sim_matrix_(self, img, img_filt, filt_size=(9, 9), measure='dot'):
     measures = ['dot', 'cos']
     assert measure in measures, "measure has to be in {0}".format(measures)
     s = img_filt.shape
     img_filt = T.reshape(img_filt,
                          newshape=(-1, s[1], filt_size[0], filt_size[1]),
                          ndim=4,
                          name='img_filt')
     #         img_filt = img_filt.dimshuffle(3,0,1,2)
     conv_out = dnn_conv(
         img=img,
         kerns=img_filt,
         conv_mode='cross',
         border_mode=(0, 0),  #(filt_size[0] // 2, filt_size[1] // 2),
     )
     if measure == 'dot':
         return conv_out
     elif measure == 'cos':
         filt_norm = T.sqrt(
             T.sum(T.sqr(img_filt), axis=(1, 2, 3), keepdims=False))
         img_sqr = T.sqr(img)
         filt_ones = T.ones((1, s[1], filt_size[0], filt_size[1]), dtype=fx)
         conv_out2 = dnn_conv(
             img=img_sqr,
             kerns=filt_ones,
             conv_mode='cross',
             border_mode=(0, 0),  #(filt_size[0] // 2, filt_size[1] // 2),
         )
         # TODO: GO on from here
         norm = T.sqrt(conv_out2) * filt_norm.dimshuffle('x', 0, 'x', 'x')
         return conv_out / norm
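The 'cos' branch normalizes each dot-product response by the local norm of the image patch: convolving the squared image with an all-ones kernel gives the sum of squares under every filter window, so its square root is the patch L2 norm. A 1-D NumPy check of that identity (the window size is illustrative):

import numpy as np

x = np.random.rand(10)
k = 3  # filter window size

# Correlating x**2 with a ones kernel gives the local sum of squares...
local_sq_sums = np.correlate(x ** 2, np.ones(k), mode='valid')
# ...so its square root is the L2 norm of each window, as used above.
windows = np.lib.stride_tricks.sliding_window_view(x, k)
assert np.allclose(np.sqrt(local_sq_sums), np.linalg.norm(windows, axis=1))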
Code example #7
File: extra.py (project: Michlong/keras-extra)
    def get_output(self, train=False):
        X = self.get_input(train)
        newshape = (X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4])
        Y = theano.tensor.reshape(X, newshape) #collapse num_samples and num_timesteps
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = theano.tensor.nnet.conv.conv2d(Y, self.W,
                border_mode=border_mode, subsample=self.subsample)

            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:Y.shape[2] + shift_x, shift_y:Y.shape[3] + shift_y]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        newshape = (X.shape[0], X.shape[1], output.shape[1], output.shape[2], output.shape[3])
        return theano.tensor.reshape(output, newshape)
Code example #8
File: convolutional.py (project: Mofef/keras)
    def get_output(self, train=False):
        X = self.get_input(train)
        X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)

        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                pad_x = (self.filter_length - self.subsample_length) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, 0))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.filter_length - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
        return output
Code example #9
    def get_output(self, train):
        X = self.get_input(train)
        border_mode = self.border_mode
        if dnn.dnn_available() and theano.config.device[:3] == 'gpu':
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Code example #10
def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, by):
    h0 = dropout(relu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))), p=0.5)
    h1 = dropout(relu(batchnorm(dnn_conv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2)), p=0.5)
    h2 = dropout(relu(batchnorm(dnn_conv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3)), p=0.5)
    h2 = T.flatten(h2, 2)
    y = -relu(T.dot(h2, wy)+by)
    return y
Code example #11
    def get_output(self, train=False):
        X = self.get_input(train)
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert (self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
                assert (self.subsample == (1, 1))

            conv_out = T.nnet.conv.conv2d(X,
                                          self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x,
                                    shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Code example #12
File: convolutional.py (project: rahulmohan/keras)
    def get_output(self, train=False):
        X = self.get_input(train)
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
                assert(self.subsample == (1, 1))

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample,
                                          image_shape=self.input_shape,
                                          filter_shape=self.W_shape)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Code example #13
 def apply(self, input):
     """
     Apply this discriminator module to the given input. This produces a
     collection of filter responses for feedforward and a spatial grid of
     discriminator outputs.
     """
     bm = int((self.filt_dim - 1) / 2) # use "same" mode convolutions
     ss = self.ds_stride               # stride for "learned downsampling"
     # apply first conv layer
     h1 = dnn_conv(input, self.w1, subsample=(1, 1), border_mode=(bm, bm))
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = lrelu(h1)
     # apply second conv layer (may include downsampling)
     if self.use_pooling:
         h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
         if self.apply_bn_2:
             h2 = batchnorm(h2, g=self.g2, b=self.b2)
         h2 = lrelu(h2)
         h2 = dnn_pool(h2, (ss,ss), stride=(ss, ss), mode='max', pad=(0, 0))
     else:
         h2 = dnn_conv(h1, self.w2, subsample=(ss, ss), border_mode=(bm, bm))
         if self.apply_bn_2:
             h2 = batchnorm(h2, g=self.g2, b=self.b2)
         h2 = lrelu(h2)
     
     # apply discriminator layer
     y = dnn_conv(h2, self.wd, subsample=(1, 1), border_mode=(bm, bm))
     y = sigmoid(T.flatten(y, 2)) # flatten to (batch_size, num_preds)
     return h2, y
Code example #14
    def get_output_for(self, input, **kwargs):
        image = input[0]
        filters = input[1]

        conv_mode = 'conv' if self.flip_filters else 'cross'
        border_mode = self.pad
        if border_mode == 'same':
            border_mode = tuple(s // 2 for s in self.filter_size)
        filter_size = self.filter_size

        if self.grouping:
            filter_localexpand_np = np.reshape(np.eye(np.prod(filter_size), np.prod(filter_size)), (np.prod(filter_size), 1, filter_size[0], filter_size[1]))
            filter_localexpand = T.cast(theano.shared(filter_localexpand_np), 'floatX')

            outputs = []
            for i in range(3):
                input_localexpanded = dnn.dnn_conv(img=image[:,[i],:,:], kerns=filter_localexpand, subsample=self.stride, border_mode=border_mode, conv_mode=conv_mode)
                output = T.sum(input_localexpanded * filters, axis=1, keepdims=True)
                outputs.append(output)

            output = T.concatenate(outputs, axis=1)
        else:
            filter_localexpand_np = np.reshape(np.eye(np.prod(filter_size), np.prod(filter_size)), (np.prod(filter_size), filter_size[2], filter_size[0], filter_size[1]))
            filter_localexpand = T.cast(theano.shared(filter_localexpand_np), 'floatX')
            input_localexpanded = dnn.dnn_conv(img=image, kerns=filter_localexpand, subsample=self.stride, border_mode=border_mode, conv_mode=conv_mode)
            output = input_localexpanded * filters
            output = T.sum(output, axis=1, keepdims=True)

        return output
Code example #15
def discrim(X, w, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
    h0 = dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))
    if args.db1:
        h0 += b.dimshuffle('x', 0, 'x', 'x')
    h1 = lrelu(h0)
    h1 = dropout(h1, args.dropout)
    h1 = dnn_conv(h1, w2, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h1 = batchnorm(h1, g=g2, b=b2)
    else:
        h1 += b2.dimshuffle('x', 0, 'x', 'x')
    h2 = lrelu(h1)
    h2 = dropout(h2, args.dropout)
    h2 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h2 = batchnorm(h2, g=g3, b=b3)
    else:
        h2 += b3.dimshuffle('x', 0, 'x', 'x')
    h3 = lrelu(h2)
    h3 = dropout(h3, args.dropout)
    h3 = dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h3 = batchnorm(h3, g=g4, b=b4)
    else:
        h3 += b4.dimshuffle('x', 0, 'x', 'x')
    h4 = lrelu(h3)
    h4 = dropout(h4, args.dropout)
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    y1 = sigmoid(T.dot(h4, wy1))
    return y, y1
Code example #16
def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, by):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h3 = T.flatten(h3, 2)
    y = -softplus(T.dot(h3, wy)+by)
    return y
Code example #17
File: job.py (project: mehdidc/dcgan)
 def discrim(X, w, w2, w3, wy):
     h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
     h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))))
     h2 = T.flatten(h2, 2)
     h3 = lrelu(batchnorm(T.dot(h2, w3)))
     y = sigmoid(T.dot(h3, wy))
     return y
Code example #18
File: train_uncond_dcgan.py (project: igotyooo/dcgan)
def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    return y
Code example #19
 def feature_function(input_data, is_train=True):
     h0 = relu(batchnorm(X=dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2)), g=bn_w0, b=bn_b0))
     h1 = relu(batchnorm(X=dnn_conv(h0,         conv_w1, subsample=(2, 2), border_mode=(2, 2)), g=bn_w1, b=bn_b1))
     h2 = relu(batchnorm(X=dnn_conv(h1,         conv_w2, subsample=(2, 2), border_mode=(2, 2)), g=bn_w2, b=bn_b2))
     h3 = relu(batchnorm(X=dnn_conv(h2,         conv_w3, subsample=(2, 2), border_mode=(2, 2)), g=bn_w3, b=bn_b3))
     h3 = T.flatten(h3, 2)
     f  = tanh(T.dot(h3, linear_w4)+linear_b4)
     return f
Code example #20
def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, wa):
    h0 = dropout(relu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))), p=0.5)
    h1 = dropout(relu(batchnorm(dnn_conv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2)), p=0.5)
    h2 = dropout(relu(batchnorm(dnn_conv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3)), p=0.5)
    h2 = T.flatten(h2, 2)
    y = square(T.dot(h2, wy))
    y = T.dot(T.log(1+y), T.exp(wa))
    return y
Code example #21
File: train_ced_mse_d_dd.py (project: igotyooo/dcgan)
def domain_discrim( st, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5, w6 ):
    h1 = lrelu( dnn_conv( st, w1, subsample=( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    h5 = lrelu( batchnorm( dnn_conv( h4, w5, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5, b = b5 ) )
    ydd = sigmoid( T.dot( T.flatten( h5, 2 ), w6 ) )
    return ydd
Code example #22
File: Script_dcgan.py (project: gkgsd/keras-dcgan)
def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    return y
Code example #23
 def feature_function(input_data, is_train=True):
     # layer 0 (conv)
     h0 = relu(dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2))+conv_b0.dimshuffle('x', 0, 'x', 'x'))
     # layer 1 (conv)
     h1 = relu(dnn_conv(        h0, conv_w1, subsample=(2, 2), border_mode=(2, 2))+conv_b1.dimshuffle('x', 0, 'x', 'x'))
     # layer 2 (conv)
     h2 = dnn_conv(        h1, conv_w2, subsample=(2, 2), border_mode=(2, 2))+conv_b2.dimshuffle('x', 0, 'x', 'x')
     feature = T.flatten(h2, 2)
     return feature
Code example #24
    def get_hog(self, x_o):
        use_bin = self.use_bin
        NO = self.NO
        BS = self.BS
        nc = self.nc
        x = (x_o + sharedX(1)) / (sharedX(2))
        Gx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) / 4.0
        Gy = Gx.T
        f1_w = []
        for i in range(NO):
            t = np.pi / NO * i
            g = np.cos(t) * Gx + np.sin(t) * Gy
            gg = np.tile(g[np.newaxis, np.newaxis, :, :], [1, 1, 1, 1])
            f1_w.append(gg)
        f1_w = np.concatenate(f1_w, axis=0)
        G = np.concatenate([
            Gx[np.newaxis, np.newaxis, :, :], Gy[np.newaxis, np.newaxis, :, :]
        ],
                           axis=0)
        G_f = sharedX(floatX(G))

        a = np.cos(np.pi / NO)
        l1 = sharedX(floatX(1 / (1 - a)))
        l2 = sharedX(floatX(a / (1 - a)))
        eps = sharedX(1e-3)
        if nc == 3:
            x_gray = T.mean(x, axis=1).dimshuffle(0, 'x', 1, 2)
        else:
            x_gray = x
        f1 = sharedX(floatX(f1_w))
        h0 = T.abs_(dnn_conv(x_gray, f1, subsample=(1, 1), border_mode=(1, 1)))
        g = dnn_conv(x_gray, G_f, subsample=(1, 1), border_mode=(1, 1))

        if use_bin:
            gx = g[:, [0], :, :]
            gy = g[:, [1], :, :]
            gg = T.sqrt(gx * gx + gy * gy + eps)
            hk = T.maximum(0, l1 * h0 - l2 * gg)

            bf_w = np.zeros((NO, NO, 2 * BS, 2 * BS))
            b = 1 - np.abs(
                (np.arange(1, 2 * BS + 1) - (2 * BS + 1.0) / 2.0) / BS)
            b = b[np.newaxis, :]
            bb = b.T.dot(b)
            for n in range(NO):
                bf_w[n, n] = bb

            bf = sharedX(floatX(bf_w))
            h_f = dnn_conv(hk,
                           bf,
                           subsample=(BS, BS),
                           border_mode=(BS // 2, BS // 2))
            return h_f
        else:
            return g
Code example #25
    def get_output(self):
        # RECURSE
        inp, time, updates = self.incoming.get_output()

        # CALCULATE SYNAPTIC SUMMED INPUT
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert (self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
            conv_out = T.nnet.conv.conv2d(inp,
                                          self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:inp.shape[2] + shift_x,
                                    shift_y:inp.shape[3] + shift_y]

        # UPDATE NEURONS
        #   Get impulse
        impulse = conv_out
        #   Destroy impulse if in refrac
        masked_imp = T.set_subtensor(
            impulse[(self.refrac_until > time).nonzero()], 0.)
        #   Add impulse
        new_mem = self.mem + masked_imp
        #   Store spiking
        output_spikes = new_mem > self.threshold
        #   Reset neuron
        new_and_reset_mem = T.set_subtensor(new_mem[output_spikes.nonzero()],
                                            0.)
        #   Store refractory
        new_refractory = T.set_subtensor(
            self.refrac_until[output_spikes.nonzero()], time + self.refractory)

        # Store updates
        updates.append((self.refrac_until, new_refractory))
        updates.append((self.mem, new_and_reset_mem))

        # Finish
        return (T.cast(output_spikes, 'float32'), time, updates)
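The neuron update at the end is a plain integrate-and-fire step: input to refractory neurons is zeroed, the remainder is integrated into the membrane potential, neurons above threshold emit a spike, and spiking neurons are reset and made refractory. A NumPy sketch of the same update (threshold and refractory period are illustrative values):

import numpy as np

def spiking_update(mem, impulse, refrac_until, time, threshold=1.0, refractory=5.0):
    impulse = np.where(refrac_until > time, 0.0, impulse)  # destroy impulse if in refrac
    mem = mem + impulse                                    # add impulse
    spikes = mem > threshold                               # store spiking
    mem[spikes] = 0.0                                      # reset neuron
    refrac_until[spikes] = time + refractory               # store refractory
    return spikes.astype('float32'), mem, refrac_until

spikes, _, _ = spiking_update(np.array([0.8, 0.2]), np.array([0.5, 0.5]),
                              np.zeros(2), time=0.0)
assert spikes.tolist() == [1.0, 0.0]  # only the first neuron crosses threshold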
Code example #26
File: generate.py (project: aaajiao/typeface)
def discrim(X, Y, w, w2, w3, wy):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    X = conv_cond_concat(X, yb)
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h = conv_cond_concat(h, yb)
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))))
    h2 = T.flatten(h2, 2)
    h2 = T.concatenate([h2, Y], axis=1)
    h3 = lrelu(batchnorm(T.dot(h2, w3)))
    h3 = T.concatenate([h3, Y], axis=1)
    y = sigmoid(T.dot(h3, wy))
    return y
Code example #27
File: theano_backend.py (project: sfwlily/keras)
def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))

    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert(strides == (1, 1))
            np_kernel = kernel.eval()
            pad_x = (np_kernel.shape[2] - strides[0]) // 2
            pad_y = (np_kernel.shape[3] - strides[1]) // 2
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=border_mode,
                                    subsample=strides)
    else:
        if border_mode == 'same':
            th_border_mode = 'full'
            assert(strides == (1, 1))
        elif border_mode == 'valid':
            th_border_mode = 'valid'
        else:
            raise Exception('Border mode not supported: ' + str(border_mode))

        conv_out = T.nnet.conv.conv2d(x, kernel,
                                      border_mode=th_border_mode,
                                      subsample=strides)
        if border_mode == 'same':
            shift_x = (kernel.shape[2] - 1) // 2
            shift_y = (kernel.shape[3] - 1) // 2
            conv_out = conv_out[:, :,
                                shift_x:x.shape[2] + shift_x,
                                shift_y:x.shape[3] + shift_y]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
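The dim_ordering == 'tf' branch is only an axis permutation between the two layout conventions spelled out in the comments. The same transposes in NumPy (the concrete shapes are illustrative):

import numpy as np

x_tf = np.zeros((2, 32, 32, 3))     # TF input:  (samples, rows, cols, input_depth)
k_tf = np.zeros((5, 5, 3, 64))      # TF kernel: (rows, cols, input_depth, depth)

x_th = x_tf.transpose(0, 3, 1, 2)   # TH input:  (samples, input_depth, rows, cols)
k_th = k_tf.transpose(3, 2, 0, 1)   # TH kernel: (depth, input_depth, rows, cols)

assert x_th.shape == (2, 3, 32, 32)
assert k_th.shape == (64, 3, 5, 5)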
Code example #28
 def feature_function(input_data, is_train=True):
     # layer 0 (conv)
     h0 = relu(dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2))+conv_b0.dimshuffle('x', 0, 'x', 'x'))
     # layer 1 (conv)
     h1 = relu(dnn_conv(        h0, conv_w1, subsample=(2, 2), border_mode=(2, 2))+conv_b1.dimshuffle('x', 0, 'x', 'x'))
     # layer 2 (conv)
     h2 = relu(dnn_conv(        h1, conv_w2, subsample=(2, 2), border_mode=(2, 2))+conv_b2.dimshuffle('x', 0, 'x', 'x'))
     # layer 3 (conv)
     h3 = relu(dnn_conv(        h2, conv_w3, subsample=(2, 2), border_mode=(2, 2))+conv_b3.dimshuffle('x', 0, 'x', 'x'))
     h3 = T.flatten(h3, 2)
     f  = tanh(T.dot(h3, linear_w4)+linear_b4)
     return f
Code example #29
File: theano_backend.py (project: Kevinwenya/keras)
def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))

    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert (strides == (1, 1))
            np_kernel = kernel.eval()
            pad_x = (np_kernel.shape[2] - strides[0]) // 2
            pad_y = (np_kernel.shape[3] - strides[1]) // 2
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=border_mode,
                                    subsample=strides)
    else:
        if border_mode == 'same':
            th_border_mode = 'full'
            assert (strides == (1, 1))
        elif border_mode == 'valid':
            th_border_mode = 'valid'
        else:
            raise Exception('Border mode not supported: ' + str(border_mode))

        conv_out = T.nnet.conv.conv2d(x,
                                      kernel,
                                      border_mode=th_border_mode,
                                      subsample=strides)
        if border_mode == 'same':
            shift_x = (kernel.shape[2] - 1) // 2
            shift_y = (kernel.shape[3] - 1) // 2
            conv_out = conv_out[:, :, shift_x:x.shape[2] + shift_x,
                                shift_y:x.shape[3] + shift_y]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
Code example #30
 def feature_function(input_data, is_train=True):
     # layer 0 (conv)
     h0 = relu(
         dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2)) + conv_b0.dimshuffle("x", 0, "x", "x")
     )
     # layer 1 (conv)
     h1 = relu(dnn_conv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2)) + conv_b1.dimshuffle("x", 0, "x", "x"))
     # layer 2 (conv)
     h2 = relu(dnn_conv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2)) + conv_b2.dimshuffle("x", 0, "x", "x"))
     # layer 3 (conv)
     h3 = tanh(dnn_conv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2)) + conv_b3.dimshuffle("x", 0, "x", "x"))
     feature = T.flatten(h3, 2)
     return feature
Code example #31
def discrim(X, Y, w, w2, w3, wy):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    X = conv_cond_concat(X, yb)
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h = conv_cond_concat(h, yb)
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2),
                                  border_mode=(2, 2))))
    h2 = T.flatten(h2, 2)
    h2 = T.concatenate([h2, Y], axis=1)
    h3 = lrelu(batchnorm(T.dot(h2, w3)))
    h3 = T.concatenate([h3, Y], axis=1)
    y = sigmoid(T.dot(h3, wy))
    return y
Code example #32
def model(X,
    h2_u, h3_u,
    h2_s, h3_s,
    w, w2, g2, b2, w3, g3, b3, wy
    ):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2, u=h2_u, s=h2_s))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3, u=h3_u, s=h3_s))
    h = T.flatten(dnn_pool(h, (4, 4), (4, 4), mode='max'), 2)
    h2 = T.flatten(dnn_pool(h2, (2, 2), (2, 2), mode='max'), 2)
    h3 = T.flatten(dnn_pool(h3, (1, 1), (1, 1), mode='max'), 2)
    f = T.concatenate([h, h2, h3], axis=1)
    return [f]
Code example #33
def test_dnn_conv_inplace():
    """This test that we have inplace work correctly even when
    GpuAllocEmpty get merged together.

    """
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img_shp = [2, 5, 6, 8]
    kern_shp = [3, 5, 5, 6]
    img = T.ftensor4('img')
    kern = T.ftensor4('kern')
    out = T.ftensor4('out')
    desc1 = dnn.GpuDnnConvDesc(border_mode='valid',
                               conv_mode='conv')(img.shape, kern.shape)
    desc2 = dnn.GpuDnnConvDesc(border_mode='valid',
                               conv_mode='cross')(img.shape, kern.shape)

    # Test forward op
    o1 = dnn.dnn_conv(img, kern, conv_mode='conv')
    o2 = dnn.dnn_conv(img, kern, conv_mode='cross')
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    d1, d2 = f(
        numpy.random.rand(*img_shp).astype('float32'),
        numpy.random.rand(*kern_shp).astype('float32'))
    topo = f.maker.fgraph.toposort()
    convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]
    assert len(convs) == 2
    assert all([node.op.inplace for node in convs])
    assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2

    # Test grad w op
    out = gpu_alloc_empty(*kern.shape)
    o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc1)
    o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc2)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]
    assert len(convs) == 2
    assert all([node.op.inplace for node in convs])
    assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2

    # Test grad i op
    out = gpu_alloc_empty(*img.shape)
    o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc1)
    o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc2)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]
    assert len(convs) == 2
    assert all([node.op.inplace for node in convs])
    assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2
Code example #34
File: dcgan_theano.py (project: hsab/iGAN)
def predict_test(_x, _params, _batchnorm, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        u = _batchnorm[n]
        s = _batchnorm[n + n_layers]
        hout = lrelu(batchnorm(dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2)), u=u, s=s, g=g, b=b))
        hs.append(hout)
    h = T.flatten(hs[-1], 2)
    y = tanh(T.dot(h, _params[-1]))
    return y
Code example #35
def bnorm_statistics(X, w, w2, g2, b2, w3, g3, b3, wy):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))

    h2 = dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))
    h2_u, h2_s = mean_and_var(h2)
    h2 = lrelu(batchnorm(h2, g=g2, b=b2))

    h3 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    h3_u, h3_s = mean_and_var(h3)
    h3 = lrelu(batchnorm(h3, g=g3, b=b3))

    h_us = [h2_u, h3_u]
    h_ss = [h2_s, h3_s]
    return h_us, h_ss
Code example #36
def conv2d(x, kernel, strides=(1, 1), border_mode="valid", dim_ordering="th", image_shape=None, filter_shape=None):
    """
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    """
    if dim_ordering not in {"th", "tf"}:
        raise Exception("Unknown dim_ordering " + str(dim_ordering))

    if dim_ordering == "tf":
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))
        if image_shape:
            image_shape = (image_shape[0], image_shape[3], image_shape[1], image_shape[2])
        if filter_shape:
            filter_shape = (filter_shape[3], filter_shape[2], filter_shape[0], filter_shape[1])

    if _on_gpu() and dnn.dnn_available():
        if border_mode == "same":
            assert strides == (1, 1)
            np_kernel = kernel.eval()
            pad_x = (np_kernel.shape[2] - strides[0]) // 2
            pad_y = (np_kernel.shape[3] - strides[1]) // 2
            conv_out = dnn.dnn_conv(img=x, kerns=kernel, border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=x, kerns=kernel, border_mode=border_mode, subsample=strides)
    else:
        if border_mode == "same":
            th_border_mode = "full"
            assert strides == (1, 1)
        elif border_mode == "valid":
            th_border_mode = "valid"
        else:
            raise Exception("Border mode not supported: " + str(border_mode))

        conv_out = T.nnet.conv.conv2d(
            x, kernel, border_mode=th_border_mode, subsample=strides, image_shape=image_shape, filter_shape=filter_shape
        )
        if border_mode == "same":
            shift_x = (kernel.shape[2] - 1) // 2
            shift_y = (kernel.shape[3] - 1) // 2
            conv_out = conv_out[:, :, shift_x : x.shape[2] + shift_x, shift_y : x.shape[3] + shift_y]
    if dim_ordering == "tf":
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
Code example #37
def bnorm_statistics(X, w, w2, g2, b2, w3, g3, b3, wy):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))

    h2 = dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))
    h2_u, h2_s = mean_and_var(h2)
    h2 = lrelu(batchnorm(h2, g=g2, b=b2))

    h3 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    h3_u, h3_s = mean_and_var(h3)
    h3 = lrelu(batchnorm(h3, g=g3, b=b3))

    h_us = [h2_u, h3_u]
    h_ss = [h2_s, h3_s]
    return h_us, h_ss
Code example #38
    def get_output(self):
        # RECURSE
        inp, time, updates = self.incoming.get_output()

        # CALCULATE SYNAPTIC SUMMED INPUT
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
            conv_out = T.nnet.conv.conv2d(inp, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:inp.shape[2] + shift_x, shift_y:inp.shape[3] + shift_y]

        # UPDATE NEURONS
        #   Get impulse
        impulse = conv_out
        #   Destroy impulse if in refrac
        masked_imp = T.set_subtensor(impulse[(self.refrac_until>time).nonzero()], 0.)
        #   Add impulse
        new_mem = self.mem + masked_imp
        #   Store spiking
        output_spikes = new_mem > self.threshold
        #   Reset neuron
        new_and_reset_mem = T.set_subtensor(new_mem[output_spikes.nonzero()], 0.)
        #   Store refractory
        new_refractory = T.set_subtensor(self.refrac_until[output_spikes.nonzero()], time + self.refractory)

        # Store updates
        updates.append( (self.refrac_until, new_refractory) )
        updates.append( (self.mem, new_and_reset_mem) )

        # Finish
        return (T.cast(output_spikes,'float32'), time, updates)
Code example #39
File: dcgan_theano.py (project: jevenzh/iGAN)
def disc_test(_x, _params, _batchnorm, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        u = _batchnorm[n]
        s = _batchnorm[n + n_layers]
        hout = lrelu(batchnorm(dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2)), u=u, s=s, g=g, b=b))
        hs.append(hout)
    h = T.flatten(hs[-1], 2)
    y = sigmoid(T.dot(h, _params[-1]))
    return y
Code example #40
File: test_dnn.py (project: ChienliMa/Theano)
def test_dnn_conv_inplace():
    """This test that we have inplace work correctly even when
    GpuAllocEmpty get merged together.

    """
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img_shp = [2, 5, 6, 8]
    kern_shp = [3, 5, 5, 6]
    img = T.ftensor4('img')
    kern = T.ftensor4('kern')
    out = T.ftensor4('out')
    desc1 = dnn.GpuDnnConvDesc(border_mode='valid', conv_mode='conv')(
        img.shape, kern.shape)
    desc2 = dnn.GpuDnnConvDesc(
        border_mode='valid', conv_mode='cross')(img.shape, kern.shape)

    # Test forward op
    o1 = dnn.dnn_conv(img, kern, conv_mode='conv')
    o2 = dnn.dnn_conv(img, kern, conv_mode='cross')
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),
               numpy.random.rand(*kern_shp).astype('float32'))
    topo = f.maker.fgraph.toposort()
    convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]
    assert len(convs) == 2
    assert all([node.op.inplace for node in convs])
    assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2

    # Test grad w op
    out = gpu_alloc_empty(*kern.shape)
    o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc1)
    o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc2)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]
    assert len(convs) == 2
    assert all([node.op.inplace for node in convs])
    assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2

    # Test grad i op
    out = gpu_alloc_empty(*img.shape)
    o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc1)
    o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc2)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]
    assert len(convs) == 2
    assert all([node.op.inplace for node in convs])
    assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2
Code example #41
 def apply(self, input, rand_vals=None):
     """
     Apply this generator module to some input.
     """
     batch_size = input.shape[0]
     bm = int((self.filt_dim - 1) / 2)  # use "same" mode convolutions
     ss = self.us_stride  # stride for "learned upsampling"
     if self.use_pooling:
         # "unpool" the input if desired
         input = input.repeat(ss, axis=2).repeat(ss, axis=3)
     # get shape for random values that will augment input
     rand_shape = (batch_size, self.rand_chans, input.shape[2],
                   input.shape[3])
     if self.use_rand:
         # augment input with random channels
         if rand_vals is None:
             if self.rand_type == 'normal':
                 rand_vals = self.rng.normal(size=rand_shape, avg=0.0, std=1.0, \
                                             dtype=theano.config.floatX)
             else:
                 rand_vals = self.rng.uniform(size=rand_shape, low=-1.0, high=1.0, \
                                              dtype=theano.config.floatX)
         rand_vals = rand_vals.reshape(rand_shape)
         # stack random values on top of input
         full_input = T.concatenate([rand_vals, input], axis=1)
     else:
         # don't augment input with random channels
         full_input = input
     # apply first convolution, perhaps with fractional striding
     if self.use_pooling:
         h1 = dnn_conv(full_input,
                       self.w1,
                       subsample=(1, 1),
                       border_mode=(bm, bm))
     else:
         # apply first conv layer (with fractional stride for upsampling)
         h1 = deconv(full_input,
                     self.w1,
                     subsample=(ss, ss),
                     border_mode=(bm, bm))
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = relu(h1)
     # apply second conv layer
     h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
     if self.apply_bn_2:
         h2 = batchnorm(h2, g=self.g2, b=self.b2)
     h2 = relu(h2)
     return h2
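With use_pooling enabled, the module upsamples by nearest-neighbour repetition before the stride-1 convolution: each input pixel is expanded into an ss x ss block, and the following convolution learns to smooth the result. A NumPy sketch of that "unpool" step:

import numpy as np

x = np.array([[1.0, 2.0],
              [3.0, 4.0]])[None, None]      # (batch, channels, 2, 2)
ss = 2                                      # upsampling stride

up = x.repeat(ss, axis=2).repeat(ss, axis=3)
assert up.shape == (1, 1, 4, 4)
assert (up[0, 0, :2, :2] == 1.0).all()      # each pixel becomes an ss x ss block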
Code example #42
File: inisim.py (project: rsantana-isg/snn_toolbox)
    def get_output(self):
        """Get output."""

        # Recurse
        inp, time, updates = get_input(self)
        if settings['payloads']:
            # Add payload from previous layer
            prev_layer = self.inbound_nodes[0].inbound_layers[0]
            inp = add_payloads(prev_layer, inp)

        if settings['online_normalization']:
            # Modify threshold if firing rate of layer too low
            updates.append((self.v_thresh, get_new_thresh(self, time)))

        # CALCULATE SYNAPTIC SUMMED INPUT
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            conv_mode = 'conv' if self.filter_flip else 'cross'
            if border_mode == 'same':
                assert (self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y),
                                        conv_mode=conv_mode)
            else:
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample,
                                        conv_mode=conv_mode)
        else:
            if border_mode == 'same':
                border_mode = 'full'
            conv_out = t.nnet.conv2d(inp,
                                     self.W,
                                     border_mode=border_mode,
                                     subsample=self.subsample,
                                     filter_flip=self.filter_flip)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:inp.shape[2] + shift_x,
                                    shift_y:inp.shape[3] + shift_y]
        self.impulse = conv_out + k.reshape(self.b, (1, self.nb_filter, 1, 1))
        output_spikes = update_neurons(self, time, updates)
        self.updates = updates
        return t.cast(output_spikes, 'float32')
Code example #43
def predict(_x, _params, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        hout = lrelu(
            batchnorm(dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2)),
                      g=g,
                      b=b))
        hs.append(hout)
    h = T.flatten(hs[-1], 2)
    y = tanh(T.dot(h, _params[-1]))
    return y
Code example #44
def discrim_batchnorm(_x, _params, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    output = []
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        h_o = dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2))
        hout = lrelu(batchnorm(h_o, g=g, b=b))
        hs.append(hout)
        output.append(h_o)
    h = T.flatten(hs[-1], 2)
    y = sigmoid(T.dot(h, _params[-1]))
    return y, output
Code example #45
    def output(self, input):
        '''
        dnn
        '''
        if self.border_mode == 'same':
            conv_out = dnn.dnn_conv(
                img=input,
                kerns=self.W,
                subsample=(1, 1),
                border_mode=self.border,
                #conv_mode='cross'
            )
        else:
            raise Exception('Unknown conv type')

        # downsample each feature map individually, using maxpooling

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            pooled_out = conv_out
        else:
            pooled_out = dnn.dnn_pool(img=conv_out,
                                      ws=self.poolsize,
                                      stride=self.poolsize,
                                      mode='max',
                                      pad=(0, 0))

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return (lin_output
                if self.activation is None else self.activation(lin_output))
Code example #46
 def encoder_function(input_data):
     # layer 0 (conv)
     h0 = dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2))
     h0 = relu(batchnorm(h0, g=bn_w0, b=bn_b0))
     # layer 1 (conv)
     h1 = dnn_conv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2))
     h1 = relu(batchnorm(h1, g=bn_w1, b=bn_b1))
     # layer 2 (conv)
     h2 = dnn_conv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2))
     h2 = relu(batchnorm(h2, g=bn_w2, b=bn_b2))
     # layer 3 (conv)
     h3 = dnn_conv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2))
     h3 = T.flatten(relu(batchnorm(h3, g=bn_w3, b=bn_b3)), 2)
     # layer output
     hidden_data = T.dot(h3, hidden_w) + hidden_b
     return hidden_data
Code example #47
 def output(self):
     # conv_out = conv.conv2d( input=self.X, filters=self.W, filter_shape=self.filter_shape, subsample=self.subsample)
     conv_out = dnn_conv(self.X,
                         self.W,
                         border_mode=self.border_mode,
                         subsample=self.subsample)
     return conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
Code example #48
File: deconv_layer.py (project: ybzhou/Gemini)
    def fprop(self, x):
        
        # this is the forward direction as if we are going from bottom up
        dummy_v = theano.shared(numpy.zeros(self.outputShape, dtype='float32'))
        desc = cuDNN.GpuDnnConvDesc(border_mode=(self.nPad, self.nPad), 
                    subsample=(self.strideSize, self.strideSize),
                  )(dummy_v.shape, self.params.getParameter('W').shape)
        
        if self.weight_outside is None or self.weight_outside[1]==False:
            W = self.params.getParameter('W')
        else:
            W = self.params.getParameter('W').dimshuffle(1,0,2,3)
        
        z_hs = cuDNN.dnn_conv(
                    img = dummy_v,
                    kerns = W,
                    border_mode=(self.nPad, self.nPad),
                    subsample=(self.strideSize, self.strideSize),
                    algo = self.algo
                )
        # this is the real direction for deconv, which is just the opposite of
        # the true convolution
        conv_out = z_hs.owner.op.grad(
                  (dummy_v, self.params.getParameter('W'), 1, desc, 1, 1), 
                  (x,))
        conv_out = conv_out[0]

        conv_out += self.params.getParameter('b').dimshuffle('x', 0, 'x', 'x')
            
        conv_out = conv_out if self.actFunc is None else self.actFunc(conv_out)

        return conv_out
    
# End DeConvLayer
#-------------------------------------------------------------------------------
Code example #49
 def _conv(self, w, b, img, pad=0):
     if w is None:
         return None
     out = dnn.dnn_conv(
         img=img, kerns=w, subsample=(1, 1), border_mode=pad)
     out += b.dimshuffle('x', 0, 'x', 'x')
     return out
Code example #50
    def apply(self, input_):
        """Perform the convolution.

        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            A 3D tensor with axes batch size, sequence, features

        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            A 3D tensor of filtered sequences with axes batch size, sequence,
            filter map response
        """
        W, b = self.params
        shuffled = input_.dimshuffle(0, 2, 1, 'x')

        # conv output axes: batch_size, num_filters, x_map, 1
        output = dnn_conv(
            shuffled,
            W,
            subsample=(self.step, 1),
            border_mode='valid')

        sequence_out = output[:, :, :, 0].dimshuffle(0, 2, 1)
        return sequence_out + b.dimshuffle('x', 0)
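The dimshuffle bookkeeping in apply() is easy to misread, so here is a hedged shape walkthrough with hypothetical sizes, assuming W has shape (num_filters, input_dim, filter_length, 1):

batch, seq, features = 8, 100, 32            # hypothetical input_ dimensions
num_filters, filter_length, step = 64, 5, 1
# input_   : (8, 100, 32)
# shuffled : (8, 32, 100, 1)  after dimshuffle(0, 2, 1, 'x')
new_seq = (seq - filter_length) // step + 1  # 'valid' convolution output length
assert new_seq == 96
# output       : (8, 64, 96, 1)
# sequence_out : (8, 96, 64)  after slicing off the last axis and dimshuffle(0, 2, 1)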
Code Example #51
def convlayer(tparams, state_below, options, index,
             prefix='rconv',
             activ='lambda x: tensor.tanh(x)',
             stride=None, trans_weights=False):

     kernel_shape = tparams[prefix+"_W"].get_value().shape[2]
     if kernel_shape == 5:
         # pad 2 keeps 5x5 kernels size-preserving before striding
         padsize = 2
     elif kernel_shape == 1:
         padsize = 0
     else:
         raise Exception("unsupported kernel size: %s" % kernel_shape)
     weights = tparams[prefix+'_W']
     if trans_weights:
         weights = weights.transpose(1,0,2,3)
     if stride == -2:
         conv_out = deconv(state_below, weights.transpose(1, 0, 2, 3),
                           subsample=(2, 2), border_mode=(2, 2))
     else:
         conv_out = dnn.dnn_conv(img=state_below, kerns=weights,
                                 subsample=(stride, stride),
                                 border_mode=padsize, precision='float32')
     conv_out = conv_out + tparams[prefix+'_b'].dimshuffle('x', 0, 'x', 'x')
     if prefix+"_newmu" in tparams:
         batch_norm = True
         #print "using batch norm for prefix", prefix
     else:
         batch_norm = False
     if batch_norm:
         conv_out = ((conv_out - T.mean(conv_out, axis=(0, 2, 3), keepdims=True))
                     / (0.01 + T.std(conv_out, axis=(0, 2, 3), keepdims=True)))
         conv_out = (conv_out * tparams[prefix+'_newsigma'][index].dimshuffle('x', 0, 'x', 'x')
                     + tparams[prefix+'_newmu'][index].dimshuffle('x', 0, 'x', 'x'))
     # activ is a string such as 'lambda x: tensor.tanh(x)'; eval makes it callable
     conv_out = eval(activ)(conv_out)
     return conv_out
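The two batch-norm lines inside convlayer normalize each channel over the batch and spatial axes, then rescale with learned per-channel parameters. A minimal numpy sketch of the same computation; the shapes and the 0.01 epsilon follow the snippet, while the parameter values are hypothetical:

import numpy as np

conv_out = np.random.randn(4, 8, 16, 16).astype('float32')  # (batch, ch, H, W)
mean = conv_out.mean(axis=(0, 2, 3), keepdims=True)
std = conv_out.std(axis=(0, 2, 3), keepdims=True)
normed = (conv_out - mean) / (0.01 + std)
newsigma = np.ones(8, dtype='float32')   # hypothetical learned per-channel scale
newmu = np.zeros(8, dtype='float32')     # hypothetical learned per-channel shift
out = normed * newsigma[None, :, None, None] + newmu[None, :, None, None]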
Code Example #52
File: ConvolutionalLayer.py Project: alexmlamb/ali2
    def output(self, input):

        if self.unflatten_input is not None:
            input = T.reshape(input, self.unflatten_input)

        W_shuffled = self.W.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01

        conv_out = dnn.dnn_conv(img=input,
                                kerns=W_shuffled,
                                subsample=(self.convstride, self.convstride),
                                border_mode=self.padsize)

        conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')

        if self.batch_norm:
            conv_out = ((conv_out - T.mean(conv_out, axis=(0, 2, 3), keepdims=True))
                        / (1.0 + T.std(conv_out, axis=(0, 2, 3), keepdims=True)))
            conv_out = (conv_out * T.addbroadcast(self.bn_std, 0, 2, 3)
                        + T.addbroadcast(self.bn_mean, 0, 2, 3))

        self.out_store = conv_out

        if self.activation == "relu":
            self.out = T.maximum(0.0, conv_out)
        elif self.activation == "tanh":
            self.out = T.tanh(conv_out)
        elif self.activation == None:
            self.out = conv_out

        #if self.residual:
        #    print "USING RESIDUAL"
        #    self.out += input


        return self.out
Code Example #53
def linear_layer(tensor, W, b):
    # a 1x1 convolution applies the same linear map W at every spatial position
    tensor = cudnn.dnn_conv(
        tensor,
        W[:, :, None, None],
    )
    tensor = tensor + b[None, :, None, None]
    return tensor
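Indexing W[:, :, None, None] turns the weight matrix into 1x1 convolution kernels, and a 1x1 convolution is exactly a dense layer applied independently at every spatial position. A numpy equivalence check with hypothetical sizes:

import numpy as np

x = np.random.randn(2, 8, 5, 5).astype('float32')  # (batch, in_dim, H, W)
W = np.random.randn(16, 8).astype('float32')       # (out_dim, in_dim)
b = np.zeros(16, dtype='float32')
y = np.einsum('oi,bihw->bohw', W, x) + b[None, :, None, None]
assert y.shape == (2, 16, 5, 5)                    # (batch, out_dim, H, W)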
Code Example #54
    def output(self, input, n_batch=None):
        ###--- Unpool

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            unpool_out = input
        else:
            unpool_out = Textra.repeat(Textra.repeat(
                input, self.poolsize[0], axis=2),
                                       self.poolsize[1],
                                       axis=3) * self.mask

        image_shape = list(self.image_shape)
        if n_batch is not None:
            image_shape[0] = n_batch

        ###--- Unpool + conv
        # convolve input feature maps with filters
        if self.border_mode == 'same':
            conv_out = dnn.dnn_conv(
                img=unpool_out,
                kerns=self.W,
                subsample=(1, 1),
                border_mode=self.border,
                #conv_mode='cross'
            )
        else:
            raise Exception('Unknown conv type: %s' % self.border_mode)

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return (lin_output
                if self.activation is None else self.activation(lin_output))
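The nested Textra.repeat calls perform nearest-neighbour unpooling before the convolution; self.mask then presumably zeroes the positions that were not selected by the original pooling. A numpy sketch of just the repeat step for poolsize=(2, 2):

import numpy as np

x = np.arange(4, dtype='float32').reshape(1, 1, 2, 2)
up = np.repeat(np.repeat(x, 2, axis=2), 2, axis=3)  # shape (1, 1, 4, 4)
# each input value now fills a 2x2 block of the upsampled feature map
assert up.shape == (1, 1, 4, 4)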
Code Example #55
    def output(self, input):
        '''
        cuDNN convolution followed by optional max-pooling and cross-channel
        contrast normalization.
        '''
        if self.border_mode == 'same':
            conv_out = dnn.dnn_conv(
                img=input,
                kerns=self.W,
                subsample=(1, 1),
                border_mode=self.border,
                #conv_mode='cross'
            )
        else:
            raise Exception('Unknown conv type: %s' % self.border_mode)

        # downsample each feature map individually, using maxpooling

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            pooled_out = conv_out
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out,
                                                ds=self.poolsize,
                                                ignore_border=True)
        if self.cnorm:
            print 'cnorm size', self.filter_shape[0] / 8 + 1
            pooled_out = ContrastCrossChannels.ContrastCrossChannels(
                input=pooled_out, n=self.filter_shape[0] / 8 + 1)

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return (lin_output
                if self.activation is None else self.activation(lin_output))
Code Example #56
def test_dnn_conv_merge_mouts():
    # make sure it doesn't attempt to output/alpha merge a convolution
    # that has multiple clients.
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img = T.ftensor4()
    kern = T.ftensor4()
    out = T.ftensor4()

    conv = dnn.dnn_conv(img, kern)

    lr = numpy.asarray(0.05, dtype='float32')

    if cuda.dnn.version() == -1:
        # Can't merge alpha with cudnn v1
        fr = conv + out
    else:
        fr = lr * (conv + out)
    rr = conv * lr

    f = theano.function([img, kern, out], [fr, rr], mode=mode_with_gpu)
    convs = [
        n for n in f.maker.fgraph.toposort()
        if isinstance(n.op, dnn.GpuDnnConv)
    ]
    assert len(convs) == 1
Code Example #57
File: test_dnn.py Project: dapeng2018/Theano
def test_dnn_conv_merge():
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img = T.ftensor4()
    kern = T.ftensor4()
    out = T.ftensor4()

    b = 1
    c = 4
    f = 3
    ih = 5
    iw = 8
    kh = 2
    kw = 6
    img_val = numpy.random.random((b, c, ih, iw)).astype("float32")
    kern_val = numpy.random.random((f, c, kh, kw)).astype("float32")
    out_val = numpy.random.random((b, f, ih - kh + 1, iw - kw + 1)).astype("float32")

    conv = dnn.dnn_conv(img, kern)
    gw = theano.grad(conv.sum(), kern)
    gi = theano.grad(conv.sum(), img)

    lr = numpy.asarray(0.05, dtype="float32")

    if cuda.dnn.version() == -1:
        # Can't merge alpha with cudnn v1
        fr = conv + out
        wr = kern + gw
        ir = img + gi
    else:
        fr = lr * (conv + out)
        wr = kern + lr * gw
        ir = img + lr * gi

    f1 = theano.function([img, kern, out], [fr, wr, ir], mode=mode_with_gpu)
    assert isinstance(f1.maker.fgraph.outputs[0].owner.inputs[0].owner.op, dnn.GpuDnnConv)
    assert isinstance(f1.maker.fgraph.outputs[1].owner.inputs[0].owner.op, dnn.GpuDnnConvGradW)
    assert isinstance(f1.maker.fgraph.outputs[2].owner.inputs[0].owner.op, dnn.GpuDnnConvGradI)

    mode = mode_with_gpu
    mode = mode.excluding("local_dnn_conv_alpha_merge")
    mode = mode.excluding("local_dnn_convw_alpha_merge")
    mode = mode.excluding("local_dnn_convi_alpha_merge")
    mode = mode.excluding("local_dnn_conv_output_merge")
    mode = mode.excluding("local_dnn_convw_output_merge")
    mode = mode.excluding("local_dnn_convi_output_merge")

    f2 = theano.function([img, kern, out], [fr, wr, ir], mode=mode)

    assert not isinstance(f2.maker.fgraph.outputs[0].owner.inputs[0].owner.op, dnn.GpuDnnConv)
    assert not isinstance(f2.maker.fgraph.outputs[1].owner.inputs[0].owner.op, dnn.GpuDnnConvGradW)
    assert not isinstance(f2.maker.fgraph.outputs[2].owner.inputs[0].owner.op, dnn.GpuDnnConvGradI)

    out_f1 = f1(img_val, kern_val, out_val)
    out_f2 = f2(img_val, kern_val, out_val)

    assert len(out_f1) == len(out_f2)

    for v1, v2 in zip(out_f1, out_f2):
        utt.assert_allclose(v1, v2)
Code Example #58
 def _output(self, input, *args, **kwargs):
     # identity shortcut when the channel counts already match; otherwise a
     # 1x1 convolution creates the extra feature maps (presumably
     # n_channel - n_in of them), which are concatenated onto the input
     if self.n_channel == self.n_in:
         return input
     out = dnn.dnn_conv(
         img=input, kerns=self.W, subsample=(1, 1), border_mode=0)
     out += self.b.dimshuffle('x', 0, 'x', 'x')
     r = T.concatenate([out, input], axis=1)
     return r
Code Example #59
    def conv(self, X, subsample=(2, 2), border_mode=(2, 2), atype='sigmoid'):

        # dimshuffle(1, 0, 2, 3) swaps the filters' input/output channel axes,
        # reusing the same weights in the transposed direction
        ConH0 = dnn_conv(X,
                         self.W.dimshuffle(1, 0, 2, 3),
                         subsample=subsample,
                         border_mode=border_mode)
        return activation_fn_th(ConH0 + self.c.dimshuffle('x', 0, 'x', 'x'),
                                atype=atype)
Code Example #60
def deconv(X, w, b=None):
    # the direction_hint string is anything but 'forward!', presumably (in old
    # Theano's dnn_conv) to keep the gradient-based full-convolution path
    # available; the padding is half the kernel width, i.e. 'same'
    z = dnn_conv(X,
                 w,
                 direction_hint="*not* 'forward!",
                 border_mode=int(np.floor(w.get_value().shape[-1] / 2.)))
    if b is not None:
        z += b.dimshuffle('x', 0, 'x', 'x')
    return z