Example #1
def build_nips_dnn(n_actions, input_var):
    from lasagne.layers import dnn

    network = lasagne.layers.InputLayer(shape=(32, 4, 80, 80),
                                        input_var=input_var)

    network = dnn.Conv2DDNNLayer(
        network, num_filters=16, filter_size=(8, 8), stride=4,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())

    network = dnn.Conv2DDNNLayer(
        network, num_filters=32, filter_size=(4, 4), stride=2,
        nonlinearity=lasagne.nonlinearities.rectify)

    network = lasagne.layers.DenseLayer(
        network,
        num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify)

    network = lasagne.layers.DenseLayer(
        network,
        num_units=n_actions,
        nonlinearity=None,
        W=lasagne.init.HeUniform(),
        b=lasagne.init.Constant(.1))

    return network
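
A minimal usage sketch for the builder above (an assumption, not part of the original example: it only compiles the forward pass with Theano; note the batch size of 32 baked into the input shape):

import theano
import theano.tensor as T
import lasagne

states = T.tensor4('states')                 # (batch, frames, height, width)
network = build_nips_dnn(n_actions=6, input_var=states)
q_vals = lasagne.layers.get_output(network)  # symbolic Q-values, shape (32, 6)
q_fn = theano.function([states], q_vals)     # compiled forward pass; feed batches of 32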
Example #2
def build_DCNN_softmax_special(input_var_ipw=None, input_var_refl=None):

    from lasagne.layers import dnn  # import here, in case it isn't installed
    print('Single layer conv net')
    # Define the input variable which is 4 frames of IPW fields and 4 frames of
    # reflectivity fields
    l_in_ipw = lasagne.layers.InputLayer(shape=(None, 4, 33, 33),
                                         input_var=input_var_ipw)

    l_in_refl = lasagne.layers.InputLayer(shape=(None, 4, 33, 33),
                                          input_var=input_var_refl)
    
    l_conv1_ipw = dnn.Conv2DDNNLayer(
            l_in_ipw,
            num_filters=8,
            filter_size=(5, 5),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1),
            pad='full'
        )
    
    conv_shape1 = lasagne.layers.get_output_shape(l_conv1_ipw)
    print(conv_shape1)
    
    l_conv1_refl = dnn.Conv2DDNNLayer(
            l_in_refl,
            num_filters=8,
            filter_size=(5, 5),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1),
            pad='full'
        )
    
    conv_shape2 = lasagne.layers.get_output_shape(l_conv1_refl)
    
    print(conv_shape2)
    
    l_concat = lasagne.layers.concat([l_conv1_ipw, l_conv1_refl], axis=1)
    concat_shape = lasagne.layers.get_output_shape(l_concat)
    
    print(concat_shape)
    
    l_hidden1 = lasagne.layers.DenseLayer(
            lasagne.layers.dropout(l_concat, p=0.2),
            num_units=2048,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )
        
    network = lasagne.layers.DenseLayer(
            l_hidden1,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)
    
    return network, l_hidden1
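
A hedged sketch of calling the two-stream builder above (the tensor names are assumptions; deterministic=True turns the dropout layer off at prediction time):

import theano
import theano.tensor as T
import lasagne

ipw = T.tensor4('ipw')    # 4 frames of IPW fields
refl = T.tensor4('refl')  # 4 frames of reflectivity fields
network, l_hidden1 = build_DCNN_softmax_special(ipw, refl)
probs = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function([ipw, refl], probs)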
Example #3
    def build_nature_network_dnn(self, input_width, input_height, output_dim,
                                 num_frames, batch_size):
        """
        Build a large network consistent with the DeepMind Nature paper.
        """
        from lasagne.layers import dnn

        l_in = lasagne.layers.InputLayer(
            shape=(batch_size, num_frames, input_width, input_height)
        )

        l_conv1 = dnn.Conv2DDNNLayer(
            l_in,
            num_filters=32,
            filter_size=(8, 8),
            stride=(4, 4),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        l_conv2 = dnn.Conv2DDNNLayer(
            l_conv1,
            num_filters=64,
            filter_size=(4, 4),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        l_conv3 = dnn.Conv2DDNNLayer(
            l_conv2,
            num_filters=64,
            filter_size=(3, 3),
            stride=(1, 1),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        l_hidden1 = lasagne.layers.DenseLayer(
            l_conv3,
            num_units=512,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        l_out = lasagne.layers.DenseLayer(
            l_hidden1,
            num_units=output_dim,
            nonlinearity=None,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        return l_out
Example #4
def build_model(batch_size=batch_size):
    l_in = nn.layers.InputLayer(shape=(batch_size,)+image.shape)
    l = l_in

    l = conv3(l, num_filters=64)
    l = conv3(l, num_filters=64)

    l = max_pool(l)

    l = conv3(l, num_filters=128)
    l = conv3(l, num_filters=128)

    l = max_pool(l)

    l = conv3(l, num_filters=256)
    l = conv3(l, num_filters=256)
    l = conv3(l, num_filters=256)

    l = max_pool(l)

    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)

    l = max_pool(l)

    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)

    l = max_pool(l)

    l = dnn.Conv2DDNNLayer(l,
                num_filters=4096,
                stride=(1, 1),
                pad="valid",
                filter_size=(7, 7))
    l = dnn.Conv2DDNNLayer(l,
                num_filters=4096,
                stride=(1, 1),
                pad="same",
                filter_size=(1, 1))

    l = dnn.Conv2DDNNLayer(l,
                num_filters=n_classes,
                stride=(1, 1),
                pad="same",
                filter_size=(1, 1),
                nonlinearity=None)

    l_to_strengthen = l
    l_out = l

    return utils.struct(
        input=l_in,
        out=l_out,
        to_strengthen=l_to_strengthen)
Example #5
def build_nature_dnn(n_actions, input_var):
    from lasagne.layers import dnn

    l_in = lasagne.layers.InputLayer(
        shape=(32, 4, 80, 80),
        input_var=input_var
    )

    l_conv1 = dnn.Conv2DDNNLayer(
        l_in,
        num_filters=32,
        filter_size=(8, 8),
        stride=(4, 4),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(),
        b=lasagne.init.Constant(.1)
    )

    l_conv2 = dnn.Conv2DDNNLayer(
        l_conv1,
        num_filters=64,
        filter_size=(4, 4),
        stride=(2, 2),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(),
        b=lasagne.init.Constant(.1)
    )

    l_conv3 = dnn.Conv2DDNNLayer(
        l_conv2,
        num_filters=64,
        filter_size=(3, 3),
        stride=(1, 1),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(),
        b=lasagne.init.Constant(.1)
    )

    l_hidden1 = lasagne.layers.DenseLayer(
        l_conv3,
        num_units=512,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(),
        b=lasagne.init.Constant(.1)
    )

    l_out = lasagne.layers.DenseLayer(
        l_hidden1,
        num_units=n_actions,
        nonlinearity=None,
        W=lasagne.init.HeUniform(),
        b=lasagne.init.Constant(.1)
    )

    return l_out
Example #6
    def build_nips_network_dnn(self, input_width, input_height, output_dim,
                               num_frames, batch_size):
        """
        Build a network consistent with the 2013 NIPS paper.
        """
        # Import it here, in case it isn't installed.
        from lasagne.layers import dnn

        l_in = lasagne.layers.InputLayer(
            shape=(batch_size, num_frames, input_width, input_height)
        )


        l_conv1 = dnn.Conv2DDNNLayer(
            l_in,
            num_filters=16,
            filter_size=(8, 8),
            stride=(4, 4),
            nonlinearity=lasagne.nonlinearities.rectify,
            #W=lasagne.init.HeUniform(),
            W=lasagne.init.Normal(.01),
            b=lasagne.init.Constant(.1)
        )

        l_conv2 = dnn.Conv2DDNNLayer(
            l_conv1,
            num_filters=32,
            filter_size=(4, 4),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            #W=lasagne.init.HeUniform(),
            W=lasagne.init.Normal(.01),
            b=lasagne.init.Constant(.1)
        )

        l_hidden1 = lasagne.layers.DenseLayer(
            l_conv2,
            num_units=256,
            nonlinearity=lasagne.nonlinearities.rectify,
            #W=lasagne.init.HeUniform(),
            W=lasagne.init.Normal(.01),
            b=lasagne.init.Constant(.1)
        )

        l_out = lasagne.layers.DenseLayer(
            l_hidden1,
            num_units=output_dim,
            nonlinearity=None,
            #W=lasagne.init.HeUniform(),
            W=lasagne.init.Normal(.01),
            b=lasagne.init.Constant(.1)
        )

        return l_out
Example #7
	def build_nips_network_dnn(self, input_width, input_height, output_dim,
							   num_frames, batch_size):
		"""
		Build a network based on Google's Atari deep-learning paper.
		"""

		from lasagne.layers import dnn

		l_in = lasagne.layers.InputLayer(
			shape=(batch_size, num_frames, input_width, input_height)
		)


		l_conv1 = dnn.Conv2DDNNLayer(
			l_in,
			num_filters=16,
			filter_size=(8, 8),
			stride=(4, 4),
			nonlinearity=lasagne.nonlinearities.rectify,
			#W=lasagne.init.HeUniform(),
			W=lasagne.init.Normal(.01),
			b=lasagne.init.Constant(.1)
		)

		l_conv2 = dnn.Conv2DDNNLayer(
			l_conv1,
			num_filters=32,
			filter_size=(4, 4),
			stride=(2, 2),
			nonlinearity=lasagne.nonlinearities.rectify,
			#W=lasagne.init.HeUniform(),
			W=lasagne.init.Normal(.01),
			b=lasagne.init.Constant(.1)
		)

		l_hidden1 = lasagne.layers.DenseLayer(
			l_conv2,
			num_units=256,
			nonlinearity=lasagne.nonlinearities.rectify,
			#W=lasagne.init.HeUniform(),
			W=lasagne.init.Normal(.01),
			b=lasagne.init.Constant(.1)
		)

		l_out = lasagne.layers.DenseLayer(
			l_hidden1,
			num_units=output_dim,
			nonlinearity=None,
			#W=lasagne.init.HeUniform(),
			W=lasagne.init.Normal(.01),
			b=lasagne.init.Constant(.1)
		)

		return l_out
Example #8
    def build_DCNN_3_softmax(self, input_var=None):

        from lasagne.layers import dnn
        print('Training the softmax network!!')
        # Define the input variable which is 4 frames of IPW fields and 4 frames of
        # reflectivity fields
        l_in = lasagne.layers.InputLayer(shape=self.input_shape,
                                         input_var=input_var)

        l_conv1 = dnn.Conv2DDNNLayer(
            l_in,
            num_filters=32,
            filter_size=(5, 5),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1))

        l_conv2 = dnn.Conv2DDNNLayer(
            l_conv1,
            num_filters=64,
            filter_size=(3, 3),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1))

        l_conv3 = dnn.Conv2DDNNLayer(
            l_conv2,
            num_filters=64,
            filter_size=(3, 3),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1))

        l_hidden1 = lasagne.layers.DenseLayer(
            l_conv3,
            num_units=2048,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1))

        l_out = lasagne.layers.DenseLayer(l_hidden1,
                                          num_units=1,
                                          nonlinearity=None,
                                          W=lasagne.init.HeUniform(),
                                          b=lasagne.init.Constant(.1))

        network = lasagne.layers.DenseLayer(
            l_out, num_units=6, nonlinearity=lasagne.nonlinearities.softmax)

        return network, l_hidden1
Example #9
    def __init__(self, args):

        self.args = args

        rng = np.random.RandomState(self.args.seed) # fixed random seeds
        theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
        lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))
        data_rng = np.random.RandomState(self.args.seed_data)

        ''' specify pre-trained encoder E '''
        self.enc_layers = [LL.InputLayer(shape=(None, 3, 32, 32), input_var=None)]
        enc_layer_conv1 = dnn.Conv2DDNNLayer(self.enc_layers[-1], 64, (5,5), pad=0, stride=1, W=Normal(0.01), nonlinearity=nn.relu)
        self.enc_layers.append(enc_layer_conv1)
        enc_layer_pool1 = LL.MaxPool2DLayer(self.enc_layers[-1], pool_size=(2, 2))
        self.enc_layers.append(enc_layer_pool1)
        enc_layer_conv2 = dnn.Conv2DDNNLayer(self.enc_layers[-1], 128, (5,5), pad=0, stride=1, W=Normal(0.01), nonlinearity=nn.relu)
        self.enc_layers.append(enc_layer_conv2)
        enc_layer_pool2 = LL.MaxPool2DLayer(self.enc_layers[-1], pool_size=(2, 2))
        self.enc_layers.append(enc_layer_pool2)
        self.enc_layer_fc3 = LL.DenseLayer(self.enc_layers[-1], num_units=256, nonlinearity=T.nnet.relu)
        self.enc_layers.append(self.enc_layer_fc3)
        self.enc_layer_fc4 = LL.DenseLayer(self.enc_layers[-1], num_units=10, nonlinearity=T.nnet.softmax)
        self.enc_layers.append(self.enc_layer_fc4)


        ''' load pretrained weights for encoder '''
        weights_toload = np.load('pretrained/encoder.npz')
        weights_list_toload = [weights_toload['arr_{}'.format(k)] for k in range(len(weights_toload.files))]
        LL.set_all_param_values(self.enc_layers[-1], weights_list_toload)


        ''' input tensor variables '''
        #self.G_weights
        #self.D_weights
        self.dummy_input = T.scalar()
        self.G_layers = []
        self.z = theano_rng.uniform(size=(self.args.batch_size, self.args.z0dim))
        self.x = T.tensor4()
        self.meanx = T.tensor3()
        self.Gen_x = T.tensor4() 
        self.D_layers = []
        self.D_layer_adv = [] 
        self.D_layer_z_recon = []
        self.gen_lr = T.scalar() # learning rate
        self.disc_lr = T.scalar() # learning rate
        self.y = T.ivector()
        self.y_1hot = T.matrix()
        self.Gen_x_list = []
        self.y_recon_list = []
        self.mincost = T.scalar()
        #self.enc_layer_fc3 = self.get_enc_layer_fc3()

        self.real_fc3 = LL.get_output(self.enc_layer_fc3, self.x, deterministic=True)
Example #10
    def get_discriminator(self):
        ''' specify discriminator D0 '''
        """
        disc0_layers = [LL.InputLayer(shape=(self.args.batch_size, 3, 32, 32))]
        disc0_layers.append(LL.GaussianNoiseLayer(disc0_layers[-1], sigma=0.05))
        disc0_layers.append(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 16x16
        disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu)))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 8x8
        disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=0, W=Normal(0.02), nonlinearity=nn.lrelu))) # 6x6
        disc0_layer_shared = LL.NINLayer(disc0_layers[-1], num_units=192, W=Normal(0.02), nonlinearity=nn.lrelu) # 6x6
        disc0_layers.append(disc0_layer_shared)

        disc0_layer_z_recon = LL.DenseLayer(disc0_layer_shared, num_units=50, W=Normal(0.02), nonlinearity=None)
        disc0_layers.append(disc0_layer_z_recon) # also need to recover z from x

        disc0_layers.append(LL.GlobalPoolLayer(disc0_layer_shared))
        disc0_layer_adv = LL.DenseLayer(disc0_layers[-1], num_units=10, W=Normal(0.02), nonlinearity=None)
        disc0_layers.append(disc0_layer_adv)

        return disc0_layers, disc0_layer_adv, disc0_layer_z_recon
        """
        disc_x_layers = [LL.InputLayer(shape=(None, 3, 32, 32))]
        disc_x_layers.append(LL.GaussianNoiseLayer(disc_x_layers[-1], sigma=0.2))
        disc_x_layers.append(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=0, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers_shared = LL.NINLayer(disc_x_layers[-1], num_units=192, W=Normal(0.01), nonlinearity=nn.lrelu)
        disc_x_layers.append(disc_x_layers_shared)

        disc_x_layer_z_recon = LL.DenseLayer(disc_x_layers_shared, num_units=self.args.z0dim, nonlinearity=None)
        disc_x_layers.append(disc_x_layer_z_recon) # also need to recover z from x

        # disc_x_layers.append(nn.MinibatchLayer(disc_x_layers_shared, num_kernels=100))
        disc_x_layers.append(LL.GlobalPoolLayer(disc_x_layers_shared))
        disc_x_layer_adv = LL.DenseLayer(disc_x_layers[-1], num_units=10, W=Normal(0.01), nonlinearity=None)
        disc_x_layers.append(disc_x_layer_adv)

        #output_before_softmax_x = LL.get_output(disc_x_layer_adv, x, deterministic=False)
        #output_before_softmax_gen = LL.get_output(disc_x_layer_adv, gen_x, deterministic=False)

        # temp = LL.get_output(gen_x_layers[-1], deterministic=False, init=True)
        # temp = LL.get_output(disc_x_layers[-1], x, deterministic=False, init=True)
        # init_updates = [u for l in LL.get_all_layers(gen_x_layers)+LL.get_all_layers(disc_x_layers) for u in getattr(l,'init_updates',[])]
        return disc_x_layers, disc_x_layer_adv, disc_x_layer_z_recon
Example #11
def build_2DCNN_softmax_special_refl(input_var_refl=None):

    print('2 CNN refl special')
    # Define the input variable which is 4 frames of IPW fields and 4 frames of 
    # reflectivity fields
#    l_in_ipw = lasagne.layers.InputLayer(shape = (None,4,33,33),
#                                        input_var = input_var_ipw)
    
    l_in_refl = lasagne.layers.InputLayer(shape=(None, 4, 33, 33),
                                          input_var=input_var_refl)
    
    
    
    l_conv1_refl = dnn.Conv2DDNNLayer(
            l_in_refl,
            num_filters=8,
            filter_size=(5, 5),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1),
            pad='full'
        )
    
    l_conv2_refl = dnn.Conv2DDNNLayer(
            l_conv1_refl,
            num_filters=8,
            filter_size=(5, 5),
            stride=(1, 1),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1),
            pad='valid'
        )
    
    
    l_hidden1 = lasagne.layers.DenseLayer(
            lasagne.layers.dropout(l_conv2_refl, p=0.2),
            num_units=2048,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )
        
    network = lasagne.layers.DenseLayer(
            l_hidden1,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)
    
    return network, l_hidden1
Example #12
def build_2layer_cnn_maxpool_2(input_var=None):

#    from lasagne.layers import Conv2DLayer, MaxPool2DLayer
    print('Training 2 layer CNN max-pool network!!')
    # Define the input variable which is 4 frames of IPW fields and 4 frames of
    # reflectivity fields
    l_in = lasagne.layers.InputLayer(shape=(None, 4, 33, 33),
                                     input_var=input_var)
                                        
    l_conv1 = dnn.Conv2DDNNLayer(
            l_in,
            num_filters=16,
            filter_size=(5, 5),
            stride=(1, 1),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1),
            pad='full'
        )
    l_maxpool1 = dnn.MaxPool2DDNNLayer(l_conv1, (2, 2))
        
    l_conv2 = dnn.Conv2DDNNLayer(
            l_maxpool1,
            num_filters=32,
            filter_size=(5, 5),
            stride=(1, 1),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1),
            pad='full'
        )
    
    l_maxpool2 = dnn.MaxPool2DDNNLayer(l_conv2, (2, 2))
        
    l_hidden1 = lasagne.layers.DenseLayer(
            lasagne.layers.dropout(l_maxpool2, p=0.4),
            num_units=2048,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )
        
    network = lasagne.layers.DenseLayer(
            l_hidden1,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)
    
    return network, l_hidden1
Example #13
def build_DCNN_softmax(input_var=None):

    print('Single layer conv net')
    # Define the input variable which is 4 frames of IPW fields and 4 frames of
    # reflectivity fields
    l_in = lasagne.layers.InputLayer(shape=(None, 4, 33, 33),
                                     input_var=input_var)
    
    l_conv1 = dnn.Conv2DDNNLayer(
            l_in,
            num_filters=16,
            filter_size=(5, 5),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1),
            pad='full'
        )
        
    l_hidden1 = lasagne.layers.DenseLayer(
            lasagne.layers.dropout(l_conv1, p=0.2),
            num_units=2048,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )
        
    network = lasagne.layers.DenseLayer(
            l_hidden1,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)
    
    return network, l_hidden1
Example #14
    def build_DCNN(self, input_var=None):

        from lasagne.layers import dnn
        print('We hit the GPU code!')
        # Define the input variable which is 4 frames of IPW fields and 4 frames of
        # reflectivity fields
        l_in = lasagne.layers.InputLayer(shape=self.input_shape,
                                         input_var=input_var)

        l_conv1 = dnn.Conv2DDNNLayer(
            l_in,
            num_filters=32,
            filter_size=(11, 11),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1))

        l_hidden1 = lasagne.layers.DenseLayer(
            l_conv1,
            num_units=256,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1))

        l_out = lasagne.layers.DenseLayer(l_hidden1,
                                          num_units=1,
                                          nonlinearity=None,
                                          W=lasagne.init.HeUniform(),
                                          b=lasagne.init.Constant(.1))

        return l_out, l_hidden1
Example #15
def build_model(input_width, input_height, output_dim,
                batch_size=BATCH_SIZE):
    l_in = lasagne.layers.InputLayer(
        shape=(batch_size, 1, input_width, input_height),
    )

    l_conv1 = dnn.Conv2DDNNLayer(
        l_in,
        num_filters=32,
        filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(),
    )
    l_pool1 = dnn.MaxPool2DDNNLayer(l_conv1, pool_size=(2, 2))

    l_conv2 = dnn.Conv2DDNNLayer(
        l_pool1,
        num_filters=32,
        filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(),
    )
    l_pool2 = dnn.MaxPool2DDNNLayer(l_conv2, pool_size=(2, 2))

    l_hidden1 = lasagne.layers.DenseLayer(
        l_pool2,
        num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(),
    )

    l_hidden1_dropout = lasagne.layers.DropoutLayer(l_hidden1, p=0.5)

    l_out = lasagne.layers.DenseLayer(
        l_hidden1_dropout,
        num_units=output_dim,
        nonlinearity=lasagne.nonlinearities.softmax,
        W=lasagne.init.GlorotUniform(),
    )

    return l_out
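
A hedged training-step sketch for the model above (the 28x28 input size, 10 classes, and the Nesterov-momentum update rule are assumptions, not from the original snippet):

import theano
import theano.tensor as T
import lasagne

X = T.tensor4('X')
y = T.ivector('y')
l_out = build_model(28, 28, output_dim=10, batch_size=None)  # None = variable batch
probs = lasagne.layers.get_output(l_out, X)
loss = lasagne.objectives.categorical_crossentropy(probs, y).mean()
params = lasagne.layers.get_all_params(l_out, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params,
                                            learning_rate=0.01, momentum=0.9)
train_fn = theano.function([X, y], loss, updates=updates)  # one SGD step per call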
Example #16
 def _create_layer(self):
     """ Build a network consistent with the DeepMind Nature paper. """
     _logger.debug("Output shape = %d" % self.output_shape)
     l_in = lasagne.layers.InputLayer(shape=self.input_shape)
     l_conv1 = dnn.Conv2DDNNLayer(
         l_in,
         name='conv_layer_1',
         num_filters=32,
         filter_size=(8, 8),
         stride=(4, 4),
         nonlinearity=lasagne.nonlinearities.rectify,
         W=lasagne.init.HeUniform(),
         b=lasagne.init.Constant(.1))
     l_conv2 = dnn.Conv2DDNNLayer(
         l_conv1,
         num_filters=64,
         filter_size=(4, 4),
         stride=(2, 2),
         nonlinearity=lasagne.nonlinearities.rectify,
         W=lasagne.init.HeUniform(),
         b=lasagne.init.Constant(.1))
     l_conv3 = dnn.Conv2DDNNLayer(
         l_conv2,
         num_filters=64,
         filter_size=(3, 3),
         stride=(1, 1),
         nonlinearity=lasagne.nonlinearities.rectify,
         W=lasagne.init.HeUniform(),
         b=lasagne.init.Constant(.1))
     l_hidden1 = lasagne.layers.DenseLayer(
         l_conv3,
         num_units=512,
         nonlinearity=lasagne.nonlinearities.rectify,
         W=lasagne.init.HeUniform(),
         b=lasagne.init.Constant(.1))
     l_out = lasagne.layers.DenseLayer(l_hidden1,
                                       num_units=self.output_shape,
                                       nonlinearity=None,
                                       W=lasagne.init.HeUniform(),
                                       b=lasagne.init.Constant(.1))
     return l_out
Example #17
def discriminator(input_var):
    network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                        input_var=input_var)

    network = ll.DropoutLayer(network, p=0.5)

    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 64, (4,4), pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))

    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 32, (5,5), stride=2, pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))
    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 32, (5,5), pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))

    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 32, (5,5), pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))

    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 16, (3,3), pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))

    network = nn.weight_norm(ll.DenseLayer(network, num_units=1, W=Normal(0.05), nonlinearity=None), train_g=True, init_stdv=0.1)

    return network
Example #18
def make_conv_bn_relu(l_in, n_filters, filter_size=3, stride=1, pad='same', groups=1):
    # b=None disables the conv bias; the batch norm that follows supplies its
    # own shift, so a separate bias would be redundant.
    l = dnn.Conv2DDNNLayer(l_in, n_filters,
                 filter_size=filter_size,
                 pad=pad,
                 stride=stride,
                 W=nn.init.HeNormal('relu'),
                 b=None,
                 nonlinearity=None)
    #TODO groups arg? (Conv2DDNNLayer has no grouped-convolution support)
    l = bn(l)
    l = lasagne.layers.NonlinearityLayer(l, nonlinearity=lasagne.nonlinearities.rectify)
    return l
Example #19
def conv_layer(input_,
               filter_size,
               num_filters,
               stride,
               pad,
               nonlinearity=relu,
               W=Normal(0.02),
               **kwargs):
    return dnn.Conv2DDNNLayer(input_,
                              num_filters=num_filters,
                              stride=parse_tuple(stride),
                              filter_size=parse_tuple(filter_size),
                              pad=pad,
                              W=W,
                              nonlinearity=nonlinearity,
                              **kwargs)
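
parse_tuple is not defined in this example; a plausible helper (an assumption, not from the original) passes tuples through and duplicates scalars:

def parse_tuple(value):
    # 3 -> (3, 3); (2, 1) is returned unchanged
    if isinstance(value, (tuple, list)):
        return tuple(value)
    return (value, value)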
Example #20
def build_DCNN_softmax_mod_special_refl(input_var_refl=None):

    print(
        'Single Convolution layer with only the reflectivity variable and 8 filters'
    )
    # Define the input variable which is 4 frames of IPW fields and 4 frames of
    # reflectivity fields
    #    l_in_ipw = lasagne.layers.InputLayer(shape = (None,4,33,33),
    #                                        input_var = input_var_ipw)

    l_in_refl = lasagne.layers.InputLayer(shape=(None, 4, 33, 33),
                                          input_var=input_var_refl)

    l_conv1_refl = dnn.Conv2DDNNLayer(
        l_in_refl,
        num_filters=8,
        filter_size=(5, 5),
        stride=(2, 2),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(),
        b=lasagne.init.Constant(.1),
        pad='full')

    conv_shape2 = lasagne.layers.get_output_shape(l_conv1_refl)

    print(conv_shape2)

    l_hidden1 = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(l_conv1_refl, p=0.2),
        num_units=2048,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(),
        b=lasagne.init.Constant(.1))

    network = lasagne.layers.DenseLayer(l_hidden1,
                                        num_units=1,
                                        nonlinearity=None)

    return network, l_hidden1
Example #21
def inceptionModule(input_layer, nfilters):
    inception_net = []
    inception_net.append(
        dnn.MaxPool2DDNNLayer(input_layer, pool_size=3, stride=1, pad=1))  #0
    inception_net.append(
        dnn.Conv2DDNNLayer(inception_net[-1],
                           nfilters[0],
                           1,
                           flip_filters=False))  #1

    inception_net.append(
        dnn.Conv2DDNNLayer(input_layer, nfilters[1], 1,
                           flip_filters=False))  #2

    inception_net.append(
        dnn.Conv2DDNNLayer(input_layer, nfilters[2], 1,
                           flip_filters=False))  #3
    inception_net.append(
        dnn.Conv2DDNNLayer(inception_net[-1],
                           nfilters[3],
                           3,
                           pad=1,
                           flip_filters=False))  #4

    inception_net.append(
        dnn.Conv2DDNNLayer(input_layer, nfilters[4], 1,
                           flip_filters=False))  #5
    inception_net.append(
        dnn.Conv2DDNNLayer(inception_net[-1],
                           nfilters[5],
                           5,
                           pad=2,
                           flip_filters=False))  #6

    inception_net.append(
        ll.ConcatLayer([
            inception_net[2],
            inception_net[4],
            inception_net[6],
            inception_net[1],
        ]))  #7

    return inception_net
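
A minimal usage sketch (the filter counts are an assumption, loosely following the GoogLeNet inception (3a) block; ll and dnn are the aliases the function above already relies on):

import lasagne
import lasagne.layers as ll
from lasagne.layers import dnn

l_in = ll.InputLayer(shape=(None, 192, 28, 28))
# nfilters = [pool-proj, 1x1, 3x3-reduce, 3x3, 5x5-reduce, 5x5]
module = inceptionModule(l_in, nfilters=[32, 64, 96, 128, 16, 32])
l_out = module[-1]  # the ConcatLayer: 64 + 128 + 32 + 32 = 256 channels
print(lasagne.layers.get_output_shape(l_out))  # (None, 256, 28, 28)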
Example #22
gen_layers.append(
    nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1],
                                    (args.batch_size, 3, 32, 32), (5, 5),
                                    W=Normal(0.05),
                                    nonlinearity=T.tanh),
                   train_g=True,
                   init_stdv=0.1))  # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])

# same as the original net; the sizes follow the tempens model (128 - 256)
disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
disc_layers.append(
    nn.weight_norm(
        dnn.Conv2DDNNLayer(disc_layers[-1],
                           128, (3, 3),
                           pad=1,
                           W=Normal(0.05),
                           nonlinearity=nn.lrelu)))
disc_layers.append(
    nn.weight_norm(
        dnn.Conv2DDNNLayer(disc_layers[-1],
                           128, (3, 3),
                           pad=1,
                           W=Normal(0.05),
                           nonlinearity=nn.lrelu)))
disc_layers.append(
    nn.weight_norm(
        dnn.Conv2DDNNLayer(disc_layers[-1],
                           128, (3, 3),
                           pad=1,
                           stride=2,
Example #23
    print "Created folder {}".format(args.out_dir)
    shutil.copyfile(sys.argv[0], args.out_dir + '/training_script.py')
else:
    print "folder {} already exists. please remove it first.".format(
        args.out_dir)
    exit(1)

rng = np.random.RandomState(args.seed)  # fixed random seeds
theano_rng = MRG_RandomStreams(rng.randint(2**15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2**15)))
data_rng = np.random.RandomState(args.seed_data)
''' specify pre-trained encoder E '''
enc_layers = [LL.InputLayer(shape=(None, 3, 32, 32), input_var=None)]
enc_layer_conv1 = dnn.Conv2DDNNLayer(enc_layers[-1],
                                     64, (5, 5),
                                     pad=0,
                                     stride=1,
                                     W=Normal(0.01),
                                     nonlinearity=nn.relu)
enc_layers.append(enc_layer_conv1)
enc_layer_pool1 = LL.MaxPool2DLayer(enc_layers[-1], pool_size=(2, 2))
enc_layers.append(enc_layer_pool1)
enc_layer_conv2 = dnn.Conv2DDNNLayer(enc_layers[-1],
                                     128, (5, 5),
                                     pad=0,
                                     stride=1,
                                     W=Normal(0.01),
                                     nonlinearity=nn.relu)
enc_layers.append(enc_layer_conv2)
enc_layer_pool2 = LL.MaxPool2DLayer(enc_layers[-1], pool_size=(2, 2))
enc_layers.append(enc_layer_pool2)
enc_layer_fc3 = LL.DenseLayer(enc_layers[-1],
Example #24
LEARNING_RATE_SCHEDULE = {
    0: 0.02,
    #150: 0.01,
    #200: 0.02,
    300: 0.01,
    400: 0.005,
    #450: 0.001
}

input = layers.InputLayer(shape=(BATCH_SIZE, 3, IMAGE_SIZE, IMAGE_SIZE))

slicerot = SliceRotateLayer(input)

conv1 = dnn.Conv2DDNNLayer(slicerot,
                           num_filters=64,
                           filter_size=(3, 3),
                           W=lasagne.init.Orthogonal(gain='relu'),
                           nonlinearity=leaky_relu)
pool1 = dnn.MaxPool2DDNNLayer(conv1, (3, 3), stride=(2, 2))

conv2_dropout = lasagne.layers.DropoutLayer(pool1, p=0.1)
conv2 = dnn.Conv2DDNNLayer(conv2_dropout,
                           num_filters=96,
                           filter_size=(3, 3),
                           W=lasagne.init.Orthogonal(gain='relu'),
                           nonlinearity=leaky_relu)
pool2 = dnn.MaxPool2DDNNLayer(conv2, (3, 3), stride=(2, 2))

conv3 = dnn.Conv2DDNNLayer(pool2,
                           num_filters=96,
                           filter_size=(3, 3),
Example #25
    print "Created folder {}".format(args.out_dir)
    shutil.copyfile(sys.argv[0], args.out_dir + '/training_script.py')
else:
    print "folder {} already exists. please remove it first.".format(
        args.out_dir)
    exit(1)

rng = np.random.RandomState(args.seed)  # fixed random seeds
theano_rng = MRG_RandomStreams(rng.randint(2**15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2**15)))
data_rng = np.random.RandomState(args.seed_data)
''' specify pre-trained encoder E '''
enc_layers = [LL.InputLayer(shape=(None, 1, 28, 28), input_var=None)]
enc_layer_conv1 = dnn.Conv2DDNNLayer(enc_layers[-1],
                                     32, (5, 5),
                                     pad=0,
                                     stride=1,
                                     W=Normal(0.01),
                                     nonlinearity=nn.relu)
enc_layers.append(enc_layer_conv1)
enc_layer_pool1 = LL.MaxPool2DLayer(enc_layers[-1], pool_size=(2, 2))
enc_layers.append(enc_layer_pool1)
enc_layer_conv2 = dnn.Conv2DDNNLayer(enc_layers[-1],
                                     32, (5, 5),
                                     pad=0,
                                     stride=1,
                                     W=Normal(0.01),
                                     nonlinearity=nn.relu)
enc_layers.append(enc_layer_conv2)
enc_layer_pool2 = LL.MaxPool2DLayer(enc_layers[-1], pool_size=(2, 2))
enc_layers.append(enc_layer_pool2)
enc_layer_fc3 = LL.DenseLayer(enc_layers[-1],
Example #26
### CNN ARCHITECTURE ####
#########################
#########################
""" VGG_ILSVRG_19_layers"""

data = lasagne.layers.InputLayer(shape=(10, 3, 224, 224))

###################
## Layer block 1 ##
###################

pad1_1 = lasagne.layers.PadLayer(data, width=1)

conv1_1 = dnn.Conv2DDNNLayer(pad1_1,
                             num_filters=64,
                             filter_size=3,
                             nonlinearity=lasagne.nonlinearities.rectify,
                             W=lasagne.init.GlorotUniform())

pad1_2 = lasagne.layers.PadLayer(conv1_1, width=1)

conv1_2 = dnn.Conv2DDNNLayer(pad1_2,
                             num_filters=64,
                             filter_size=3,
                             nonlinearity=lasagne.nonlinearities.rectify,
                             W=lasagne.init.GlorotUniform())

pool1 = dnn.MaxPool2DDNNLayer(conv1_2, pool_size=2, stride=2)

###################
## Layer block 2 ##
Example #27
# specify generative model
noise_dim = (args.batch_size, 100)
noise = theano_rng.uniform(size=noise_dim)
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size,512,4,4)))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 4 -> 8
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 8 -> 16
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,3,32,32), (5,5), W=Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1)) # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])

# specify discriminative model
disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=192, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=192, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
disc_layers.append(nn.weight_norm(ll.DenseLayer(disc_layers[-1], num_units=16, W=Normal(0.05), nonlinearity=None), train_g=True, init_stdv=0.1))
disc_params = ll.get_all_params(disc_layers, trainable=True)

Example #28
x_temp = T.tensor4()
gen_layers.append(
    nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1],
                                    (args.batch_size, 3, 32, 32), (5, 5),
                                    W=Normal(0.05),
                                    nonlinearity=T.tanh,
                                    name='g4'),
                   train_g=True,
                   init_stdv=0.1))  # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])

genz_layers = [x_input]
genz_layers.append(
    dnn.Conv2DDNNLayer(genz_layers[-1],
                       128, (3, 3),
                       pad=1,
                       stride=2,
                       W=Normal(0.05),
                       nonlinearity=nn.lrelu,
                       name='gz1'))
genz_layers.append(
    nn.batch_norm(dnn.Conv2DDNNLayer(genz_layers[-1],
                                     256, (3, 3),
                                     pad=1,
                                     stride=2,
                                     W=Normal(0.05),
                                     nonlinearity=nn.lrelu,
                                     name='gz2'),
                  g=None))
genz_layers.append(
    nn.batch_norm(dnn.Conv2DDNNLayer(genz_layers[-1],
                                     512, (3, 3),
Example #29
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu, name='gen-01'), g=None, name='gen-02'))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (-1,512,4,4), name='gen-03'))
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12')) # 4 -> 8
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-20'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-21'), g=None, name='gen-22')) # 8 -> 16
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-30'))
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (None,3,32,32), (5,5), W=Normal(0.05), nonlinearity=gen_final_non, name='gen-31'), train_g=True, init_stdv=0.1, name='gen-32')) # 16 -> 32

# discriminator xy2p: test a pair of input comes from p(x, y) instead of p_c or p_g
dis_in_x = ll.InputLayer(shape=(None, in_channels) + dim_input)
dis_in_y = ll.InputLayer(shape=(None,))
dis_layers = [dis_in_x]
dis_layers.append(ll.DropoutLayer(dis_layers[-1], p=0.2, name='dis-00'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-01'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 32, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-02'), name='dis-03'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-20'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 32, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-21'), name='dis-22'))
dis_layers.append(ll.DropoutLayer(dis_layers[-1], p=0.2, name='dis-23'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-30'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 64, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-31'), name='dis-32'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-40'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 64, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-41'), name='dis-42'))
dis_layers.append(ll.DropoutLayer(dis_layers[-1], p=0.2, name='dis-43'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-50'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 128, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-51'), name='dis-52'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-60'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 128, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-61'), name='dis-62'))
dis_layers.append(ll.GlobalPoolLayer(dis_layers[-1], name='dis-63'))
dis_layers.append(MLPConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-70'))
dis_layers.append(nn.weight_norm(ll.DenseLayer(dis_layers[-1], num_units=1, W=Normal(0.05), nonlinearity=ln.sigmoid, name='dis-71'), train_g=True, init_stdv=0.1, name='dis-72'))
Example #30
sym_x_eval = T.tensor4()
sym_lr = T.scalar()
sym_alpha_cla_g = T.scalar()
sym_alpha_unlabel_entropy = T.scalar()
sym_alpha_unlabel_average = T.scalar()

shared_unlabel = theano.shared(x_unlabelled, borrow=True)
slice_x_u_g = T.ivector()
slice_x_u_d = T.ivector()
slice_x_u_c = T.ivector()

# classifier x2y: p_c(x, y) = p(x) p_c(y | x)
cla_in_x = ll.InputLayer(shape=(None, in_channels) + dim_input)
cla_layers = [cla_in_x]
cla_layers.append(ll.DropoutLayer(cla_layers[-1], p=0.2, name='cla-00'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-02'), name='cla-03'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-11'), name='cla-12'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-21'), name='cla-22'))
cla_layers.append(dnn.MaxPool2DDNNLayer(cla_layers[-1], pool_size=(2, 2)))
cla_layers.append(ll.DropoutLayer(cla_layers[-1], p=0.5, name='cla-23'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-31'), name='cla-32'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-41'), name='cla-42'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-51'), name='cla-52'))
cla_layers.append(dnn.MaxPool2DDNNLayer(cla_layers[-1], pool_size=(2, 2)))
cla_layers.append(ll.DropoutLayer(cla_layers[-1], p=0.5, name='cla-53'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 512, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-61'), name='cla-62'))
cla_layers.append(ll.batch_norm(ll.NINLayer(cla_layers[-1], num_units=256, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-71'), name='cla-72'))
cla_layers.append(ll.batch_norm(ll.NINLayer(cla_layers[-1], num_units=128, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-81'), name='cla-82'))
cla_layers.append(ll.GlobalPoolLayer(cla_layers[-1], name='cla-83'))
cla_layers.append(ll.batch_norm(ll.DenseLayer(cla_layers[-1], num_units=num_classes, W=Normal(0.05), nonlinearity=ln.softmax, name='cla-91'), name='cla-92'))
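
A hedged sketch of evaluating the classifier above, reusing names already defined in the snippet (sym_y and the accuracy metric are assumptions; deterministic=True disables the dropout layers):

sym_y = T.ivector()
cla_out_eval = ll.get_output(cla_layers[-1], sym_x_eval, deterministic=True)
accuracy = T.mean(T.eq(T.argmax(cla_out_eval, axis=1), sym_y),
                  dtype=theano.config.floatX)
evaluate = theano.function([sym_x_eval, sym_y], accuracy)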