Example #1
def __build_48_net__(self):

        model24 = self.subnet
        network = layers.InputLayer((None, 3, 48, 48),
                                    input_var=self.__input_var__)
        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.batch_norm(
            layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2))
        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.BatchNormLayer(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
        #network = layers.Conv2DLayer(network,num_filters=256,filter_size=(1,1),stride=1,nonlinearity=relu)
        # fuse in features from the trained 24-net: input_layer of its output
        # layer is the 24-net's last hidden DenseLayer
        denselayer24 = model24.net.input_layer
        network = layers.ConcatLayer([network, denselayer24])
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
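A usage sketch (not part of the original snippet): the builder returns only the topmost layer, so inference requires a compiled Theano function. The detector object standing in for the hosting class is an assumption.

import theano
import lasagne

# Hypothetical usage: compile a deterministic forward pass from the
# layer stack returned by __build_48_net__. `detector` is assumed to be
# an instance of the class that owns this method and its input variable.
network = detector.__build_48_net__()
prediction = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function([detector.__input_var__], prediction)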
Example #2
    def __build_48_net__(self):
        network = layers.InputLayer((None, 3, 48, 48),
                                    input_var=self.__input_var__)

        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.batch_norm(network)

        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)

        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(3, 3),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)

        network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
Example #3
 def _forward(self):
     net = {}
     net['input'] = layers.InputLayer(shape=(None, 1, 28, 28),
                                      input_var=self.X)
     net['conv1'] = layers.Conv2DLayer(net['input'],
                                       32, (3, 3),
                                       W=init.Orthogonal(),
                                       pad=1)
     net['pool1'] = layers.MaxPool2DLayer(net['conv1'], (2, 2),
                                          stride=(2, 2))
     net['conv2'] = layers.Conv2DLayer(net['pool1'],
                                       64, (3, 3),
                                       W=init.Orthogonal(),
                                       pad=1)
     net['pool2'] = layers.MaxPool2DLayer(net['conv2'], (2, 2),
                                          stride=(2, 2))
     net['conv3'] = layers.Conv2DLayer(net['pool2'],
                                       128, (3, 3),
                                       W=init.Orthogonal(),
                                       pad=1)
     net['conv4'] = layers.Conv2DLayer(net['conv3'],
                                       128, (3, 3),
                                       W=init.Orthogonal(),
                                       pad=1)
     net['pool3'] = layers.MaxPool2DLayer(net['conv4'], (2, 2),
                                          stride=(2, 2))
     net['flatten'] = layers.FlattenLayer(net['pool3'])
     net['out'] = layers.DenseLayer(net['flatten'],
                                    10,
                                    b=None,
                                    nonlinearity=nonlinearities.softmax)
     return net
Example #4
def getNet6():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1])) #120x120

  conv1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #120x120
  conv2Layer = layers.Conv2DLayer(conv1Layer, num_filters=32, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #120x120
  pool1Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2)) #60x60
  conv3Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #60x60
  conv4Layer = layers.Conv2DLayer(conv3Layer, num_filters=64, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #60x60
  conv5Layer = layers.Conv2DLayer(conv4Layer, num_filters=64, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #60x60
  pool2Layer = layers.MaxPool2DLayer(conv5Layer, pool_size=(2,2)) #30x30
  conv6Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #30x30
  conv7Layer = layers.Conv2DLayer(conv6Layer, num_filters=128, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #30x30
  conv8Layer = layers.Conv2DLayer(conv7Layer, num_filters=128, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #30x30
  pool3Layer = layers.MaxPool2DLayer(conv8Layer, pool_size=(2,2)) #15x15
  conv9Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(4,4), W=HeNormal('relu'), nonlinearity=rectify) #12x12
  flattenLayer = layers.FlattenLayer(conv9Layer)
  hidden1Layer = layers.DenseLayer(flattenLayer, num_units=1024, W=HeNormal('relu'), nonlinearity=rectify)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=512, W=HeNormal('relu'), nonlinearity=rectify)
  dropout2Layer = layers.DropoutLayer(hidden2Layer, p=0.5)
  hidden3Layer = layers.DenseLayer(dropout2Layer, num_units=256, W=HeNormal('relu'), nonlinearity=rectify)
  dropout3Layer = layers.DropoutLayer(hidden3Layer, p=0.5)
  hidden4Layer = layers.DenseLayer(dropout3Layer, num_units=128, W=HeNormal('relu'), nonlinearity=rectify)
  outputLayer = layers.DenseLayer(hidden4Layer, num_units=10, W=HeNormal('relu'), nonlinearity=softmax)
  return outputLayer
Example #5
def build_auto_encoder_mnist_cnn(input_var=None):
    """
    Generate an auto-encoder cnn using the Lasagne library
    """
    # Build encoder part
    network = lyr.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    network = lyr.Conv2DLayer(network, 64, (5, 5), W=lasagne.init.Normal())
    network = lyr.MaxPool2DLayer(network, (2, 2))
    network = lyr.Conv2DLayer(network, 128, (5, 5), W=lasagne.init.Normal())
    network = lyr.MaxPool2DLayer(network, (2, 2))
    network = lyr.FlattenLayer(network)

    network = lyr.DenseLayer(network, 2048, W=lasagne.init.Normal())
    network = lyr.ReshapeLayer(network, (input_var.shape[0], 2048, 1, 1))

    # Build decoder part
    network = lyr.TransposedConv2DLayer(network,
                                        128, (5, 5),
                                        W=lasagne.init.Normal())
    network = lyr.Upscale2DLayer(network, (2, 2))
    network = lyr.TransposedConv2DLayer(network,
                                        64, (4, 4),
                                        W=lasagne.init.Normal())
    network = lyr.Upscale2DLayer(network, (2, 2))
    network = lyr.TransposedConv2DLayer(network,
                                        1, (3, 3),
                                        W=lasagne.init.Normal(),
                                        nonlinearity=None)

    return network
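The decoder works back out to the input shape (None, 1, 28, 28), so the reconstruction can be scored directly against the input. A minimal training sketch; the squared-error objective and Adam optimizer are assumptions, not specified by the original snippet.

import theano
import theano.tensor as T
import lasagne

# Sketch: pixel-wise squared-error reconstruction training.
input_var = T.tensor4('inputs')
network = build_auto_encoder_mnist_cnn(input_var)
reconstruction = lasagne.layers.get_output(network)
loss = lasagne.objectives.squared_error(reconstruction, input_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
train_fn = theano.function([input_var], loss, updates=updates)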
Example #6
def buildModel():

    #this is our input layer with shape (None, channels, height, width)
    l_input = layers.InputLayer((None, 3, 64, 64))

    #first convolutional layer, has l_input layer as incoming and is followed by a pooling layer
    l_conv1 = layers.Conv2DLayer(l_input, num_filters=32, filter_size=3, pad='same', nonlinearity=tanh)
    l_pool1 = layers.MaxPool2DLayer(l_conv1, pool_size=2)

    #second convolution (l_pool1 is incoming), let's increase the number of filters
    l_conv2 = layers.Conv2DLayer(l_pool1, num_filters=64, filter_size=3, pad='same', nonlinearity=tanh)
    l_pool2 = layers.MaxPool2DLayer(l_conv2, pool_size=2)

    #third convolution (l_pool2 is incoming), even more filters
    l_conv3 = layers.Conv2DLayer(l_pool2, num_filters=128, filter_size=3, pad='same', nonlinearity=tanh)
    l_pool3 = layers.MaxPool2DLayer(l_conv3, pool_size=2)

    #fourth and final convolution
    l_conv4 = layers.Conv2DLayer(l_pool3, num_filters=256, filter_size=3, pad='same', nonlinearity=tanh)
    l_pool4 = layers.MaxPool2DLayer(l_conv4, pool_size=2)

    #our cnn contains 3 dense layers, one of them is our output layer
    l_dense1 = layers.DenseLayer(l_pool4, num_units=128, nonlinearity=tanh)
    l_dense2 = layers.DenseLayer(l_dense1, num_units=128, nonlinearity=tanh)

    #the output layer has 6 units which is exactly the count of our class labels
    #it has a softmax activation function, its values represent class probabilities
    l_output = layers.DenseLayer(l_dense2, num_units=6, nonlinearity=softmax)

    #let's see how many params our net has
    print ("MODEL HAS"+ str(layers.count_params(l_output))+" PARAMS")

    #we return the layer stack as our network by returning the last layer
    return l_output
Example #7
def create_network(npochs):
    l_in = layers.InputLayer((None, 1, 200, 250))
    l_conv1 = layers.Conv2DLayer(l_in, num_filters=32, filter_size=(3, 3))
    l_pool1 = layers.MaxPool2DLayer(l_conv1, pool_size=(2, 2))
    l_drop1 = layers.DropoutLayer(l_pool1, p=0.1)
    l_conv2 = layers.Conv2DLayer(l_drop1, num_filters=64, filter_size=(2, 2))
    l_pool2 = layers.MaxPool2DLayer(l_conv2, pool_size=(2, 2))
    l_drop2 = layers.DropoutLayer(l_pool2, p=0.2)
    l_conv3 = layers.Conv2DLayer(l_drop2, num_filters=128, filter_size=(2, 2))
    l_pool3 = layers.MaxPool2DLayer(l_conv3, pool_size=(2, 2))
    l_drop3 = layers.DropoutLayer(l_pool3, p=0.3)
    l_den1 = layers.DenseLayer(l_drop3, num_units=1000)
    l_drop4 = layers.DropoutLayer(l_den1, p=0.5)
    l_den2 = layers.DenseLayer(l_drop4, num_units=1000)
    l_output = layers.DenseLayer(l_den2, num_units=8, nonlinearity=None)
    net = NeuralNet(
        layers=l_output,
        # learning parameters
        update=nesterov_momentum,
        update_learning_rate=theano.shared(np.float32(0.03)),
        update_momentum=theano.shared(np.float32(0.9)),
        regression=True,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
            AdjustVariable('update_momentum', start=0.9, stop=0.9999),
            EarlyStopping(),
        ],
        max_epochs=npochs,  # maximum number of training epochs
        train_split=TrainSplit(eval_size=0.2),
        verbose=1,
    )
    return net
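Because create_network returns a nolearn NeuralNet rather than a bare layer, training is a single fit call. A sketch under the assumption that X, y, and X_val are placeholders for the caller's float32 arrays of shapes (N, 1, 200, 250) and (N, 8):

import numpy as np

# Hypothetical usage of the nolearn wrapper returned above.
net = create_network(npochs=100)
net.fit(X.astype(np.float32), y.astype(np.float32))
predictions = net.predict(X_val.astype(np.float32))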
Example #8
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(1,1), stride=1, pad='same', nonlinearity=leaky_rectify))
    pool1 =            layers.MaxPool2DLayer(layer, (2, 2), 2)
    layer = batch_norm(layers.Conv2DLayer(pool1, 240,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(1,1), stride=1, pad='same', nonlinearity=leaky_rectify))
    pool2 =            layers.MaxPool2DLayer(layer, (2, 2), 2)
    layer = batch_norm(layers.Conv2DLayer(pool2, 640,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm    = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"))
    maskm    = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1,   filter_size=(1,1), nonlinearity=None),   beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=90.0, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map")
    layer    = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(1,1), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer =            layers.InverseLayer(layer, pool2)
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120,  filter_size=(1,1), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer =            layers.InverseLayer(layer, pool1)
    layer = batch_norm(layers.Deconv2DLayer(layer, 120,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer =            layers.Deconv2DLayer(layer, 3,    filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 128,  filter_size=(1,1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5,    filter_size=(1,1), nonlinearity=rectify), name="global_feature")

    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2),  nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf =            layers.Deconv2DLayer(glblf, 3,   filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = layers.ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)

    return network, input_var, mask_var, output_var
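A possible training objective for the returned handles (an assumption, not part of the source): output_var is the flattened reconstruction, so the target is the input flattened the same way.

import theano
import lasagne

# Sketch: reconstruction loss wired from the returned symbolic handles.
network, input_var, mask_var, output_var = build_autoencoder_network()
target = input_var.flatten(2)                     # (batch, 3*PS*PS)
loss = lasagne.objectives.squared_error(output_var, target).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params)
train_fn = theano.function([input_var], loss, updates=updates)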
Example #9
def buildModel():

    print "BUILDING MODEL TYPE..."

    #default settings
    filters = 16
    first_stride = 2
    last_filter_multiplier = 4

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters     , filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2   , filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
  
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4 , filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    net = l.DropoutLayer(net, DROPOUT)

    #net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8 , filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    #net = l.MaxPool2DLayer(net, pool_size=2)

    #net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 16 , filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    #net = l.MaxPool2DLayer(net, pool_size=2)

    #net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 32 , filter_size=7, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    #net = l.MaxPool2DLayer(net, pool_size=2)


    #print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
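A sketch of pairing the head with a matching objective (assumed, not from the source): the sigmoid head calls for binary cross-entropy, the softmax head for categorical cross-entropy.

import theano.tensor as T
import lasagne

net = buildModel()
prediction = lasagne.layers.get_output(net)
if MULTI_LABEL:
    targets = T.matrix('targets')    # rows of 0/1 flags, one per class
    loss = lasagne.objectives.binary_crossentropy(prediction, targets).mean()
else:
    targets = T.ivector('targets')   # one class index per sample
    loss = lasagne.objectives.categorical_crossentropy(prediction, targets).mean()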
Example #10
def build_network(input_var, image_size=28, output_dim=10):

    nonlin = lasagne.nonlinearities.rectify
    W_init = lasagne.init.GlorotUniform()
    b_init = lasagne.init.Constant(0.)

    input_shape = (None, 1, image_size, image_size)

    network = nn.InputLayer(input_shape, input_var)

    network = nn.Conv2DLayer(network,
                             num_filters=64,
                             filter_size=(3, 3),
                             nonlinearity=nonlin,
                             W=W_init,
                             b=b_init)
    network = nn.Conv2DLayer(network,
                             num_filters=64,
                             filter_size=(3, 3),
                             nonlinearity=nonlin,
                             W=W_init,
                             b=b_init)
    network = nn.MaxPool2DLayer(network, pool_size=(2, 2))

    network = nn.Conv2DLayer(network,
                             num_filters=128,
                             filter_size=(3, 3),
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.Conv2DLayer(network,
                             num_filters=128,
                             filter_size=(3, 3),
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.MaxPool2DLayer(network, pool_size=(2, 2))

    network = nn.dropout(network, p=0.5)
    network = nn.DenseLayer(network,
                            num_units=256,
                            W=W_init,
                            b=b_init,
                            nonlinearity=nonlin)

    network = nn.dropout(network, p=0.5)
    network = nn.DenseLayer(network,
                            num_units=output_dim,
                            W=W_init,
                            b=b_init,
                            nonlinearity=None)

    return network
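Note the last DenseLayer has nonlinearity=None, so the network emits raw scores. A loss sketch (an assumption about the intended use) applies the softmax outside the network:

import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
targets = T.ivector('targets')
network = build_network(input_var)
logits = lasagne.layers.get_output(network)        # raw, unnormalized scores
probs = T.nnet.softmax(logits)
loss = lasagne.objectives.categorical_crossentropy(probs, targets).mean()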
Example #11
    def __init__(self, args):

        self.args = args

        rng = np.random.RandomState(self.args.seed) # fixed random seeds
        theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
        lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))
        data_rng = np.random.RandomState(self.args.seed_data)

        ''' specify pre-trained encoder E '''
        self.enc_layers = [LL.InputLayer(shape=(None, 3, 32, 32), input_var=None)]
        enc_layer_conv1 = dnn.Conv2DDNNLayer(self.enc_layers[-1], 64, (5,5), pad=0, stride=1, W=Normal(0.01), nonlinearity=nn.relu)
        self.enc_layers.append(enc_layer_conv1)
        enc_layer_pool1 = LL.MaxPool2DLayer(self.enc_layers[-1], pool_size=(2, 2))
        self.enc_layers.append(enc_layer_pool1)
        enc_layer_conv2 = dnn.Conv2DDNNLayer(self.enc_layers[-1], 128, (5,5), pad=0, stride=1, W=Normal(0.01), nonlinearity=nn.relu)
        self.enc_layers.append(enc_layer_conv2)
        enc_layer_pool2 = LL.MaxPool2DLayer(self.enc_layers[-1], pool_size=(2, 2))
        self.enc_layers.append(enc_layer_pool2)
        self.enc_layer_fc3 = LL.DenseLayer(self.enc_layers[-1], num_units=256, nonlinearity=T.nnet.relu)
        self.enc_layers.append(self.enc_layer_fc3)
        self.enc_layer_fc4 = LL.DenseLayer(self.enc_layers[-1], num_units=10, nonlinearity=T.nnet.softmax)
        self.enc_layers.append(self.enc_layer_fc4)


        ''' load pretrained weights for encoder '''
        weights_toload = np.load('pretrained/encoder.npz')
        weights_list_toload = [weights_toload['arr_{}'.format(k)] for k in range(len(weights_toload.files))]
        LL.set_all_param_values(self.enc_layers[-1], weights_list_toload)


        ''' input tensor variables '''
        #self.G_weights
        #self.D_weights
        self.dummy_input = T.scalar()
        self.G_layers = []
        self.z = theano_rng.uniform(size=(self.args.batch_size, self.args.z0dim))
        self.x = T.tensor4()
        self.meanx = T.tensor3()
        self.Gen_x = T.tensor4() 
        self.D_layers = []
        self.D_layer_adv = [] 
        self.D_layer_z_recon = []
        self.gen_lr = T.scalar() # learning rate
        self.disc_lr = T.scalar() # learning rate
        self.y = T.ivector()
        self.y_1hot = T.matrix()
        self.Gen_x_list = []
        self.y_recon_list = []
        self.mincost = T.scalar()
        #self.enc_layer_fc3 = self.get_enc_layer_fc3()

        self.real_fc3 = LL.get_output(self.enc_layer_fc3, self.x, deterministic=True)
Example #12
    def __init__(self, x, y, args):
        self.params_theta = []
        self.params_lambda = []
        self.params_weight = []
        if args.dataset == 'mnist':
            input_size = (None, 1, 28, 28)
        elif args.dataset == 'cifar10':
            input_size = (None, 3, 32, 32)
        else:
            raise AssertionError
        layers = [ll.InputLayer(input_size)]
        self.penalty = theano.shared(np.array(0.))

        #conv1
        layers.append(Conv2DLayerWithReg(args, layers[-1], 20, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #conv2
        layers.append(Conv2DLayerWithReg(args, layers[-1], 50, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))

        # Michael: add dropout
        layers.append(ll.DropoutLayer(layers[-1]))  # Michael
        #fc1
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=500))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.DropoutLayer(layers[-1]))  # Michael
        #softmax
        layers.append(
            DenseLayerWithReg(args,
                              layers[-1],
                              num_units=10,
                              nonlinearity=nonlinearities.softmax))
        self.add_params_to_self(args, layers[-1])
        # no dropout on output

        self.layers = layers
        self.y = ll.get_output(layers[-1], x, deterministic=False)
        self.prediction = T.argmax(self.y, axis=1)
        # self.penalty = penalty if penalty != 0. else T.constant(0.)
        print(self.params_lambda)
        # time.sleep(20)
        # cost function
        self.loss = T.mean(categorical_crossentropy(self.y, y))
        self.lossWithPenalty = T.add(self.loss, self.penalty)
        print("loss and losswithpenalty", type(self.loss),
              type(self.lossWithPenalty))


# Michael: wide resnet: https://gist.github.com/FlorianMuellerklein/3d9ba175038a3f2e7de3794fa303f1ee
# https://github.com/FlorianMuellerklein/Identity-Mapping-ResNet-Lasagne/blob/master/models.py
Example #13
def buildModel():

    # The input layer with shape (None, channels, height, width)
    l_input = layers.InputLayer((None, 3, 32, 32))

    # First convolutional layer, has l_input layer as incoming and is followed by a pooling layer, filters = 16
    l_conv1 = layers.Conv2DLayer(l_input,
                                 num_filters=16,
                                 filter_size=3,
                                 pad='same',
                                 nonlinearity=tanh)
    l_pool1 = layers.MaxPool2DLayer(l_conv1, pool_size=2)

    # The second convolution (l_pool1 is incoming), filters = 32
    l_conv2 = layers.Conv2DLayer(l_pool1,
                                 num_filters=32,
                                 filter_size=3,
                                 pad='same',
                                 nonlinearity=tanh)
    l_pool2 = layers.MaxPool2DLayer(l_conv2, pool_size=2)

    # The third convolution (l_pool2 is incoming), filters = 64
    l_conv3 = layers.Conv2DLayer(l_pool2,
                                 num_filters=64,
                                 filter_size=3,
                                 pad='same',
                                 nonlinearity=tanh)
    l_pool3 = layers.MaxPool2DLayer(l_conv3, pool_size=2)

    # The fourth and final convolution, filters = 128
    l_conv4 = layers.Conv2DLayer(l_pool3,
                                 num_filters=128,
                                 filter_size=3,
                                 pad='same',
                                 nonlinearity=tanh)
    l_pool4 = layers.MaxPool2DLayer(l_conv4, pool_size=2)

    # The CNN contains 3 dense layers, one of them is the output layer
    l_dense1 = layers.DenseLayer(l_pool4, num_units=64, nonlinearity=tanh)
    l_dense2 = layers.DenseLayer(l_dense1, num_units=64, nonlinearity=tanh)

    # The output layer has 43 units which is exactly the count of our class labels
    # It has a softmax activation function, its values represent class probabilities
    l_output = layers.DenseLayer(l_dense2, num_units=43, nonlinearity=softmax)

    #print "The CNN model has ", layers.count_params(l_output), "parameters"

    # Returning the layer stack by returning the last layer
    return l_output
Example #14
def build_network(input_var=None, input_shape=227):

    nf = 32
    n = lasagne.nonlinearities.tanh
    W_init = lasagne.init.GlorotUniform()

    net = nn.InputLayer((None, 3, None, None), input_var=input_var)

    # Block 1
    net = nn.Conv2DLayer(net, nf, 3, W=W_init, nonlinearity=n, pad='same')
    net = nn.Conv2DLayer(net, nf, 3, W=W_init, nonlinearity=n, pad='same')
    net = nn.MaxPool2DLayer(net, 2)

    # Block 2
    net = nn.Conv2DLayer(net, nf * 2, 3, W=W_init, nonlinearity=n, pad='same')
    net = nn.Conv2DLayer(net, nf * 2, 3, W=W_init, nonlinearity=n, pad='same')
    net = nn.SpatialPyramidPoolingLayer(net, [4, 2, 1],
                                        implementation='kaiming')

    net = nn.DenseLayer(net, 512, W=W_init, nonlinearity=n)
    net = nn.dropout(net, p=0.5)

    net = nn.DenseLayer(net, 128, W=W_init, nonlinearity=n)

    return nn.DenseLayer(net, 1, W=W_init, nonlinearity=T.nnet.sigmoid)
Example #15
def build_contract_level(incoming,
                         num_filters,
                         nonlin,
                         W_init=lasagne.init.GlorotUniform(),
                         b_init=lasagne.init.Constant(0.01),
                         filter_size=3):
    """Builds a Conv-Conv-Pool block of the U-Net encoder."""

    network = nn.Conv2DLayer(incoming,
                             num_filters,
                             filter_size,
                             pad='same',
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.batch_norm(network)
    network = nn.Conv2DLayer(network,
                             num_filters,
                             filter_size,
                             pad='same',
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.batch_norm(network)
    return network, nn.MaxPool2DLayer(network, 2)
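A sketch of chaining such blocks into a U-Net contracting path (the input size and depth are assumptions): the first element of each returned pair is kept for the decoder's skip connections.

import lasagne.layers as nn
from lasagne.nonlinearities import rectify

inputs = nn.InputLayer((None, 1, 256, 256))
skip1, down1 = build_contract_level(inputs, 64, rectify)    # 256 -> 128
skip2, down2 = build_contract_level(down1, 128, rectify)    # 128 -> 64
skip3, down3 = build_contract_level(down2, 256, rectify)    # 64 -> 32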
Example #16
def getNet9():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1]))
  conv1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(5,3), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2,2))
  conv2Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(5,4), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2))
  conv3Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(4,4), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2,2))
  conv4Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(4,4), W=GlorotNormal('relu'), nonlinearity=rectify)
  hidden1Layer = layers.DenseLayer(conv4Layer, num_units=2048, W=GlorotNormal('relu'), nonlinearity=rectify)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=1024, W=GlorotNormal('relu'), nonlinearity=rectify)
  dropout2Layer = layers.DropoutLayer(hidden2Layer, p=0.5)
  hidden3Layer = layers.DenseLayer(dropout2Layer, num_units=512, W=GlorotNormal('relu'), nonlinearity=rectify)
  outputLayer = layers.DenseLayer(hidden3Layer, num_units=10, W=GlorotNormal(1.0), nonlinearity=softmax)
  return outputLayer
Example #17
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify)
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2)
    layer = layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify)

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad')
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid)

    return network, input_var, target_var
Example #18
def init_cnn(model_file, hidden_units, num_filters, filter_hs, dropout_rate,
             n_words, n_dim):
    """
    Initializes the CNN by loading the weights of a previously trained model. Note that the
    previously trained model and this model must have the same architecture and parameters.
    See trainCNN.py for an explanation of the neural network architecture.
    :param model_file:
    :param hidden_units:
    :param num_filters:
    :param filter_hs:
    :param dropout_rate:
    :param n_words:
    :param n_dim:
    :return:
    """
    assert len(num_filters) == len(filter_hs)
    filter_shapes = []
    pool_sizes = []
    for filter_h in filter_hs:
        filter_shapes.append((filter_h, n_dim))
        pool_sizes.append((n_words - filter_h + 1, 1))

    l_in = LL.InputLayer(shape=(None, 1, n_words, n_dim))

    layer_list = []
    for i in range(len(filter_hs)):
        l_conv = LL.Conv2DLayer(l_in,
                                num_filters=num_filters[i],
                                filter_size=filter_shapes[i],
                                nonlinearity=L.nonlinearities.rectify,
                                W=L.init.HeNormal(gain='relu'))
        l_pool = LL.MaxPool2DLayer(l_conv, pool_size=pool_sizes[i])
        layer_list.append(l_pool)

    mergedLayer = LL.ConcatLayer(layer_list)

    l_hidden1 = LL.DenseLayer(mergedLayer,
                              num_units=hidden_units[0],
                              nonlinearity=L.nonlinearities.tanh,
                              W=L.init.HeNormal(gain='relu'))
    l_hidden1_dropout = LL.DropoutLayer(l_hidden1, p=dropout_rate[0])

    l_hidden2 = LL.DenseLayer(l_hidden1_dropout,
                              num_units=hidden_units[1],
                              nonlinearity=L.nonlinearities.tanh,
                              W=L.init.HeNormal(gain='relu'))
    l_hidden2_dropout = LL.DropoutLayer(l_hidden2, p=dropout_rate[1])

    l_output = LL.DenseLayer(l_hidden2_dropout,
                             num_units=hidden_units[2],
                             nonlinearity=L.nonlinearities.tanh)

    net_output = theano.function([l_in.input_var],
                                 LL.get_output(l_output, deterministic=True))

    with np.load(model_file) as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    LL.set_all_param_values(l_output, param_values)

    return net_output
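A call sketch for the compiled function (every name and size below is a placeholder, not a value from the source):

import numpy as np

net_output = init_cnn('cnn_weights.npz', hidden_units=[100, 50, 2],
                      num_filters=[32, 32, 32], filter_hs=[3, 4, 5],
                      dropout_rate=[0.5, 0.5], n_words=60, n_dim=300)
batch = np.zeros((16, 1, 60, 300), dtype=np.float32)
scores = net_output(batch)    # shape (16, 2) since hidden_units[2] == 2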
Example #19
def getNet4():
    inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0],
                                          imageShape[1]))  #120x120
    conv1Layer = layers.Conv2DLayer(inputLayer,
                                    num_filters=32,
                                    filter_size=(5, 5),
                                    nonlinearity=elu)  #116x116
    pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2, 2))  #58x58
    dropout1Layer = layers.DropoutLayer(pool1Layer, p=0.5)
    conv2Layer = layers.Conv2DLayer(dropout1Layer,
                                    num_filters=64,
                                    filter_size=(5, 5),
                                    nonlinearity=tanh)  #54x54
    pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2, 2))  #27x27
    dropout2Layer = layers.DropoutLayer(pool2Layer, p=0.5)
    conv3Layer = layers.Conv2DLayer(dropout2Layer,
                                    num_filters=128,
                                    filter_size=(4, 4),
                                    nonlinearity=tanh)  #24x24
    pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2, 2))  #12x12
    dropout3Layer = layers.DropoutLayer(pool3Layer, p=0.5)
    conv4Layer = layers.Conv2DLayer(dropout3Layer,
                                    num_filters=256,
                                    filter_size=(3, 3),
                                    nonlinearity=elu)  #10x10
    pool4Layer = layers.MaxPool2DLayer(conv4Layer, pool_size=(2, 2))  #5x5
    dropout4Layer = layers.DropoutLayer(pool4Layer, p=0.5)
    conv5Layer = layers.Conv2DLayer(dropout4Layer,
                                    num_filters=512,
                                    filter_size=(4, 4),
                                    nonlinearity=tanh)  #2x2
    hidden1Layer = layers.DenseLayer(conv5Layer,
                                     num_units=2048,
                                     nonlinearity=tanh)
    hidden2Layer = layers.DenseLayer(hidden1Layer,
                                     num_units=1024,
                                     nonlinearity=elu)
    hidden3Layer = layers.DenseLayer(hidden2Layer,
                                     num_units=512,
                                     nonlinearity=tanh)
    hidden4Layer = layers.DenseLayer(hidden3Layer,
                                     num_units=256,
                                     nonlinearity=tanh)
    outputLayer = layers.DenseLayer(hidden4Layer,
                                    num_units=10,
                                    nonlinearity=softmax)
    return outputLayer
Example #20
def input_block(net, config, melspec=False, verbose=True):
    """
    """
    # load scaler
    sclr = joblib.load(config.paths.preproc.scaler)

    net['input'] = L.InputLayer(shape=get_in_shape(config), name='input')
    sigma = theano.shared(np.array(0., dtype=np.float32),
                          name='noise_controller')
    net['noise'] = L.GaussianNoiseLayer(net['input'],
                                        sigma=sigma,
                                        name='input_corruption')

    if config.hyper_parameters.input == "melspec":

        net['sclr'] = L.standardize(net['noise'],
                                    offset=sclr.mean_.astype(np.float32),
                                    scale=sclr.scale_.astype(np.float32),
                                    shared_axes=(0, 1, 2))
    else:
        net['stft'] = STFTLayer(L.ReshapeLayer(net['noise'],
                                               ([0], [1], [2], 1),
                                               name='reshape'),
                                n_fft=config.hyper_parameters.n_fft,
                                hop_size=config.hyper_parameters.hop_size)

        if melspec:
            # note: MelSpecLayer is assumed to take the STFT layer as its
            # incoming layer; the original call passed no incoming layer
            net['melspec'] = MelSpecLayer(
                net['stft'],
                sr=config.hyper_parameters.sample_rate,
                n_fft=config.hyper_parameters.n_fft,
                n_mels=128,
                log_amplitude=True)

            net['sclr'] = L.standardize(net['melspec'],
                                        offset=sclr.mean_.astype(np.float32),
                                        scale=sclr.scale_.astype(np.float32),
                                        shared_axes=(0, 1, 2))

        else:
            net['sclr'] = L.standardize(net['stft'],
                                        offset=sclr.mean_.astype(np.float32),
                                        scale=sclr.scale_.astype(np.float32),
                                        shared_axes=(0, 1, 2))

            # only pooling freq domain
            net['stft.pl'] = L.MaxPool2DLayer(net['sclr'],
                                              pool_size=(2, 1),
                                              name='stft.pl')

    if verbose:
        print(net['input'].output_shape)
        # if melspec:
        #     print(net['melspec'].output_shape)
        # else:
        #     print(net['stft'].output_shape)
        #     print(net['stft.pl'].output_shape)
        print(net['sclr'].output_shape)

    return net, sigma
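The returned sigma shared variable lets the caller anneal the input corruption without recompiling. A sketch; the schedule and the config object are assumptions:

import numpy as np

net, sigma = input_block({}, config)    # config supplied by the caller
for epoch, level in enumerate(np.linspace(0.3, 0.0, 10)):
    sigma.set_value(np.float32(level))
    # ... run one training epoch at this noise level ...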
Example #21
def _build(X):
    layer = layers.InputLayer(shape=(None, 1, 28, 28), input_var=X)
    layer = layers.Conv2DLayer(layer,
                               num_filters=32,
                               filter_size=(5, 5),
                               stride=(1, 1),
                               pad='same',
                               untie_biases=False,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    visual1 = layers.get_output(layer)
    layer = layers.MaxPool2DLayer(layer,
                                  pool_size=(2, 2),
                                  stride=None,
                                  pad=(0, 0),
                                  ignore_border=False)
    layer = layers.Conv2DLayer(layer,
                               num_filters=32,
                               filter_size=(5, 5),
                               stride=(1, 1),
                               pad='same',
                               untie_biases=False,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    visual2 = layers.get_output(layer)
    layer = layers.MaxPool2DLayer(layer,
                                  pool_size=(2, 2),
                                  stride=None,
                                  pad=(0, 0),
                                  ignore_border=False)
    layer = layers.flatten(layer, outdim=2)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer,
                              num_units=256,
                              W=init.GlorotUniform(),
                              b=init.Constant(0.),
                              nonlinearity=nonlinearities.rectify)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer,
                              num_units=10,
                              W=init.GlorotUniform(),
                              b=init.Constant(0.),
                              nonlinearity=nonlinearities.softmax)
    return layer, visual1, visual2
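Since _build also returns the symbolic activations of both conv layers, a feature-map viewer is one compiled function away (a sketch; batch is a placeholder float32 array of shape (N, 1, 28, 28)):

import theano
import theano.tensor as T

X = T.tensor4('X')
layer, visual1, visual2 = _build(X)
feature_maps = theano.function([X], [visual1, visual2])
maps1, maps2 = feature_maps(batch)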
Example #22
def simpleConv(input_var=None, num_units=32):

    network = layers.InputLayer(shape=(None, input_n_channel, input_height,
                                       input_width),
                                input_var=input_var)

    network = layers.Conv2DLayer(network,
                                 num_filters=num_units,
                                 filter_size=(9, 9))
    network = layers.MaxPool2DLayer(network, pool_size=(2, 2))
    network = layers.Conv2DLayer(network,
                                 num_filters=num_units,
                                 filter_size=(9, 9))
    network = layers.MaxPool2DLayer(network, pool_size=(2, 2))
    network = layers.Conv2DLayer(network,
                                 num_filters=1000,
                                 filter_size=(10, 10))

    network = layers.DenseLayer(layers.DropoutLayer(network, p=0.2),
                                num_units=1000)
    network = layers.DenseLayer(layers.DropoutLayer(network, p=0.5),
                                num_units=1000)

    network = layers.ReshapeLayer(network,
                                  shape=(input_var.shape[0], 1000, 1, 1))
    '''
	network = layers.TransposedConv2DLayer(network, num_filters=num_units, filter_size=(4,4))
	network = layers.Upscale2DLayer(network, 2)
	network = layers.TransposedConv2DLayer(network, num_filters=num_units, filter_size=(5,5))
	network = layers.Upscale2DLayer(network, 2)
	network = layers.TransposedConv2DLayer(network, num_filters=3, filter_size=(9,9))
	'''
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=num_units,
                                           filter_size=(8, 8))
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=num_units,
                                           filter_size=(9, 9))
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=num_units,
                                           filter_size=(9, 9))
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=3,
                                           filter_size=(9, 9))

    return network
Example #23
def getNet5():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1])) #120x120
  conv1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(4,4), nonlinearity=elu) #117x117
  pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(3,3)) #39x39
  conv2Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(4,4), nonlinearity=tanh) #36x36
  pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2)) #18x18
  conv3Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(4,4), nonlinearity=sigmoid) #15x15
  pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(3,3)) #5x5
  conv4Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(4,4), nonlinearity=tanh) #2x2
  hidden1Layer = layers.DenseLayer(conv4Layer, num_units=1024, nonlinearity=elu)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=512, nonlinearity=tanh)
  dropout2Layer = layers.DropoutLayer(hidden2Layer, p=0.5)
  hidden3Layer = layers.DenseLayer(dropout2Layer, num_units=256, nonlinearity=tanh)
  dropout3Layer = layers.DropoutLayer(hidden3Layer, p=0.5)
  hidden4Layer = layers.DenseLayer(dropout3Layer, num_units=128, nonlinearity=tanh)
  outputLayer = layers.DenseLayer(hidden4Layer, num_units=10, nonlinearity=softmax)
  return outputLayer
Example #24
def getNet3():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1]))
  conv1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(3,3), nonlinearity=elu)
  pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2,2))
  dropout1Layer = layers.DropoutLayer(pool1Layer, p=0.2)
  conv2Layer = layers.Conv2DLayer(dropout1Layer, num_filters=64, filter_size=(4,3), nonlinearity=tanh)
  pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2))
  dropout2Layer = layers.DropoutLayer(pool2Layer, p=0.2)
  conv3Layer = layers.Conv2DLayer(dropout2Layer, num_filters=128, filter_size=(3,3), nonlinearity=tanh)
  pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2,2))
  dropout3Layer = layers.DropoutLayer(pool3Layer, p=0.2)
  conv4Layer = layers.Conv2DLayer(dropout3Layer, num_filters=256, filter_size=(3,2), nonlinearity=elu)
  hidden1Layer = layers.DenseLayer(conv4Layer, num_units=1024, nonlinearity=elu)
  hidden2Layer = layers.DenseLayer(hidden1Layer, num_units=512, nonlinearity=tanh)
  hidden3Layer = layers.DenseLayer(hidden2Layer, num_units=256, nonlinearity=tanh)
  #hidden4Layer = layers.DenseLayer(hidden3Layer, num_units=256, nonlinearity=elu)
  #hidden5Layer = layers.DenseLayer(hidden4Layer, num_units=128, nonlinearity=tanh)
  outputLayer = layers.DenseLayer(hidden3Layer, num_units=10, nonlinearity=softmax)
  return outputLayer
Example #25
def build_model(input_var):
    layer = layers.InputLayer(shape=(None, 3, 224, 224), input_var=input_var)
    layer = layers.Conv2DLayer(layer, num_filters=64, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.Conv2DLayer(layer, num_filters=128, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.Conv2DLayer(layer, num_filters=256, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.Conv2DLayer(layer, num_filters=512, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.Conv2DLayer(layer, num_filters=512, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.flatten(layer, outdim=2)
    layer = layers.DenseLayer(layer, num_units=4096, nonlinearity=nonlinearities.rectify)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer, num_units=4096, nonlinearity=nonlinearities.rectify)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer, num_units=2, nonlinearity=nonlinearities.softmax)
    return layer
Example #26
 def _build(self):
     layer = layers.InputLayer(shape=(None, 3, 112, 112), input_var=self.X)
     layer = layers.Conv2DLayer(layer, num_filters=64, filter_size=(5, 5), stride=(1, 1), pad='same',
                                untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                                nonlinearity=nonlinearities.rectify)
     layer = layers.MaxPool2DLayer(layer, pool_size=(2, 2), stride=None, pad=(0, 0), ignore_border=False)
     layer = layers.Conv2DLayer(layer, num_filters=64, filter_size=(5, 5), stride=(1, 1), pad='same',
                                untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                                nonlinearity=nonlinearities.rectify)
     layer = layers.MaxPool2DLayer(layer, pool_size=(8, 8), stride=None, pad=(0, 0), ignore_border=False)
     layer = layers.flatten(layer, outdim=2)  # the flatten layer is optional; DenseLayer flattens its input automatically
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer, num_units=2048,
                               W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer, num_units=2,
                               W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.softmax)
     return layer
Example #27
def getNet2():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1]))
  loc1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(3,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  loc2Layer = layers.MaxPool2DLayer(loc1Layer, pool_size=(2,2))
  loc3Layer = layers.Conv2DLayer(loc2Layer, num_filters=64, filter_size=(4,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  loc4Layer = layers.MaxPool2DLayer(loc3Layer, pool_size=(2,2))
  loc5Layer = layers.Conv2DLayer(loc4Layer, num_filters=128, filter_size=(3,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  loc6Layer = layers.MaxPool2DLayer(loc5Layer, pool_size=(2,2))
  loc7Layer = layers.Conv2DLayer(loc6Layer, num_filters=256, filter_size=(3,2), W=GlorotUniform('relu'), nonlinearity=rectify)
  #loc7Layer = layers.DenseLayer(loc5Layer, num_units=1024, nonlinearity=rectify)
  loc8Layer = layers.DenseLayer(loc7Layer, num_units=256, W=GlorotUniform('relu'), nonlinearity=rectify)
  loc9Layer = layers.DenseLayer(loc8Layer, num_units=128, W=GlorotUniform('relu'), nonlinearity=rectify)
  loc10Layer = layers.DenseLayer(loc9Layer, num_units=64, W=GlorotUniform('relu'), nonlinearity=rectify)
  #loc11Layer = layers.DenseLayer(loc10Layer, num_units=32, nonlinearity=tanh)
  #loc12Layer = layers.DenseLayer(loc11Layer, num_units=16, nonlinearity=tanh)
  locOutLayer = layers.DenseLayer(loc10Layer, num_units=6, W=GlorotUniform(1.0), nonlinearity=identity)

  transformLayer = layers.TransformerLayer(inputLayer, locOutLayer, downsample_factor=1.0)

  conv1Layer = layers.Conv2DLayer(transformLayer, num_filters=32, filter_size=(3,3), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2,2))
  conv2Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(4,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2))
  conv3Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(3,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2,2))
  conv4Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(3,2), W=GlorotNormal('relu'), nonlinearity=rectify)
  hidden1Layer = layers.DenseLayer(conv4Layer, num_units=1024, W=GlorotUniform('relu'), nonlinearity=rectify)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=512, W=GlorotUniform('relu'), nonlinearity=rectify)
  #hidden3Layer = layers.DenseLayer(hidden2Layer, num_units=256, nonlinearity=tanh)
  outputLayer = layers.DenseLayer(hidden2Layer, num_units=10, W=GlorotUniform(1.0), nonlinearity=softmax)
  return outputLayer
Example #28
def build_mnist_cnn(input_var=None):
    """
    Generate the cnn using the Lasagne library
    """
    network = lyr.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    network = lyr.Conv2DLayer(network,
                              64, (5, 5),
                              W=lasagne.init.GlorotNormal())
    network = lyr.MaxPool2DLayer(network, (2, 2))
    network = lyr.Conv2DLayer(network,
                              128, (5, 5),
                              W=lasagne.init.GlorotNormal())
    network = lyr.MaxPool2DLayer(network, (2, 2))
    network = lyr.Conv2DLayer(network, 256, (4, 4), W=lasagne.init.Normal())

    network = lyr.DenseLayer(network, 512, W=lasagne.init.GlorotNormal())
    network = lyr.DenseLayer(network,
                             10,
                             nonlinearity=lasagne.nonlinearities.softmax,
                             W=lasagne.init.GlorotNormal())

    return network
Example #29
    def __init__(self, x, y, args):
        self.params_theta = []
        self.params_lambda = []
        self.params_weight = []
        if args.dataset == 'mnist':
            input_size = (None, 1, 28, 28)
        elif args.dataset == 'cifar10':
            input_size = (None, 3, 32, 32)
        else:
            raise AssertionError
        layers = [ll.InputLayer(input_size)]
        self.penalty = theano.shared(np.array(0.))

        #conv1
        layers.append(Conv2DLayerWithReg(args, layers[-1], 20, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #conv2
        layers.append(Conv2DLayerWithReg(args, layers[-1], 50, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #fc1
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=500))
        self.add_params_to_self(args, layers[-1])
        #softmax
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=10, nonlinearity=nonlinearities.softmax))
        self.add_params_to_self(args, layers[-1])

        self.layers = layers
        self.y = ll.get_output(layers[-1], x, deterministic=False)
        self.prediction = T.argmax(self.y, axis=1)
        # self.penalty = penalty if penalty != 0. else T.constant(0.)
        print(self.params_lambda)
        # time.sleep(20)
        # cost function
        self.loss = T.mean(categorical_crossentropy(self.y, y))
        self.lossWithPenalty = T.add(self.loss, self.penalty)
        print "loss and losswithpenalty", type(self.loss), type(self.lossWithPenalty)
Example #30
 def __build_24_calib_net__(self):
     network = layers.InputLayer((None, 3, 24, 24),
                                 input_var=self.__input_var__)
     network = layers.Conv2DLayer(network,
                                  num_filters=32,
                                  filter_size=(5, 5),
                                  stride=1,
                                  nonlinearity=relu)
     network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
     network = layers.DenseLayer(network, num_units=64, nonlinearity=relu)
     network = layers.DenseLayer(network,
                                 num_units=45,
                                 nonlinearity=softmax)
     return network