Example 1
def build_pi_model():

    log.i('BUILDING RASPBERRY PI MODEL...')

    # Random Seed
    lasagne_random.set_rng(cfg.getRandomState())

    # Input layer for images
    net = l.InputLayer((None, cfg.IM_DIM, cfg.IM_SIZE[1], cfg.IM_SIZE[0]))

    # Convolutional layer groups
    for i in range(len(cfg.FILTERS)):

        # Strided convolution (kernel size from cfg.KERNEL_SIZES)
        net = batch_norm(
            l.Conv2DLayer(net,
                          num_filters=cfg.FILTERS[i],
                          filter_size=cfg.KERNEL_SIZES[i],
                          num_groups=cfg.NUM_OF_GROUPS[i],
                          pad='same',
                          stride=2,
                          W=initialization(cfg.NONLINEARITY),
                          nonlinearity=nonlinearity(cfg.NONLINEARITY)))

        log.i(('\tGROUP', i + 1, 'OUT SHAPE:', l.get_output_shape(net)))

    # Fully connected layers + dropout layers
    net = l.DenseLayer(net,
                       cfg.DENSE_UNITS,
                       nonlinearity=nonlinearity(cfg.NONLINEARITY),
                       W=initialization(cfg.NONLINEARITY))
    net = l.DropoutLayer(net, p=cfg.DROPOUT)

    net = l.DenseLayer(net,
                       cfg.DENSE_UNITS,
                       nonlinearity=nonlinearity(cfg.NONLINEARITY),
                       W=initialization(cfg.NONLINEARITY))
    net = l.DropoutLayer(net, p=cfg.DROPOUT)

    # Classification Layer (Softmax)
    net = l.DenseLayer(net,
                       len(cfg.CLASSES),
                       nonlinearity=nonlinearity('softmax'),
                       W=initialization('softmax'))

    log.i(("\tFINAL NET OUT SHAPE:", l.get_output_shape(net)))
    log.i("...DONE!")

    # Model stats
    log.i(("MODEL HAS",
           (sum(hasattr(layer, 'W')
                for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"))
    log.i(("MODEL HAS", l.count_params(net), "PARAMS"))

    return net
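The builder above is driven entirely by a cfg module that this snippet does not show. A minimal stand-in for just the fields build_pi_model() reads (all values illustrative; the initialization() and nonlinearity() helpers are still assumed elsewhere):

import numpy as np

class cfg:
    # Illustrative stand-in; the real config ships with the project.
    IM_SIZE = (128, 64)          # (width, height) of the input image
    IM_DIM = 1                   # number of input channels
    FILTERS = [16, 32, 64]       # one strided conv group per entry
    KERNEL_SIZES = [3, 3, 3]     # filter_size per group
    NUM_OF_GROUPS = [1, 1, 1]    # grouped-convolution factor per group
    DENSE_UNITS = 128
    DROPOUT = 0.5
    NONLINEARITY = 'relu'
    CLASSES = ['a', 'b', 'c']    # one softmax unit per class

    @staticmethod
    def getRandomState():
        return np.random.RandomState(1337)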
Example 2
def buildModel():

    print("BUILDING MODEL TYPE...")

    #default settings
    filters = 16
    first_stride = 2
    last_filter_multiplier = 4

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    net = l.DropoutLayer(net, DROPOUT)

    #net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8 , filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    #net = l.MaxPool2DLayer(net, pool_size=2)

    #net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 16 , filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    #net = l.MaxPool2DLayer(net, pool_size=2)

    #net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 32 , filter_size=7, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    #net = l.MaxPool2DLayer(net, pool_size=2)


    #print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
Example 3
    def build_instrument_model(self, n_vars, **kwargs):

        targets = TT.vector()
        instrument_vars = TT.matrix()

        instruments = layers.InputLayer((None, n_vars), instrument_vars)
        instruments = layers.DropoutLayer(instruments, p=0.2)

        dense_layer = layers.DenseLayer(instruments,
                                        kwargs['dense_size'],
                                        nonlinearity=nonlinearities.tanh)
        dense_layer = layers.DropoutLayer(dense_layer, p=0.2)

        for _ in range(kwargs['n_dense_layers'] - 1):
            dense_layer = layers.DenseLayer(dense_layer,
                                            kwargs['dense_size'],
                                            nonlinearity=nonlinearities.tanh)
            dense_layer = layers.DropoutLayer(dense_layer, p=0.5)

        self.instrument_output = layers.DenseLayer(
            dense_layer, 1, nonlinearity=nonlinearities.linear)
        init_params = layers.get_all_param_values(self.instrument_output)
        prediction = layers.get_output(self.instrument_output,
                                       deterministic=False)
        test_prediction = layers.get_output(self.instrument_output,
                                            deterministic=True)

        # flexible here, endog variable can be categorical, continuous, etc.
        l2_cost = regularization.regularize_network_params(
            self.instrument_output, regularization.l2)
        loss = objectives.squared_error(
            prediction.flatten(), targets.flatten()).mean() + 1e-4 * l2_cost
        loss_total = objectives.squared_error(prediction.flatten(),
                                              targets.flatten()).mean()

        params = layers.get_all_params(self.instrument_output, trainable=True)
        param_updates = updates.adadelta(loss, params)

        self._instrument_train_fn = theano.function(
            [targets, instrument_vars], loss, updates=param_updates)

        self._instrument_loss_fn = theano.function(
            [targets, instrument_vars], loss_total)

        self._instrument_output_fn = theano.function([instrument_vars],
                                                     test_prediction)

        return init_params
Example 4
    def __init__(self, x, y, args):
        self.params_theta = []
        self.params_lambda = []
        self.params_weight = []
        if args.dataset == 'mnist':
            input_size = (None, 1, 28, 28)
        elif args.dataset == 'cifar10':
            input_size = (None, 3, 32, 32)
        else:
            raise AssertionError('unsupported dataset: %s' % args.dataset)
        layers = [ll.InputLayer(input_size)]
        self.penalty = theano.shared(np.array(0.))

        #conv1
        layers.append(Conv2DLayerWithReg(args, layers[-1], 20, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #conv2
        layers.append(Conv2DLayerWithReg(args, layers[-1], 50, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))

        # Michael: add dropout
        layers.append(ll.DropoutLayer(layers[-1]))  # Michael
        #fc1
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=500))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.DropoutLayer(layers[-1]))  # Michael
        #softmax
        layers.append(
            DenseLayerWithReg(args,
                              layers[-1],
                              num_units=10,
                              nonlinearity=nonlinearities.softmax))
        self.add_params_to_self(args, layers[-1])
        # no dropout on output

        self.layers = layers
        self.y = ll.get_output(layers[-1], x, deterministic=False)
        self.prediction = T.argmax(self.y, axis=1)
        # self.penalty = penalty if penalty != 0. else T.constant(0.)
        print(self.params_lambda)
        # time.sleep(20)
        # cost function
        self.loss = T.mean(categorical_crossentropy(self.y, y))
        self.lossWithPenalty = T.add(self.loss, self.penalty)
        print("loss and losswithpenalty", type(self.loss),
              type(self.lossWithPenalty))


# Michael: wide resnet: https://gist.github.com/FlorianMuellerklein/3d9ba175038a3f2e7de3794fa303f1ee
# https://github.com/FlorianMuellerklein/Identity-Mapping-ResNet-Lasagne/blob/master/models.py
Example 5
 def _buildDense(self):
     layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=self.X)
     layer = layers.DropoutLayer(layer, p=0.2)
     layer = maxoutDense(layer, num_units=800 * 5, ds=5)
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = maxoutDense(layer, num_units=800 * 5, ds=5)
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer,
                               num_units=10,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.softmax)
     return layer
Example 6
    def get_discriminator(self):
        ''' specify discriminator D0 '''
        """
        disc0_layers = [LL.InputLayer(shape=(self.args.batch_size, 3, 32, 32))]
        disc0_layers.append(LL.GaussianNoiseLayer(disc0_layers[-1], sigma=0.05))
        disc0_layers.append(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 16x16
        disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu)))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 8x8
        disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=0, W=Normal(0.02), nonlinearity=nn.lrelu))) # 6x6
        disc0_layer_shared = LL.NINLayer(disc0_layers[-1], num_units=192, W=Normal(0.02), nonlinearity=nn.lrelu) # 6x6
        disc0_layers.append(disc0_layer_shared)

        disc0_layer_z_recon = LL.DenseLayer(disc0_layer_shared, num_units=50, W=Normal(0.02), nonlinearity=None)
        disc0_layers.append(disc0_layer_z_recon) # also need to recover z from x

        disc0_layers.append(LL.GlobalPoolLayer(disc0_layer_shared))
        disc0_layer_adv = LL.DenseLayer(disc0_layers[-1], num_units=10, W=Normal(0.02), nonlinearity=None)
        disc0_layers.append(disc0_layer_adv)

        return disc0_layers, disc0_layer_adv, disc0_layer_z_recon
        """
        disc_x_layers = [LL.InputLayer(shape=(None, 3, 32, 32))]
        disc_x_layers.append(LL.GaussianNoiseLayer(disc_x_layers[-1], sigma=0.2))
        disc_x_layers.append(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=0, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers_shared = LL.NINLayer(disc_x_layers[-1], num_units=192, W=Normal(0.01), nonlinearity=nn.lrelu)
        disc_x_layers.append(disc_x_layers_shared)

        disc_x_layer_z_recon = LL.DenseLayer(disc_x_layers_shared, num_units=self.args.z0dim, nonlinearity=None)
        disc_x_layers.append(disc_x_layer_z_recon) # also need to recover z from x

        # disc_x_layers.append(nn.MinibatchLayer(disc_x_layers_shared, num_kernels=100))
        disc_x_layers.append(LL.GlobalPoolLayer(disc_x_layers_shared))
        disc_x_layer_adv = LL.DenseLayer(disc_x_layers[-1], num_units=10, W=Normal(0.01), nonlinearity=None)
        disc_x_layers.append(disc_x_layer_adv)

        #output_before_softmax_x = LL.get_output(disc_x_layer_adv, x, deterministic=False)
        #output_before_softmax_gen = LL.get_output(disc_x_layer_adv, gen_x, deterministic=False)

        # temp = LL.get_output(gen_x_layers[-1], deterministic=False, init=True)
        # temp = LL.get_output(disc_x_layers[-1], x, deterministic=False, init=True)
        # init_updates = [u for l in LL.get_all_layers(gen_x_layers)+LL.get_all_layers(disc_x_layers) for u in getattr(l,'init_updates',[])]
        return disc_x_layers, disc_x_layer_adv, disc_x_layer_z_recon
Example 7
def enc_net(_incoming, output_channels, drop_rate=0.3, nonlinearity=None):
    # _noise = L.GaussianNoiseLayer(_incoming, sigma=0.1)
    _drop1 = L.DropoutLayer(_incoming, p=drop_rate, rescale=True)
    _fc1 = L.DenseLayer(_drop1,
                        4 * output_channels,
                        W=I.Normal(0.02),
                        b=I.Constant(0.1),
                        nonlinearity=NL.rectify)
    _drop2 = L.DropoutLayer(_fc1, p=drop_rate, rescale=True)
    _fc2 = L.DenseLayer(_drop2,
                        output_channels,
                        W=I.Normal(0.02),
                        b=I.Constant(0.1),
                        nonlinearity=nonlinearity)
    return _fc2
Example 8
def ptb_lstm(input_var, vocabulary_size, hidden_size, seq_len, num_layers,
             dropout, batch_size):
    l_input = L.InputLayer(shape=(batch_size, seq_len), input_var=input_var)
    l_embed = L.EmbeddingLayer(l_input,
                               vocabulary_size,
                               hidden_size,
                               W=init.Uniform(1.0))
    l_lstms = []
    for i in range(num_layers):
        l_lstm = L.LSTMLayer(l_embed if i == 0 else l_lstms[-1],
                             hidden_size,
                             ingate=L.Gate(W_in=init.GlorotUniform(),
                                           W_hid=init.Orthogonal()),
                             forgetgate=L.Gate(W_in=init.GlorotUniform(),
                                               W_hid=init.Orthogonal(),
                                               b=init.Constant(1.0)),
                             cell=L.Gate(
                                 W_in=init.GlorotUniform(),
                                 W_hid=init.Orthogonal(),
                                 W_cell=None,
                                 nonlinearity=lasagne.nonlinearities.tanh),
                             outgate=L.Gate(W_in=init.GlorotUniform(),
                                            W_hid=init.Orthogonal()))
        l_lstms.append(l_lstm)
    l_drop = L.DropoutLayer(l_lstms[-1], dropout)
    l_out = L.DenseLayer(l_drop, num_units=vocabulary_size, num_leading_axes=2)
    l_out = L.ReshapeLayer(
        l_out,
        (l_out.output_shape[0] * l_out.output_shape[1], l_out.output_shape[2]))
    l_out = L.NonlinearityLayer(l_out,
                                nonlinearity=lasagne.nonlinearities.softmax)
    return l_out
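A hedged usage sketch for ptb_lstm; every hyperparameter below is illustrative rather than taken from the original code:

import theano.tensor as T
from lasagne import layers as L

input_var = T.imatrix('tokens')   # int32 token ids, shape (batch_size, seq_len)
net = ptb_lstm(input_var, vocabulary_size=10000, hidden_size=200,
               seq_len=35, num_layers=2, dropout=0.5, batch_size=20)
# One softmax row per (batch, time) position after the final reshape:
probs = L.get_output(net)         # shape (batch_size * seq_len, vocabulary_size)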
Example 9
def getNet2():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1]))
  loc1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(3,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  loc2Layer = layers.MaxPool2DLayer(loc1Layer, pool_size=(2,2))
  loc3Layer = layers.Conv2DLayer(loc2Layer, num_filters=64, filter_size=(4,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  loc4Layer = layers.MaxPool2DLayer(loc3Layer, pool_size=(2,2))
  loc5Layer = layers.Conv2DLayer(loc4Layer, num_filters=128, filter_size=(3,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  loc6Layer = layers.MaxPool2DLayer(loc5Layer, pool_size=(2,2))
  loc7Layer = layers.Conv2DLayer(loc6Layer, num_filters=256, filter_size=(3,2), W=GlorotUniform('relu'), nonlinearity=rectify)
  #loc7Layer = layers.DenseLayer(loc5Layer, num_units=1024, nonlinearity=rectify)
  loc8Layer = layers.DenseLayer(loc7Layer, num_units=256, W=GlorotUniform('relu'), nonlinearity=rectify)
  loc9Layer = layers.DenseLayer(loc8Layer, num_units=128, W=GlorotUniform('relu'), nonlinearity=rectify)
  loc10Layer = layers.DenseLayer(loc9Layer, num_units=64, W=GlorotUniform('relu'), nonlinearity=rectify)
  #loc11Layer = layers.DenseLayer(loc10Layer, num_units=32, nonlinearity=tanh)
  #loc12Layer = layers.DenseLayer(loc11Layer, num_units=16, nonlinearity=tanh)
  locOutLayer = layers.DenseLayer(loc10Layer, num_units=6, W=GlorotUniform(1.0), nonlinearity=identity)

  transformLayer = layers.TransformerLayer(inputLayer, locOutLayer, downsample_factor=1.0)

  conv1Layer = layers.Conv2DLayer(transformLayer, num_filters=32, filter_size=(3,3), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2,2))
  conv2Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(4,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2))
  conv3Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(3,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2,2))
  conv4Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(3,2), W=GlorotNormal('relu'), nonlinearity=rectify)
  hidden1Layer = layers.DenseLayer(conv4Layer, num_units=1024, W=GlorotUniform('relu'), nonlinearity=rectify)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=512, W=GlorotUniform('relu'), nonlinearity=rectify)
  #hidden3Layer = layers.DenseLayer(hidden2Layer, num_units=256, nonlinearity=tanh)
  outputLayer = layers.DenseLayer(hidden2Layer, num_units=10, W=GlorotUniform(1.0), nonlinearity=softmax)
  return outputLayer
Example 10
def discriminator(input_var, Y):
    yb = Y.dimshuffle(0, 1, 'x', 'x')

    D_1 = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                    input_var=input_var)
    D_2 = lasagne.layers.InputLayer(shape=(None, 10), input_var=Y)
    network = D_1
    network_yb = D_2
    network = CondConvConcatLayer([network, network_yb])

    network = ll.DropoutLayer(network, p=0.4)

    network = conv_layer(network, 3, 32, 1, 'same', nonlinearity=lrelu)
    network = CondConvConcatLayer([network, network_yb])

    network = conv_layer(network, 3, 64, 2, 'same', nonlinearity=lrelu)
    network = CondConvConcatLayer([network, network_yb])

    network = conv_layer(network, 3, 64, 2, 'same', nonlinearity=lrelu)
    #network = batch_norm(conv_layer(network, 3, 128, 1, 'same', nonlinearity=lrelu))
    #network = ll.DropoutLayer(network, p=0.2)

    network = conv_layer(network, 3, 128, 2, 'same', nonlinearity=lrelu)
    network = CondConvConcatLayer([network, network_yb])

    network = batch_norm(
        conv_layer(network, 4, 128, 1, 'valid', nonlinearity=lrelu))
    network = CondConvConcatLayer([network, network_yb])

    #network= DropoutLayer(network, p=0.5)
    network = conv_layer(network, 1, 1, 1, 'valid', nonlinearity=None)

    return network, D_1, D_2
Example 11
def getNet9():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1]))
  conv1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(5,3), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2,2))
  conv2Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(5,4), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2))
  conv3Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(4,4), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2,2))
  conv4Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(4,4), W=GlorotNormal('relu'), nonlinearity=rectify)
  hidden1Layer = layers.DenseLayer(conv4Layer, num_units=2048, W=GlorotNormal('relu'), nonlinearity=rectify)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=1024, W=GlorotNormal('relu'), nonlinearity=rectify)
  dropout2Layer = layers.DropoutLayer(hidden2Layer, p=0.5)
  hidden3Layer = layers.DenseLayer(dropout2Layer, num_units=512, W=GlorotNormal('relu'), nonlinearity=rectify)
  outputLayer = layers.DenseLayer(hidden3Layer, num_units=10, W=GlorotNormal(1.0), nonlinearity=softmax)
  return outputLayer
Example 12
def getNet4():
    inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0],
                                          imageShape[1]))  #120x120
    conv1Layer = layers.Conv2DLayer(inputLayer,
                                    num_filters=32,
                                    filter_size=(5, 5),
                                    nonlinearity=elu)  #116x116
    pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2, 2))  #58x58
    dropout1Layer = layers.DropoutLayer(pool1Layer, p=0.5)
    conv2Layer = layers.Conv2DLayer(dropout1Layer,
                                    num_filters=64,
                                    filter_size=(5, 5),
                                    nonlinearity=tanh)  #54x54
    pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2, 2))  #27x27
    dropout2Layer = layers.DropoutLayer(pool2Layer, p=0.5)
    conv3Layer = layers.Conv2DLayer(dropout2Layer,
                                    num_filters=128,
                                    filter_size=(4, 4),
                                    nonlinearity=tanh)  #24x24
    pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2, 2))  #12x12
    dropout3Layer = layers.DropoutLayer(pool3Layer, p=0.5)
    conv4Layer = layers.Conv2DLayer(dropout3Layer,
                                    num_filters=256,
                                    filter_size=(3, 3),
                                    nonlinearity=elu)  #10x10
    pool4Layer = layers.MaxPool2DLayer(conv4Layer, pool_size=(2, 2))  #5x5
    dropout4Layer = layers.DropoutLayer(pool4Layer, p=0.5)
    conv5Layer = layers.Conv2DLayer(dropout4Layer,
                                    num_filters=512,
                                    filter_size=(4, 4),
                                    nonlinearity=tanh)  #2x2
    hidden1Layer = layers.DenseLayer(conv5Layer,
                                     num_units=2048,
                                     nonlinearity=tanh)
    hidden2Layer = layers.DenseLayer(hidden1Layer,
                                     num_units=1024,
                                     nonlinearity=elu)
    hidden3Layer = layers.DenseLayer(hidden2Layer,
                                     num_units=512,
                                     nonlinearity=tanh)
    hidden4Layer = layers.DenseLayer(hidden3Layer,
                                     num_units=256,
                                     nonlinearity=tanh)
    outputLayer = layers.DenseLayer(hidden4Layer,
                                    num_units=10,
                                    nonlinearity=softmax)
    return outputLayer
Example 13
    def __init__(self, dims, dropouts=None, input_var=None):
        assert len(dims) >= 3, 'Not enough dimensions'
        if dropouts is not None:
            assert len(dropouts) == len(dims) - 1
        else:
            dropouts = [0] * (len(dims) - 1)
        self.input_var = input_var
        if input_var is None:
            self.input_var = T.matrix('inputs')
        self.target_var = T.ivector('targets')

        # input layer
        network = layers.InputLayer((None, dims[0]), input_var=self.input_var)
        if dropouts[0]:
            network = layers.DropoutLayer(network, p=dropouts[0])
        # hidden layers
        for dim, dropout in zip(dims[1:-1], dropouts[1:]):
            network = layers.DenseLayer(network, num_units=dim,
                                        W=lasagne.init.GlorotUniform())
            if dropout:
                network = layers.DropoutLayer(network, p=dropout)
        # output layer
        network = layers.DenseLayer(network, num_units=dims[-1],
                                    nonlinearity=nl.softmax)
        self.network = network

        # util functions, completely stolen from Lasagne example
        self.prediction = layers.get_output(network)
        self.loss = lasagne.objectives.categorical_crossentropy(
            self.prediction, self.target_var).mean()
        self.params = layers.get_all_params(network, trainable=True)
        self.updates = lasagne.updates.nesterov_momentum(
            self.loss, self.params, learning_rate=0.01, momentum=0.9)
        # deterministic pass: dropout disabled for evaluation
        self.test_prediction = layers.get_output(network, deterministic=True)
        self.test_loss = lasagne.objectives.categorical_crossentropy(
            self.test_prediction, self.target_var).mean()
        self.test_acc = T.mean(
            T.eq(T.argmax(self.test_prediction, axis=1), self.target_var),
            dtype=theano.config.floatX)

        self.train_fn = theano.function([self.input_var, self.target_var],
                                        self.loss, updates=self.updates)
        self.val_fn = theano.function([self.input_var, self.target_var],
                                      [self.test_loss, self.test_acc])
        self.eval_fn = theano.function([self.input_var], [self.test_prediction])
        self.acc_fn = theano.function([self.input_var, self.target_var], self.test_acc)
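A usage sketch for the class above; its name is not shown in the snippet, so MLP is assumed here:

import numpy as np

net = MLP(dims=[784, 300, 10], dropouts=[0.2, 0.5])  # input, one hidden, output
X_batch = np.zeros((32, 784), dtype='float32')       # placeholder data
y_batch = np.zeros(32, dtype='int32')                # placeholder labels
train_loss = net.train_fn(X_batch, y_batch)          # one SGD step, dropout on
val_loss, val_acc = net.val_fn(X_batch, y_batch)     # deterministic evaluation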
Example 14
def makeRNN(xInputRNN, hiddenInitRNN, hidden2InitRNN, sequenceLen, vocabularySize, neuralNetworkSz):

    input_Layer = L.InputLayer(input_var=xInputRNN, shape=(None, sequenceLen))
    hidden_Layer = L.InputLayer(input_var=hiddenInitRNN, shape=(None, neuralNetworkSz))
    hidden_Layer2 = L.InputLayer(input_var=hidden2InitRNN, shape=(None, neuralNetworkSz))
    input_Layer = L.EmbeddingLayer(input_Layer, input_size=vocabularySize, output_size=neuralNetworkSz)

    RNN_Layer = L.LSTMLayer(input_Layer, num_units=neuralNetworkSz, hid_init=hidden_Layer)
    h = L.DropoutLayer(RNN_Layer, p=dropOutProbability)
    RNN_Layer2 = L.LSTMLayer(h, num_units=neuralNetworkSz, hid_init=hidden_Layer2)
    h = L.DropoutLayer(RNN_Layer2, p=dropOutProbability)

    layerShape = L.ReshapeLayer(h, (-1, neuralNetworkSz))

    predictions = NCE(layerShape, num_units=vocabularySize, Z=Z)
    predictions = L.ReshapeLayer(predictions, (-1, sequenceLen, vocabularySize))
    return RNN_Layer, RNN_Layer2, predictions
Example 15
def simpleConv(input_var=None, num_units=32):

    network = layers.InputLayer(shape=(None, input_n_channel, input_height,
                                       input_width),
                                input_var=input_var)

    network = layers.Conv2DLayer(network,
                                 num_filters=num_units,
                                 filter_size=(9, 9))
    network = layers.MaxPool2DLayer(network, pool_size=(2, 2))
    network = layers.Conv2DLayer(network,
                                 num_filters=num_units,
                                 filter_size=(9, 9))
    network = layers.MaxPool2DLayer(network, pool_size=(2, 2))
    network = layers.Conv2DLayer(network,
                                 num_filters=1000,
                                 filter_size=(10, 10))

    network = layers.DenseLayer(layers.DropoutLayer(network, p=0.2),
                                num_units=1000)
    network = layers.DenseLayer(layers.DropoutLayer(network, p=0.5),
                                num_units=1000)

    network = layers.ReshapeLayer(network,
                                  shape=(input_var.shape[0], 1000, 1, 1))
    '''
	network = layers.TransposedConv2DLayer(network, num_filters=num_units, filter_size=(4,4))
	network = layers.Upscale2DLayer(network, 2)
	network = layers.TransposedConv2DLayer(network, num_filters=num_units, filter_size=(5,5))
	network = layers.Upscale2DLayer(network, 2)
	network = layers.TransposedConv2DLayer(network, num_filters=3, filter_size=(9,9))
	'''
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=num_units,
                                           filter_size=(8, 8))
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=num_units,
                                           filter_size=(9, 9))
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=num_units,
                                           filter_size=(9, 9))
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=3,
                                           filter_size=(9, 9))

    return network
Example 16
def _build(X):
    layer = layers.InputLayer(shape=(None, 1, 28, 28), input_var=X)
    layer = layers.Conv2DLayer(layer,
                               num_filters=32,
                               filter_size=(5, 5),
                               stride=(1, 1),
                               pad='same',
                               untie_biases=False,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    visual1 = layers.get_output(layer)
    layer = layers.MaxPool2DLayer(layer,
                                  pool_size=(2, 2),
                                  stride=None,
                                  pad=(0, 0),
                                  ignore_border=False)
    layer = layers.Conv2DLayer(layer,
                               num_filters=32,
                               filter_size=(5, 5),
                               stride=(1, 1),
                               pad='same',
                               untie_biases=False,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    visual2 = layers.get_output(layer)
    layer = layers.MaxPool2DLayer(layer,
                                  pool_size=(2, 2),
                                  stride=None,
                                  pad=(0, 0),
                                  ignore_border=False)
    layer = layers.flatten(layer, outdim=2)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer,
                              num_units=256,
                              W=init.GlorotUniform(),
                              b=init.Constant(0.),
                              nonlinearity=nonlinearities.rectify)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer,
                              num_units=10,
                              W=init.GlorotUniform(),
                              b=init.Constant(0.),
                              nonlinearity=nonlinearities.softmax)
    return layer, visual1, visual2
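Because _build also returns the symbolic outputs of the two conv layers, the feature maps can be inspected directly; a minimal sketch, assuming X is the tensor passed in:

import numpy as np
import theano
import theano.tensor as T

X = T.tensor4('X')
network, visual1, visual2 = _build(X)
view_fn = theano.function([X], [visual1, visual2])

images = np.zeros((4, 1, 28, 28), dtype='float32')  # placeholder batch
fmap1, fmap2 = view_fn(images)                      # conv1 and conv2 activations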
Example 17
def getNet5():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1])) #120x120
  conv1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(4,4), nonlinearity=elu) #117x117
  pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(3,3)) #39x39
  conv2Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(4,4), nonlinearity=tanh) #36x36
  pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2)) #18x18
  conv3Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(4,4), nonlinearity=sigmoid) #15x15
  pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(3,3)) #5x5
  conv4Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(4,4), nonlinearity=tanh) #2x2
  hidden1Layer = layers.DenseLayer(conv4Layer, num_units=1024, nonlinearity=elu)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=512, nonlinearity=tanh)
  dropout2Layer = layers.DropoutLayer(hidden2Layer, p=0.5)
  hidden3Layer = layers.DenseLayer(dropout2Layer, num_units=256, nonlinearity=tanh)
  dropout3Layer = layers.DropoutLayer(hidden3Layer, p=0.5)
  hidden4Layer = layers.DenseLayer(dropout3Layer, num_units=128, nonlinearity=tanh)
  outputLayer = layers.DenseLayer(hidden4Layer, num_units=10, nonlinearity=softmax)
  return outputLayer
Example 18
    def __build_24_net__(self):

        network = layers.InputLayer((None, 3, 24, 24),
                                    input_var=self.__input_var__)
        network = layers.dropout(network, p=0.1)
        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.DropoutLayer(network, p=0.5)
        network = layers.batch_norm(network)
        network = layers.DenseLayer(network, num_units=64, nonlinearity=relu)
        network = layers.DropoutLayer(network, p=0.5)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
Example 19
def OneLayerMLP(batchsize, input_var=None):

    network = layers.InputLayer(shape=(None, input_n_channel, input_height,
                                       input_width),
                                input_var=input_var)
    network = layers.DropoutLayer(network, p=0.2)
    network = layers.DenseLayer(network,
                                num_units=15000,
                                nonlinearity=lasagne.nonlinearities.rectify)
    network = layers.DropoutLayer(network, p=0.5)
    network = layers.DenseLayer(network,
                                num_units=output_n_channel * output_height *
                                output_width,
                                nonlinearity=lasagne.nonlinearities.rectify)
    network = layers.ReshapeLayer(network,
                                  shape=(batchsize, output_n_channel,
                                         output_height, output_width))

    return network
Example 20
def getNet3():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1]))
  conv1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(3,3), nonlinearity=elu)
  pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2,2))
  dropout1Layer = layers.DropoutLayer(pool1Layer, p=0.2)
  conv2Layer = layers.Conv2DLayer(dropout1Layer, num_filters=64, filter_size=(4,3), nonlinearity=tanh)
  pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2))
  dropout2Layer = layers.DropoutLayer(pool2Layer, p=0.2)
  conv3Layer = layers.Conv2DLayer(dropout2Layer, num_filters=128, filter_size=(3,3), nonlinearity=tanh)
  pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2,2))
  dropout3Layer = layers.DropoutLayer(pool3Layer, p=0.2)
  conv4Layer = layers.Conv2DLayer(dropout3Layer, num_filters=256, filter_size=(3,2), nonlinearity=elu)
  hidden1Layer = layers.DenseLayer(conv4Layer, num_units=1024, nonlinearity=elu)
  hidden2Layer = layers.DenseLayer(hidden1Layer, num_units=512, nonlinearity=tanh)
  hidden3Layer = layers.DenseLayer(hidden2Layer, num_units=256, nonlinearity=tanh)
  #hidden4Layer = layers.DenseLayer(hidden3Layer, num_units=256, nonlinearity=elu)
  #hidden5Layer = layers.DenseLayer(hidden4Layer, num_units=128, nonlinearity=tanh)
  outputLayer = layers.DenseLayer(hidden3Layer, num_units=10, nonlinearity=softmax)
  return outputLayer
Example 21
def build_model(input_var):
    layer = layers.InputLayer(shape=(None, 3, 224, 224), input_var=input_var)
    layer = layers.Conv2DLayer(layer, num_filters=64, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.Conv2DLayer(layer, num_filters=128, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.Conv2DLayer(layer, num_filters=256, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.Conv2DLayer(layer, num_filters=512, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.Conv2DLayer(layer, num_filters=512, filter_size=(3, 3), stride=(1, 1), pad='same')
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2), pad=(0, 0), ignore_border=False)
    layer = layers.flatten(layer, outdim=2)
    layer = layers.DenseLayer(layer, num_units=4096, nonlinearity=nonlinearities.rectify)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer, num_units=4096, nonlinearity=nonlinearities.rectify)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer, num_units=2, nonlinearity=nonlinearities.softmax)
    return layer
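None of these builders compiles anything. A sketch of the usual Lasagne training setup around the VGG-style model above (optimizer and learning rate are illustrative choices, not the original author's):

import theano
import theano.tensor as T
import lasagne
from lasagne import layers

input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
network = build_model(input_var)

prediction = layers.get_output(network)              # stochastic pass, dropout on
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
params = layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params,
                                            learning_rate=0.01, momentum=0.9)

test_prediction = layers.get_output(network, deterministic=True)  # dropout off
train_fn = theano.function([input_var, target_var], loss, updates=updates)
predict_fn = theano.function([input_var], test_prediction)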
Example 22
 def _build(self):
     layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=self.X)
     layer = nin(layer,
                 conv_filters=192,
                 filter_size=(5, 5),
                 pad=2,
                 cccp1_filters=160,
                 cccp2_filters=96)
     layer = layers.Pool2DLayer(layer,
                                pool_size=(3, 3),
                                stride=2,
                                pad=(0, 0),
                                ignore_border=False,
                                mode='max')
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = nin(layer,
                 conv_filters=192,
                 filter_size=(5, 5),
                 pad=2,
                 cccp1_filters=192,
                 cccp2_filters=192)
     layer = layers.Pool2DLayer(layer,
                                pool_size=(3, 3),
                                stride=2,
                                ignore_border=False,
                                mode='average_exc_pad')
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = nin(layer,
                 conv_filters=192,
                 filter_size=(3, 3),
                 pad=1,
                 cccp1_filters=192,
                 cccp2_filters=10)
     layer = layers.Pool2DLayer(layer,
                                pool_size=(8, 8),
                                stride=1,
                                ignore_border=False,
                                mode='average_exc_pad')
     layer = layers.flatten(layer, outdim=2)
     layer = layers.NonlinearityLayer(layer,
                                      nonlinearity=nonlinearities.softmax)
     return layer
Example 23
 def _build(self):
     layer = layers.InputLayer(shape=(None, 3, 112, 112), input_var=self.X)
     layer = layers.Conv2DLayer(layer, num_filters=64, filter_size=(5, 5), stride=(1, 1), pad='same',
                                untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                                nonlinearity=nonlinearities.rectify)
     layer = layers.MaxPool2DLayer(layer, pool_size=(2, 2), stride=None, pad=(0, 0), ignore_border=False)
     layer = layers.Conv2DLayer(layer, num_filters=64, filter_size=(5, 5), stride=(1, 1), pad='same',
                                untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                                nonlinearity=nonlinearities.rectify)
     layer = layers.MaxPool2DLayer(layer, pool_size=(8, 8), stride=None, pad=(0, 0), ignore_border=False)
     layer = layers.flatten(layer, outdim=2)  # the explicit flatten is optional; DenseLayer flattens automatically
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer, num_units=2048,
                               W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer, num_units=2,
                               W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.softmax)
     return layer
Example 24
def discriminator(input_var):
    network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                        input_var=input_var)

    network = ll.DropoutLayer(network, p=0.5)

    network = weight_norm(conv_layer(network,
                                     3,
                                     32,
                                     1,
                                     'same',
                                     nonlinearity=lrelu),
                          train_g=False)

    network = weight_norm(conv_layer(network,
                                     3,
                                     32,
                                     2,
                                     'same',
                                     nonlinearity=lrelu),
                          train_g=False)
    network = weight_norm(conv_layer(network,
                                     3,
                                     64,
                                     2,
                                     'same',
                                     nonlinearity=lrelu),
                          train_g=False)

    network = weight_norm(conv_layer(network,
                                     3,
                                     128,
                                     2,
                                     'same',
                                     nonlinearity=lrelu),
                          train_g=False)

    network = weight_norm(conv_layer(network,
                                     4,
                                     128,
                                     1,
                                     'valid',
                                     nonlinearity=lrelu),
                          train_g=False)

    network = weight_norm(conv_layer(network,
                                     1,
                                     1,
                                     1,
                                     'valid',
                                     nonlinearity=None),
                          train_g=True)

    return network
Example 25
def cls_net(_incoming):
    _drop1 = L.DropoutLayer(_incoming, p=0.2, rescale=True)
    _conv1 = batch_norm(
        conv(_drop1,
             num_filters=64,
             filter_size=7,
             stride=3,
             pad=0,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.rectify))
    _drop2 = L.DropoutLayer(_conv1, p=0.2, rescale=True)
    _conv2 = batch_norm(
        conv(_drop2,
             num_filters=128,
             filter_size=3,
             stride=1,
             pad=0,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.rectify))
    _pool2 = L.MaxPool2DLayer(_conv2, pool_size=2)
    _fc1 = batch_norm(
        L.DenseLayer(L.FlattenLayer(_pool2, outdim=2),
                     256,
                     W=I.Normal(0.02),
                     b=None,
                     nonlinearity=NL.rectify))

    _fc2 = L.DenseLayer(_fc1,
                        ny,
                        W=I.Normal(0.02),
                        b=None,
                        nonlinearity=NL.sigmoid)
    _aux = [
        tanh(_conv1),
        tanh(_conv2),
        tanh(L.DimshuffleLayer(_fc1, (0, 1, 'x', 'x'))),
        L.DimshuffleLayer(_fc2, (0, 1, 'x', 'x'))
    ]
    return _aux, _fc2
Example 26
def net_vgglike(k, input_shape, nclass):
    input_x = T.tensor4("input")
    target_y = T.vector("target", dtype='int32')
    Winit = init.Normal()

    net = ll.InputLayer(input_shape, input_x)
    net = conv_bn_rectify(net, 64 * k)
    net = ll.DropoutLayer(net, 0.3)
    net = conv_bn_rectify(net, 64 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 128 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 128 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 256 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 256 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 256 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = ll.DenseLayer(net,
                        int(512 * k),
                        W=init.Normal(),
                        nonlinearity=nl.rectify)
    net = BatchNormLayer(net, epsilon=1e-3)
    net = ll.NonlinearityLayer(net)
    net = ll.DropoutLayer(net, 0.5)
    net = ll.DenseLayer(net, nclass, W=init.Normal(), nonlinearity=nl.softmax)

    return net, input_x, target_y, k
Example 27
 def _build(self):
     layer = layers.InputLayer(shape=(None, 1, 28, 28), input_var=self.X)
     layer = layers.DropoutLayer(layer, p=0.2)
     layer = layers.DenseLayer(layer,
                               num_units=800,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer,
                               num_units=800,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer,
                               num_units=10,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.softmax)
     return layer
Example 28
    def sum_pos_encodings_in(self, statement):
        pe_matrix = self.pe_matrix_in
        pe_weights = pe_matrix * self.W_pe[statement]

        if self.dropout_in > 0 and self.mode == 'train':
            pe_weights_d = pe_weights.dimshuffle(('x', 0, 1))
            net = layers.InputLayer(shape=(1, self.max_inp_sent_len, self.dim), input_var=pe_weights_d)
            net = layers.DropoutLayer(net, p=self.dropout_in)
            pe_weights = layers.get_output(net)[0]

        pe_weights = T.cast(pe_weights, floatX)
        memories = T.sum(pe_weights, axis=0)
        return memories
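The trick above applies Lasagne dropout to a raw theano tensor by routing it through an InputLayer. The same pattern in isolation (shape illustrative):

import theano.tensor as T
from lasagne import layers

x = T.matrix('x')
net = layers.InputLayer(shape=(None, 100), input_var=x)
net = layers.DropoutLayer(net, p=0.3)
x_train = layers.get_output(net)                     # stochastic: drops and rescales
x_eval = layers.get_output(net, deterministic=True)  # identity at evaluation time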
Example 29
def build_discriminator(input_var=None):
    #D_inp = T.tensor4('Ds')
    D = l.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    D = l.Conv2DLayer(D,
                      num_filters=20,
                      filter_size=(5, 5),
                      nonlinearity=reLU,
                      W=lasagne.init.GlorotUniform())
    #D = l.Conv2DLayer(D,1,filter_size=(2,2), stride=2, nonlinearity=reLU)
    D = l.DropoutLayer(D, p=0.2)
    D = l.Conv2DLayer(D,
                      num_filters=20,
                      filter_size=(5, 5),
                      nonlinearity=reLU,
                      W=lasagne.init.GlorotUniform())
    #D = l.Conv2DLayer(D,1,filter_size=(2,2), stride=2, nonlinearity=reLU)
    D = l.DropoutLayer(D, p=0.2)
    D = l.DenseLayer(l.dropout(D, p=0.5), num_units=256, nonlinearity=reLU)
    D = l.DenseLayer(l.dropout(D, p=0.5), num_units=1,
                     nonlinearity=lasagne.nonlinearities.sigmoid)  # softmax over a single unit is constant 1; a discriminator wants sigmoid

    #D1.params = D.params
    return D
Example 30
def getNet7():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1])) #120x120

  conv1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #120x120
  conv2Layer = layers.Conv2DLayer(conv1Layer, num_filters=32, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #120x120
  pool1Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2)) #60x60
  conv3Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #60x60
  conv4Layer = layers.Conv2DLayer(conv3Layer, num_filters=64, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #60x60
  pool2Layer = layers.MaxPool2DLayer(conv4Layer, pool_size=(2,2)) #30x30
  conv5Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #30x30
  conv6Layer = layers.Conv2DLayer(conv5Layer, num_filters=128, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify) #30x30
  pool3Layer = layers.MaxPool2DLayer(conv6Layer, pool_size=(2,2)) #15x15
  conv7Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(4,4), W=HeNormal('relu'), nonlinearity=rectify) #12x12
  flattenLayer = layers.FlattenLayer(conv7Layer)
  hidden1Layer = layers.DenseLayer(flattenLayer, num_units=1024, W=HeNormal('relu'), nonlinearity=rectify)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=512, W=HeNormal('relu'), nonlinearity=rectify)
  dropout2Layer = layers.DropoutLayer(hidden2Layer, p=0.5)
  hidden3Layer = layers.DenseLayer(dropout2Layer, num_units=256, W=HeNormal('relu'), nonlinearity=rectify)
  #dropout3Layer = layers.DropoutLayer(hidden3Layer, p=0.5)
  #hidden4Layer = layers.DenseLayer(dropout2Layer, num_units=128, W=HeNormal('relu'), nonlinearity=rectify)
  outputLayer = layers.DenseLayer(hidden3Layer, num_units=10, W=HeNormal('relu'), nonlinearity=softmax)
  return outputLayer