def train_test(train, labels, test, weight_decay):
    net = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('dropout0', DropoutLayer),
            ('dense1', DenseLayer),
            ('dropout1', DropoutLayer),
            ('dense2', DenseLayer),
            ('dropout2', DropoutLayer),
            ('dense3', DenseLayer),
            ('dropout3', DropoutLayer),
            ('output', DenseLayer),
        ],
        update=nesterov_momentum,
        loss=None,
        objective=partial(WeightDecayObjective, weight_decay=weight_decay),
        regression=False,
        max_epochs=600,
        eval_size=0.1,
        #on_epoch_finished = None,
        #on_training_finished = None,
        verbose=bool(VERBOSITY),
        input_shape=(None, train.shape[1]),
        output_num_units=NCLASSES,
        dense1_num_units=700,
        dense2_num_units=1000,
        dense3_num_units=700,
        dense1_nonlinearity=LeakyRectify(leakiness=0.1),
        dense2_nonlinearity=LeakyRectify(leakiness=0.1),
        dense3_nonlinearity=LeakyRectify(leakiness=0.1),
        output_nonlinearity=softmax,
        dense1_W=HeUniform(),
        dense2_W=HeUniform(),
        dense3_W=HeUniform(),
        dense1_b=Constant(0.),
        dense2_b=Constant(0.),
        dense3_b=Constant(0.),
        output_b=Constant(0.),
        dropout0_p=0.1,
        dropout1_p=0.6,
        dropout2_p=0.6,
        dropout3_p=0.6,
        update_learning_rate=shared(float32(0.02)),
        # update_momentum=shared(float32(0.9)),
        # batch_iterator_train=BatchIterator(batch_size=128),
        batch_iterator_test=BatchIterator(batch_size=128),
    )
    net.fit(train, labels)
    return net.predict_proba(test)
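# Usage sketch (my assumption, not part of the original code): train_test
# expects float32 features and int32 class labels, and relies on the
# module-level VERBOSITY and NCLASSES plus the nolearn/lasagne imports used
# above. With random data it might be called like this:
import numpy as np

train = np.random.rand(1000, 93).astype(np.float32)
labels = np.random.randint(0, NCLASSES, size=1000).astype(np.int32)
test = np.random.rand(200, 93).astype(np.float32)
probs = train_test(train, labels, test, weight_decay=5e-5)
print(probs.shape)  # (200, NCLASSES)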
def test_nonlinearity(self, nonlinearity):
    import lasagne.nonlinearities

    if nonlinearity == 'leaky_rectify_0':
        from lasagne.nonlinearities import LeakyRectify
        theano_nonlinearity = LeakyRectify(leakiness=0)
    elif nonlinearity == 'scaled_tanh':
        from lasagne.nonlinearities import ScaledTanH
        theano_nonlinearity = ScaledTanH()
    elif nonlinearity == 'scaled_tanh_p':
        from lasagne.nonlinearities import ScaledTanH
        theano_nonlinearity = ScaledTanH(scale_in=0.5, scale_out=2.27)
    elif nonlinearity.startswith('selu'):
        from lasagne.nonlinearities import SELU, selu
        if nonlinearity == 'selu':
            theano_nonlinearity = SELU()
        elif nonlinearity == 'selu_paper':
            theano_nonlinearity = selu
        elif nonlinearity == 'selu_rect':
            theano_nonlinearity = SELU(scale=1, scale_neg=0)
        elif nonlinearity == 'selu_custom':
            theano_nonlinearity = SELU(scale=1.21, scale_neg=0.12)
    else:
        theano_nonlinearity = getattr(lasagne.nonlinearities, nonlinearity)
    np_nonlinearity = getattr(self, nonlinearity)

    X = T.matrix()
    X0 = lasagne.utils.floatX(np.random.uniform(-3, 3, (10, 10)))

    theano_result = theano_nonlinearity(X).eval({X: X0})
    np_result = np_nonlinearity(X0)

    assert np.allclose(theano_result, np_result)
def build_generator(input_var=None):
    from lasagne.layers import (InputLayer, ReshapeLayer, DenseLayer,
                                batch_norm, DropoutLayer)
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 4096))
    # project and reshape
    layer = batch_norm(DropoutLayer(layer, 0.5))
    layer = batch_norm(DenseLayer(layer, 512*4*4))
    layer = batch_norm(DropoutLayer(layer, 0.5))
    layer = ReshapeLayer(layer, ([0], 512, 4, 4))
    # 2 fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 256, 3, stride=1, pad=1))
    layer = batch_norm(Deconv2DLayer(layer, 256, 3, stride=2, pad=1))
    # 3 fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 128, 3, stride=1, pad=1))
    layer = batch_norm(Deconv2DLayer(layer, 128, 3, stride=1, pad=1))
    layer = batch_norm(Deconv2DLayer(layer, 128, 3, stride=2, pad=1))
    # 3 fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 3, stride=1, pad=1))
    layer = batch_norm(Deconv2DLayer(layer, 64, 3, stride=1, pad=1))
    layer = batch_norm(Deconv2DLayer(layer, 64, 3, stride=2, pad=1))
    # 4 fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 32, 5, stride=1, pad=2))
    layer = batch_norm(Deconv2DLayer(layer, 32, 5, stride=1, pad=2))
    layer = batch_norm(Deconv2DLayer(layer, 32, 5, stride=1, pad=2))
    layer = Deconv2DLayer(layer, 3, 5, stride=2, pad=2, nonlinearity=sigmoid)
    #layer = ReshapeLayer(layer, (None, 3, 64, 64))
    print("Generator output:", layer.output_shape)
    return layer
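# Usage sketch (assumption): compile build_generator into a sampling function.
# Deconv2DLayer is assumed to be the custom transposed-convolution layer this
# project defines elsewhere; the network maps (N, 100) noise to (N, 3, 64, 64).
import numpy as np
import theano
import theano.tensor as T
import lasagne

noise_var = T.matrix('noise')
generator = build_generator(noise_var)
gen_fn = theano.function(
    [noise_var],
    lasagne.layers.get_output(generator, deterministic=True))
samples = gen_fn(lasagne.utils.floatX(np.random.rand(16, 100)))
print(samples.shape)  # (16, 3, 64, 64)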
def build_discriminator(input_var=None, convs=0):
    from lasagne.layers import (InputLayer, DenseLayer, batch_norm)
    from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer  # override
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    if convs == 0:
        # input: (None, 1, 64, 64)
        layer = InputLayer(shape=(None, 1, 64, 64), input_var=input_var)
        # two convolutions
        layer = batch_norm(
            Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu))
        layer = batch_norm(
            Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu))
    else:
        # input: (None, 1, 128, 128)
        layer = InputLayer(shape=(None, 1, 128, 128), input_var=input_var)
        # two convolutions
        layer = batch_norm(
            Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu))
        layer = batch_norm(
            Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer
    layer = DenseLayer(layer, 1, nonlinearity=sigmoid)
    print("Discriminator output:", layer.output_shape)
    return layer
def build_discriminator(input_var=None, bot_dim=None):
    # the input layer was commented out in the original, leaving `layer`
    # undefined; it is restored here
    layer = InputLayer(shape=(None, 2), input_var=input_var)
    layer = DenseLayer(layer, num_units=128, nonlinearity=LeakyRectify(0.2),
                       W=lasagne.init.Normal(std=0.02, mean=0))
    layer = DenseLayer(layer, num_units=128, nonlinearity=LeakyRectify(0.2),
                       W=lasagne.init.Normal(std=0.02, mean=0))
    # ScaledSig is a custom nonlinearity defined elsewhere in this project
    layer = DenseLayer(layer, num_units=bot_dim, nonlinearity=ScaledSig(),
                       W=lasagne.init.Normal(std=0.02, mean=0))
    return layer
def build_encoder_conv2d_32_hidden(self, l_input):
    from lasagne.nonlinearities import sigmoid
    from lasagne.nonlinearities import LeakyRectify
    from lasagne.layers import Conv2DLayer
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    # input: 3x28x28dim
    lrelu = LeakyRectify(0.2)
    layer = batch_norm(
        Conv2DLayer(l_input, 64, 5, stride=2, pad='same',
                    nonlinearity=lrelu))  # original with relu
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                    nonlinearity=lrelu))  # original with relu
    return ReshapeLayer(layer, ([0], 6272))
def build_decoder_28(self, in_layer):
    lrelu = LeakyRectify(0.2)
    # fully-connected layer
    layer = batch_norm(DenseLayer(
        in_layer, 1024, nonlinearity=lrelu))  # original with relu
    # project and reshape
    layer = batch_norm(DenseLayer(
        layer, 256 * 7 * 7, nonlinearity=lrelu))  # original with relu
    layer = ReshapeLayer(layer, ([0], 256, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(
        Deconv2DLayer(layer, 128, 5, stride=2, crop='same', output_size=14,
                      nonlinearity=lrelu))  # original with relu
    return Deconv2DLayer(layer, self.channels, 5, stride=2, crop='same',
                         output_size=28, nonlinearity=None)
def build_discriminator_32(image=None, ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 32, 32), input_var=image)
    print("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4, 4), (2, 2), pad=1, W=Normal(0.02),
                       nonlinearity=lrelu)
    print("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(
        Conv2DLayer(dis1, ndf * 2, (4, 4), (2, 2), pad=1, W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(
        Conv2DLayer(dis2, ndf * 4, (4, 4), (2, 2), pad=1, W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Dis conv3:", dis3.output_shape)
    # Dense output layer
    dis4 = DenseLayer(dis3, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print("Dis output:", dis4.output_shape)
    return dis4
def build_discriminator(input_img=None, input_text=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer, batch_norm, ConcatLayer)
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.1)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_img)
    layer2 = InputLayer(shape=(None, 1, 300), input_var=input_text)
    layer2 = ReshapeLayer(layer2, ([0], 1 * 300))
    # layer_list, fclayer_list, filter_sz and stride are module-level settings
    for i in reversed(range(len(layer_list))):
        # integer division keeps `pad` an int under Python 3
        layer = batch_norm(Conv2DLayer(layer, layer_list[i], filter_sz,
                                       stride=stride, pad=(filter_sz - 1) // 2,
                                       nonlinearity=lrelu))
    newPS = 28
    if stride != 1:
        newPS = 28 // (2 ** len(layer_list))
    layer = ReshapeLayer(layer, ([0], layer_list[0] * newPS * newPS))
    layer = ConcatLayer([layer, layer2], axis=1)
    for i in reversed(range(len(fclayer_list))):
        layer = batch_norm(DenseLayer(layer, fclayer_list[i],
                                      nonlinearity=lrelu))
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print("Discriminator output:", layer.output_shape)
    return layer
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 3, 64, 64)
    layer = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)
    layer = GAN.GaussianNoiseLayer(layer, sigma=0.5)
    # four convolutions
    layer = batch_norm(
        Conv2DLayer(layer, 64, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 512, 5, stride=2, pad='same', nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print("critic output:", layer.output_shape)
    return layer
def build_discriminator(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer, batch_norm, DropoutLayer)
    from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer  # override
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 3, 64, 64)
    layer = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)
    # 2 convolutions
    layer = batch_norm(Conv2DLayer(layer, 128, 3, stride=1, pad=1,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 3, stride=2, pad=1,
                                   nonlinearity=lrelu))
    # 3 convolutions
    layer = batch_norm(Conv2DLayer(layer, 192, 3, stride=1, pad=1,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 192, 3, stride=1, pad=1,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 192, 3, stride=2, pad=1,
                                   nonlinearity=lrelu))
    # 3 convolutions
    layer = batch_norm(Conv2DLayer(layer, 256, 5, stride=1, pad=2,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 256, 5, stride=1, pad=2,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 192, 5, stride=2, pad=2,
                                   nonlinearity=lrelu))
    # 4 convolutions
    layer = batch_norm(Conv2DLayer(layer, 384, 5, stride=1, pad=2,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 384, 5, stride=1, pad=2,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 384, 5, stride=1, pad=2,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 384, 5, stride=2, pad=2,
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 4096, nonlinearity=lrelu))
    # output layer
    layer = batch_norm(DropoutLayer(layer, 0.5))
    # after the FC layer, a Global Average Pooling layer could be added:
    #layer = lasagne.layers.GlobalPoolLayer(layer)
    layer = DenseLayer(layer, 1, nonlinearity=sigmoid)
    print("Discriminator output:", layer.output_shape)
    return layer
def build_critic(input_var=None, model_name='wgan'):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(
        Conv2DLayer(layer, 64, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer
    if model_name == 'dcgan':
        layer = DenseLayer(layer, 1, nonlinearity=sigmoid)
    elif model_name == 'wgan':
        layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    elif model_name == 'lsgan':
        layer = DenseLayer(layer, 1, nonlinearity=None)
    print("critic output:", layer.output_shape)
    return layer
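# Usage sketch (assumption): plugging build_critic into a WGAN objective.
# `generator` stands for any lasagne layer producing (None, 1, 28, 28)
# samples; it is not defined in this snippet.
import theano.tensor as T
import lasagne

real_imgs = T.tensor4('real')
critic = build_critic(model_name='wgan')
real_score = lasagne.layers.get_output(critic, inputs=real_imgs)
fake_imgs = lasagne.layers.get_output(generator)
fake_score = lasagne.layers.get_output(critic, inputs=fake_imgs)
# the critic maximizes real - fake, i.e. minimizes this loss
critic_loss = fake_score.mean() - real_score.mean()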
def build_critic(gan, input_var=None, do_batch_norm=False):
    from lasagne.layers import (InputLayer, Conv2DLayer, DenseLayer)
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions (BatchNorm is a project-local wrapper that applies
    # batch normalization only when do_batch_norm is True)
    layer = BatchNorm(
        Conv2DLayer(layer, 64, 5, stride=2, pad='same', nonlinearity=lrelu),
        do_batch_norm)
    layer = BatchNorm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu),
        do_batch_norm)
    # fully-connected layer
    layer = BatchNorm(DenseLayer(layer, 1024, nonlinearity=lrelu),
                      do_batch_norm)
    # output layer
    if gan in ('wgan', 'wgan-gp'):
        layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    elif gan in ('lsgan', ):
        layer = DenseLayer(layer, 1, nonlinearity=None)
    elif gan in ('dcgan', ):
        from lasagne.nonlinearities import sigmoid
        layer = DenseLayer(layer, 1, nonlinearity=sigmoid)
    else:
        raise Exception("GAN {} is not supported".format(gan))
    print("Critic output: ", layer.output_shape)
    return layer
def discriminator(input_var=None, configs=None):
    lrelu = LeakyRectify(0.2)
    network = InputLayer(shape=(None, 1, configs['img_rows'],
                                configs['img_cols']),
                         input_var=input_var)
    network = batch_norm(
        Conv2DLayer(network, num_filters=64, filter_size=(5, 5), stride=2,
                    nonlinearity=lrelu, W=lasagne.init.GlorotUniform()))
    # network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2))
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network, num_filters=128, filter_size=5,
                                   stride=2, nonlinearity=lrelu))
    # network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2))
    network = batch_norm(
        DenseLayer(incoming=lasagne.layers.dropout(network, p=0.25),
                   num_units=1024, nonlinearity=lrelu))
    network = DenseLayer(
        incoming=network,
        num_units=1,
        nonlinearity=sigmoid,
    )
    # nb_classes is assumed to be a module-level constant (it must be 1 here
    # for the reshape of the single sigmoid unit to be valid)
    network = lasagne.layers.ReshapeLayer(network, (-1, nb_classes))
    return network
def build_synth(input_dist=None):
    from lasagne.layers import (InputLayer, DenseLayer, batch_norm,
                                ReshapeLayer)
    from lasagne.nonlinearities import LeakyRectify, rectify
    lrelu = LeakyRectify(0.2)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_dist)
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    layer = batch_norm(DenseLayer(layer, 1 * 28 * 28))  #, nonlinearity=lrelu)
    layer = ReshapeLayer(layer, ([0], 1, 28, 28))
    return layer
def build_generator_128(noise=None, ngf=128):
    lrelu = LeakyRectify(0.2)
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    # FC Layer
    gnet0 = DenseLayer(InputNoise, ngf * 16 * 4 * 4, W=Normal(0.02),
                       nonlinearity=lrelu)
    print("Gen fc1:", gnet0.output_shape)
    # Reshape Layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf * 16, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf * 8, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv1:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf * 8, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv2:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, ngf * 4, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv3:", gnet4.output_shape)
    # DeConv Layer
    gnet5 = Deconv2DLayer(gnet4, ngf * 4, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv4:", gnet5.output_shape)
    # DeConv Layer
    gnet6 = Deconv2DLayer(gnet5, ngf * 2, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv5:", gnet6.output_shape)
    # DeConv Layer
    gnet7 = Deconv2DLayer(gnet6, 3, (3, 3), (1, 1), crop='same',
                          W=Normal(0.02), nonlinearity=tanh)
    print("Gen output:", gnet7.output_shape)
    return gnet7
def get_nonlinearity(layer):
    # default for all Conv2DLayer, Conv2DCCLayer, and DenseLayer
    default_nonlinear = "ReLU"
    req = layer.get("nonlinearity") or default_nonlinear
    return {
        # leak_alpha is a module-level setting, e.g. leak_alpha = 3
        "LReLU": LeakyRectify(1. / leak_alpha),
        "None": None,
        "sigmoid": nonlinearities.sigmoid,
        "ReLU": nonlinearities.rectify,
        "softmax": nonlinearities.softmax,
        "tanh": nonlinearities.tanh,
    }[req]
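# Usage sketch (hypothetical spec dict, not from the original source):
# get_nonlinearity reads the "nonlinearity" key of a layer config entry;
# with leak_alpha = 3 at module level, "LReLU" resolves to LeakyRectify(1/3).
layer_spec = {"type": "Conv2DLayer", "num_filters": 32,
              "nonlinearity": "LReLU"}
nonlin = get_nonlinearity(layer_spec)
fallback = get_nonlinearity({})  # missing key -> default "ReLU" (rectify)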
def build_discriminator(input_var=None, dim_h=128):
    lrelu = LeakyRectify(0.2)
    layer = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)
    layer = Conv2DLayer(layer, dim_h, 5, stride=2, pad=2, nonlinearity=lrelu)
    layer = batch_norm(Conv2DLayer(layer, dim_h * 2, 5, stride=2, pad=2,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, dim_h * 4, 5, stride=2, pad=2,
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, dim_h * 8, 5, stride=2, pad=2,
                                   nonlinearity=lrelu))
    layer = DenseLayer(layer, 1, nonlinearity=None)
    logger.debug('Discriminator output: {}'.format(layer.output_shape))
    return layer
def make_net(W, H, size1=20, size2=15):
    net = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('dense1', DenseLayer),
            ('dense2', DenseLayer),
            ('output', DenseLayer),
        ],
        input_shape=(None, W * H),
        dense1_num_units=size1,
        dense1_nonlinearity=LeakyRectify(leakiness=0.1),
        dense1_W=HeNormal(),
        dense1_b=Constant(),
        dense2_num_units=size2,
        dense2_nonlinearity=LeakyRectify(leakiness=0.1),
        dense2_W=HeNormal(),
        dense2_b=Constant(),
        output_num_units=4,
        output_nonlinearity=softmax,
        output_W=HeNormal(),
        output_b=Constant(),
        update=nesterov_momentum,  # todo
        update_learning_rate=shared(float32(1.)),
        update_momentum=0.9,
        max_epochs=200,
        on_epoch_finished=[
            StopWhenOverfitting(),
            StopAfterMinimum(),
            AdjustLearningRate(1., 0.0001),
        ],
        #label_encoder = False,
        regression=True,
        verbose=1,
        batch_iterator_train=BatchIterator(batch_size=128),  # todo
        batch_iterator_test=BatchIterator(batch_size=128),
        train_split=TrainSplit(eval_size=0.1),
    )
    net.initialize()
    return net
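# Usage sketch (assumption): make_net returns an initialized nolearn net in
# regression mode, so targets here are float32 one-hot rows of width 4; the
# custom callbacks (StopWhenOverfitting etc.) must be defined elsewhere.
import numpy as np

net = make_net(W=28, H=28)
X = np.random.rand(500, 28 * 28).astype(np.float32)
y = np.eye(4, dtype=np.float32)[np.random.randint(0, 4, size=500)]
net.fit(X, y)
preds = net.predict(X[:10])  # softmax outputs, shape (10, 4)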
def _build_network(self):
    """
    Build a discriminator network.

    :return: OrderedDict of named lasagne layers
    """
    net = OrderedDict()
    net["inp"] = InputLayer([None, self.n_input_channels] +
                            list(self.inp_dims))
    net["conv1"] = Conv2DLayer(net["inp"], 64, 5, stride=2, pad="same",
                               nonlinearity=LeakyRectify(0.2))
    net["conv2"] = Conv2DLayer(net["conv1"], 128, 5, stride=2, pad="same",
                               nonlinearity=LeakyRectify(0.2))
    net["conv3"] = Conv2DLayer(net["conv2"], 256, 5, stride=2, pad="same",
                               nonlinearity=LeakyRectify(0.2))
    net["conv4"] = Conv2DLayer(net["conv3"], 256, 5, stride=2, pad="same",
                               nonlinearity=LeakyRectify(0.2))
    net["conv5"] = Conv2DLayer(net["conv4"], 256, 5, stride=2, pad="same",
                               nonlinearity=LeakyRectify(0.2))
    net["dense1"] = DenseLayer(net["conv5"], 512,
                               nonlinearity=LeakyRectify(0.2))
    net["dense2"] = DenseLayer(net["dense1"], 512,
                               nonlinearity=LeakyRectify(0.2))
    # Wasserstein critics use a linear output; otherwise use a sigmoid
    out_nonlin = None
    if not self.wasserstein:
        out_nonlin = T.nnet.sigmoid
    net["out"] = DenseLayer(net["dense2"], 1, nonlinearity=out_nonlin)
    return net
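# Usage sketch (assumption): because _build_network returns an OrderedDict of
# named layers, intermediate activations can be pulled out by key, e.g. for a
# feature-matching loss; `disc` stands for an instance of the owning class.
import lasagne

net = disc._build_network()
score = lasagne.layers.get_output(net["out"])
features = lasagne.layers.get_output(net["conv5"])
params = lasagne.layers.get_all_params(net["out"], trainable=True)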
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer, MaxPool2DLayer, dropout)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify, rectify
    lrelu = LeakyRectify(0.2)
    layer = InputLayer(shape=(None, 1, 128, 128), input_var=input_var,
                       name='d_in_data')
    print("MNIST critic")
    # convolution layers
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    """
    print("naive CREPE critic")
    # words from sequences with 7 characters
    # each filter learns a word representation of shape M x 1
    layer = Conv2DLayer(layer, 128, (128, 7), nonlinearity=lrelu)
    layer = MaxPool2DLayer(layer, (1, 3))
    # temporal convolution, 7-gram
    layer = Conv2DLayer(layer, 128, (1, 7), nonlinearity=lrelu)
    layer = MaxPool2DLayer(layer, (1, 3))
    # temporal convolution, 3-gram
    layer = Conv2DLayer(layer, 128, (1, 3), nonlinearity=lrelu)
    layer = Conv2DLayer(layer, 128, (1, 3), nonlinearity=lrelu)
    layer = Conv2DLayer(layer, 128, (1, 3), nonlinearity=lrelu)
    layer = Conv2DLayer(layer, 128, (1, 3), nonlinearity=lrelu)
    # fully-connected layers
    layer = DenseLayer(layer, 1024, nonlinearity=rectify)
    layer = DenseLayer(layer, 1024, nonlinearity=rectify)
    """
    layer = DenseLayer(layer, 1, nonlinearity=lrelu)
    print("critic output:", layer.output_shape)
    return layer
def test_nonlinearity(self, nonlinearity):
    import lasagne.nonlinearities

    if nonlinearity == 'leaky_rectify_0':
        from lasagne.nonlinearities import LeakyRectify
        theano_nonlinearity = LeakyRectify(leakiness=0)
    else:
        theano_nonlinearity = getattr(lasagne.nonlinearities, nonlinearity)
    np_nonlinearity = getattr(self, nonlinearity)

    X = T.matrix()
    X0 = lasagne.utils.floatX(np.random.uniform(-3, 3, (10, 10)))

    theano_result = theano_nonlinearity(X).eval({X: X0})
    np_result = np_nonlinearity(X0)

    assert np.allclose(theano_result, np_result)
def build_discriminator(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer, batch_norm)
    from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer  # override
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu)
    layer = Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu)
    # fully-connected layer
    layer = DenseLayer(layer, 1024, nonlinearity=lrelu)
    # output layer
    mu = DenseLayer(layer, 100, nonlinearity=None)
    log_sigma = DenseLayer(layer, 100, nonlinearity=None)
    print("Discriminator output:", mu.output_shape)
    return mu, log_sigma
def build_critic(self, critic_model='critic_28', input_var=None, z_var=None):
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    in_x_layer = InputLayer(shape=(None, self.channels, self.width,
                                   self.height),
                            input_var=input_var)
    in_z_layer = InputLayer(shape=(None, self.z_dim), input_var=z_var)
    l_out_disc = None
    if critic_model == 'critic_28':
        l_out_disc = self.build_critic_28(in_x_layer, in_z_layer)
    else:
        # a bare `raise` has no active exception here, so raise explicitly
        raise ValueError('unknown critic ' + critic_model)
    print("critic output:", l_out_disc.output_shape)
    return l_out_disc, in_x_layer, in_z_layer
def build_discriminator(input_var=None):
    lrelu = LeakyRectify(0.2)
    layer = InputLayer(shape=(None, 8, 32, 32), input_var=input_var)
    # three convolutions
    layer = Conv2DLayer(layer, 512, 5, stride=2, pad=2, nonlinearity=lrelu)
    '''
    layer = batch_norm(Conv2DLayer(layer, 128 * 2, 5, stride=2, pad=2, nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128 * 4, 5, stride=2, pad=2, nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128 * 8, 5, stride=2, pad=2, nonlinearity=lrelu))
    '''
    layer = Conv2DLayer(layer, 512 * 2, 5, stride=2, pad=2, nonlinearity=lrelu)
    layer = Conv2DLayer(layer, 512 * 4, 5, stride=2, pad=2, nonlinearity=lrelu)
    #layer = Conv2DLayer(layer, 128 * 8, 5, stride=2, pad=2, nonlinearity=lrelu)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    print("Discriminator output:", layer.output_shape)
    return layer
def build_discriminator(input_var=None, dim_h=64, use_batch_norm=True,
                        leak=None):
    if not use_batch_norm:
        bn = lambda x: x
    else:
        bn = batch_norm
    # leak must be supplied by the caller; LeakyRectify(None) would fail
    lrelu = LeakyRectify(leak)
    layer = InputLayer(shape=(None, DIM_C, DIM_X, DIM_Y), input_var=input_var)
    layer = bn(Conv2DLayer(layer, dim_h, 5, stride=2, pad=2,
                           nonlinearity=lrelu))
    layer = bn(Conv2DLayer(layer, dim_h * 2, 5, stride=2, pad=2,
                           nonlinearity=lrelu))
    layer = DenseLayer(layer, 1024, nonlinearity=lrelu)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    logger.debug('Discriminator output: {}'.format(layer.output_shape))
    return layer
def build_encoder_28(self, layer_in, encoder_mode='encoder_28'):
    lrelu = LeakyRectify(0.2)
    layer = batch_norm(
        Conv2DLayer(layer_in, 128, 5, stride=2, pad='same',
                    nonlinearity=lrelu))  # original with relu
    layer = batch_norm(
        Conv2DLayer(layer, 256, 5, stride=2, pad='same',
                    nonlinearity=lrelu))  # original with relu
    layer = ReshapeLayer(layer, ([0], 6272 * 2))
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    return batch_norm(DenseLayer(layer, self.z_dim, nonlinearity=None))
def build_discriminator(input_img=None, input_text=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer, batch_norm, ConcatLayer)
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.1)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_img)
    layer = ReshapeLayer(layer, ([0], 1 * 28 * 28))
    layer2 = InputLayer(shape=(None, 1, 300), input_var=input_text)
    layer2 = ReshapeLayer(layer2, ([0], 1 * 300))
    layer = ConcatLayer([layer, layer2], axis=1)
    # layer_list is a module-level list of hidden-layer sizes
    for i in reversed(range(len(layer_list))):
        layer = batch_norm(DenseLayer(layer, layer_list[i],
                                      nonlinearity=lrelu))
    layer = DenseLayer(layer, 1, nonlinearity=sigmoid)
    print("Discriminator output:", layer.output_shape)
    return layer
def build_discriminator(input_var, inputWidth, inputHeight):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer, batch_norm, dropout)
    # from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer  # override
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 4, inputHeight, inputWidth)
    layer = InputLayer(shape=(None, 4, inputHeight, inputWidth),
                       input_var=input_var)
    # two convolutions
    layer = batch_norm(
        Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer
    layer = DenseLayer(layer, 1, nonlinearity=None)
    print("Discriminator output:", layer.output_shape)
    return layer
def build_disc(inp):
    lr = LeakyRectify(leakiness=0.2)
    net = InputLayer((None, 3, 256, 256), input_var=inp)
    # 256 * 256
    net = batch_norm(Conv2DLayer(net, 64, 4, stride=2, pad=1,
                                 nonlinearity=lr))
    # 128 * 128
    net = batch_norm(Conv2DLayer(net, 128, 4, stride=2, pad=1,
                                 nonlinearity=lr))
    # 64 * 64
    net = batch_norm(Conv2DLayer(net, 256, 4, stride=2, pad=1,
                                 nonlinearity=lr))
    # 32 * 32
    net = batch_norm(Conv2DLayer(net, 512, 4, stride=4, nonlinearity=lr))
    # 8 * 8
    net = batch_norm(Conv2DLayer(net, 512, 4, stride=4, nonlinearity=lr))
    # 2 * 2
    net = batch_norm(DenseLayer(net, 4096, nonlinearity=lr))
    net = batch_norm(DenseLayer(net, 1024, nonlinearity=lr))
    net = DenseLayer(net, 1, nonlinearity=sigmoid)
    print("Discriminator output:", net.output_shape)
    return net