def QNetwork(input_var):
    """
    Sets up a Lasagne network that decides which move to play.
    """
    n_actions = 2

    from lasagne.layers import DenseLayer, InputLayer, ReshapeLayer
    from lasagne.nonlinearities import rectify, linear
    from lasagne.init import GlorotNormal, Constant

    network = InputLayer(shape=(None, 4), input_var=input_var, name='Input')
    network = DenseLayer(incoming=network,
                         num_units=24,
                         nonlinearity=rectify,
                         W=GlorotNormal())
    network = DenseLayer(incoming=network,
                         num_units=24,
                         nonlinearity=rectify,
                         W=GlorotNormal())  # alternative: W=lasagne.init.HeUniform()
    network = DenseLayer(incoming=network,
                         num_units=n_actions,
                         W=GlorotNormal(),
                         b=Constant(0),
                         nonlinearity=linear)
    network = ReshapeLayer(network, (-1, n_actions))
    return network
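A minimal usage sketch (not part of the original snippet), assuming Theano and Lasagne are installed; the variable names are illustrative:

import theano
import theano.tensor as T
import lasagne

states = T.matrix('states')                    # batch of 4-dim observations (e.g. CartPole)
network = QNetwork(states)
q_values = lasagne.layers.get_output(network)  # symbolic (batch, n_actions) Q-values
q_fn = theano.function([states], q_values)     # compiled forward pass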
Example #2
    def init_model(self, n_actions, observation_shape):
        nn = InputLayer((None, ) + observation_shape,
                        input_var=self.observations)
        nn1 = DenseLayer(nn, 256, W=GlorotNormal())
        nn2 = DenseLayer(nn1, 64, W=GlorotNormal())
        self.m = DenseLayer(nn2, n_actions, nonlinearity=linear)
        self.logsigma = DenseLayer(nn2, n_actions, nonlinearity=linear)
        self.model = [self.m, self.logsigma]
def test_glorot_normal_c01b_4d_only():
    import pytest
    from lasagne.init import GlorotNormal

    with pytest.raises(RuntimeError):
        GlorotNormal(c01b=True).sample((100, ))

    with pytest.raises(RuntimeError):
        GlorotNormal(c01b=True).sample((100, 100))

    with pytest.raises(RuntimeError):
        GlorotNormal(c01b=True).sample((100, 100, 100))
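The c01b flag targets cuda-convnet style filter tensors laid out as (channels, rows, columns, filters), so only 4D shapes have well-defined fans; anything else raises RuntimeError, as the test above checks. A 4D shape works fine:

GlorotNormal(c01b=True).sample((25, 2, 2, 25))  # OK; see test_glorot_normal_c01b below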
Example #4
# Assumes module-level imports and a global image size along these lines:
#   from lasagne import layers
#   from lasagne.init import GlorotUniform, GlorotNormal
#   from lasagne.nonlinearities import rectify, identity, softmax
#   imageShape = (rows, cols)   # hypothetical, e.g. (64, 64)
def getNet2():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1]))
  loc1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(3,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  loc2Layer = layers.MaxPool2DLayer(loc1Layer, pool_size=(2,2))
  loc3Layer = layers.Conv2DLayer(loc2Layer, num_filters=64, filter_size=(4,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  loc4Layer = layers.MaxPool2DLayer(loc3Layer, pool_size=(2,2))
  loc5Layer = layers.Conv2DLayer(loc4Layer, num_filters=128, filter_size=(3,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  loc6Layer = layers.MaxPool2DLayer(loc5Layer, pool_size=(2,2))
  loc7Layer = layers.Conv2DLayer(loc6Layer, num_filters=256, filter_size=(3,2), W=GlorotUniform('relu'), nonlinearity=rectify)
  #loc7Layer = layers.DenseLayer(loc5Layer, num_units=1024, nonlinearity=rectify)
  loc8Layer = layers.DenseLayer(loc7Layer, num_units=256, W=GlorotUniform('relu'), nonlinearity=rectify)
  loc9Layer = layers.DenseLayer(loc8Layer, num_units=128, W=GlorotUniform('relu'), nonlinearity=rectify)
  loc10Layer = layers.DenseLayer(loc9Layer, num_units=64, W=GlorotUniform('relu'), nonlinearity=rectify)
  #loc11Layer = layers.DenseLayer(loc10Layer, num_units=32, nonlinearity=tanh)
  #loc12Layer = layers.DenseLayer(loc11Layer, num_units=16, nonlinearity=tanh)
  locOutLayer = layers.DenseLayer(loc10Layer, num_units=6, W=GlorotUniform(1.0), nonlinearity=identity)

  transformLayer = layers.TransformerLayer(inputLayer, locOutLayer, downsample_factor=1.0)

  conv1Layer = layers.Conv2DLayer(transformLayer, num_filters=32, filter_size=(3,3), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2,2))
  conv2Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(4,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2,2))
  conv3Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(3,3), W=GlorotUniform('relu'), nonlinearity=rectify)
  pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2,2))
  conv4Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(3,2), W=GlorotNormal('relu'), nonlinearity=rectify)
  hidden1Layer = layers.DenseLayer(conv4Layer, num_units=1024, W=GlorotUniform('relu'), nonlinearity=rectify)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=512, W=GlorotUniform('relu'), nonlinearity=rectify)
  #hidden3Layer = layers.DenseLayer(hidden2Layer, num_units=256, nonlinearity=tanh)
  outputLayer = layers.DenseLayer(hidden2Layer, num_units=10, W=GlorotUniform(1.0), nonlinearity=softmax)
  return outputLayer
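TransformerLayer expects locOutLayer to emit the six parameters of a 2x3 affine matrix. A common refinement, not used in getNet2 above, is to start the localization head at the identity transform; a sketch, with Constant from lasagne.init:

import numpy as np
from lasagne.init import Constant

identity_affine = np.array([1, 0, 0, 0, 1, 0], dtype='float32')  # [[1,0,0],[0,1,0]]
# locOutLayer = layers.DenseLayer(loc10Layer, num_units=6,
#                                 W=Constant(0.0), b=identity_affine,
#                                 nonlinearity=identity)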
Example #5
    def __init__(self, gain_ini, n_hidden, n_chars, n_mixt_attention,
                 n_mixtures):
        """
        Parameters
        ----------
        gain_ini: float or str
            Gain passed to the GlorotNormal weight initializer
        n_hidden: int
            Number of hidden units in the recurrent layer
        n_chars: int
            Number of different characters
        n_mixt_attention: int
            Number of mixtures used by the attention mechanism
        n_mixtures: int
            Number of mixtures in the Gaussian Mixture model
        """
        self.n_hidden = n_hidden
        self.n_chars = n_chars
        self.n_mixt_attention = n_mixt_attention
        self.n_mixtures = n_mixtures

        ini = GlorotNormal(gain_ini)

        self.pos_layer = PositionAttentionLayer(
            GRULayer([3, self.n_chars], n_hidden, ini), self.n_chars,
            self.n_mixt_attention, ini)

        self.mixture = MixtureGaussians2D([n_hidden, self.n_chars], n_mixtures,
                                          ini)

        self.params = self.pos_layer.params + self.mixture.params
Example #6
    def __init__(self, gain_ini, n_hidden, n_mixtures):
        ini = GlorotNormal(gain_ini)

        self.gru_layer = GRULayer(3, n_hidden, ini)

        self.mixture = MixtureGaussians2D(n_hidden, n_mixtures, ini)

        self.params = self.gru_layer.params + self.mixture.params
def test_glorot_normal_gain():
    from lasagne.init import GlorotNormal

    sample = GlorotNormal(gain=10.0).sample((100, 100))
    assert -0.1 < sample.mean() < 0.1
    assert 0.9 < sample.std() < 1.1

    sample = GlorotNormal(gain='relu').sample((100, 100))
    assert -0.01 < sample.mean() < 0.01
    assert 0.132 < sample.std() < 0.152
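These bounds follow directly from the Glorot formula, std = gain * sqrt(2 / (fan_in + fan_out)), where the 'relu' gain is sqrt(2); a quick check of the two cases above:

import numpy as np

fan_in = fan_out = 100
print(10.0 * np.sqrt(2.0 / (fan_in + fan_out)))        # 1.0    -> matches 0.9 < std < 1.1
print(np.sqrt(2) * np.sqrt(2.0 / (fan_in + fan_out)))  # ~0.141 -> matches 0.132 < std < 0.152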
Example #8
def convert_initialization(component, nonlinearity="sigmoid"):
    # component = init_dic[component_key]
    assert(len(component) == 2)
    if component[0] == "uniform":
        return Uniform(component[1])
    elif component[0] == "glorotnormal":
        if nonlinearity in ["linear", "sigmoid", "tanh"]:
            return GlorotNormal(1.)
        else:
            return GlorotNormal("relu")
    elif component[0] == "glorotuniform":
        if nonlinearity in ["linear", "sigmoid", "tanh"]:
            return GlorotUniform(1.)
        else:
            return GlorotUniform("relu")
    elif component[0] == "normal":
        return Normal(*component[1])
    else:
        raise NotImplementedError()
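A hypothetical call, following the (name, argument) convention the function expects; note that component[1] is ignored on the Glorot branches:

w_init = convert_initialization(("glorotnormal", None), nonlinearity="rectify")
# -> GlorotNormal("relu"), since "rectify" is not in the linear/sigmoid/tanh group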
Example #9
# The same module-level imports and imageShape global as getNet2 are assumed.
def getNet1():
    inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0],
                                          imageShape[1]))
    conv1Layer = layers.Conv2DLayer(inputLayer,
                                    num_filters=32,
                                    filter_size=(3, 3),
                                    W=GlorotNormal(0.8),
                                    nonlinearity=rectify)
    pool1Layer = layers.MaxPool2DLayer(conv1Layer, pool_size=(2, 2))
    dropout1Layer = layers.DropoutLayer(pool1Layer, p=0.5)
    conv2Layer = layers.Conv2DLayer(dropout1Layer,
                                    num_filters=64,
                                    filter_size=(4, 3),
                                    W=GlorotUniform(1.0),
                                    nonlinearity=rectify)
    pool2Layer = layers.MaxPool2DLayer(conv2Layer, pool_size=(2, 2))
    dropout2Layer = layers.DropoutLayer(pool2Layer, p=0.5)
    conv3Layer = layers.Conv2DLayer(dropout2Layer,
                                    num_filters=128,
                                    filter_size=(3, 3),
                                    W=GlorotUniform(1.0),
                                    nonlinearity=rectify)
    pool3Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2, 2))
    dropout3Layer = layers.DropoutLayer(pool3Layer, p=0.5)
    conv4Layer = layers.Conv2DLayer(dropout3Layer,
                                    num_filters=256,
                                    filter_size=(3, 2),
                                    W=GlorotNormal(0.8),
                                    nonlinearity=rectify)
    hidden1Layer = layers.DenseLayer(conv4Layer,
                                     num_units=1024,
                                     W=GlorotUniform(1.0),
                                     nonlinearity=rectify)
    hidden2Layer = layers.DenseLayer(hidden1Layer,
                                     num_units=512,
                                     W=GlorotUniform(1.0),
                                     nonlinearity=rectify)
    #hidden3Layer = layers.DenseLayer(hidden2Layer, num_units=256, nonlinearity=tanh)
    outputLayer = layers.DenseLayer(hidden2Layer,
                                    num_units=10,
                                    nonlinearity=softmax)
    return outputLayer
Example #11
# As for getNet2, module-level imports and imageShape are assumed; this variant
# additionally uses HeNormal from lasagne.init.
def getNet8():
  inputLayer = layers.InputLayer(shape=(None, 1, imageShape[0], imageShape[1]))
  conv1Layer = layers.Conv2DLayer(inputLayer, num_filters=32, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify)
  conv2Layer = layers.Conv2DLayer(conv1Layer, num_filters=32, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify)
  conv3Layer = layers.Conv2DLayer(conv2Layer, num_filters=32, filter_size=(3,3), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool1Layer = layers.MaxPool2DLayer(conv3Layer, pool_size=(2,2))
  conv4Layer = layers.Conv2DLayer(pool1Layer, num_filters=64, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify)
  conv5Layer = layers.Conv2DLayer(conv4Layer, num_filters=64, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify)
  conv6Layer = layers.Conv2DLayer(conv5Layer, num_filters=64, filter_size=(4,3), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool2Layer = layers.MaxPool2DLayer(conv6Layer, pool_size=(2,2))
  conv7Layer = layers.Conv2DLayer(pool2Layer, num_filters=128, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify)
  conv8Layer = layers.Conv2DLayer(conv7Layer, num_filters=128, filter_size=(3,3), pad=(1,1), W=HeNormal('relu'), nonlinearity=rectify)
  conv9Layer = layers.Conv2DLayer(conv8Layer, num_filters=128, filter_size=(3,3), W=GlorotNormal('relu'), nonlinearity=rectify)
  pool3Layer = layers.MaxPool2DLayer(conv9Layer, pool_size=(2,2))
  #conv4Layer = layers.Conv2DLayer(pool3Layer, num_filters=256, filter_size=(3,2), W=GlorotNormal('relu'), nonlinearity=elu)
  hidden1Layer = layers.DenseLayer(pool3Layer, num_units=1024, W=GlorotNormal('relu'), nonlinearity=rectify)
  dropout1Layer = layers.DropoutLayer(hidden1Layer, p=0.5)
  hidden2Layer = layers.DenseLayer(dropout1Layer, num_units=512, W=GlorotNormal('relu'), nonlinearity=rectify)
  dropout2Layer = layers.DropoutLayer(hidden2Layer, p=0.5)
  hidden3Layer = layers.DenseLayer(dropout2Layer, num_units=256, W=GlorotNormal('relu'), nonlinearity=rectify)
  outputLayer = layers.DenseLayer(hidden3Layer, num_units=10, W=GlorotNormal(1.0), nonlinearity=softmax)
  return outputLayer
Example #12
# Excerpt from a nolearn.lasagne setup; NeuralNet comes from nolearn.lasagne,
# the layer classes and initializers from lasagne. The head of layers0 is
# reconstructed here from the parameter names passed to NeuralNet below.
layers0 = [
    ('input', InputLayer),
    ('inputDropout0', DropoutLayer),
    ('dense0', DenseLayer),
    ('dropout0', DropoutLayer),
    ('dense1', DenseLayer),
    ('dropout1', DropoutLayer),
    #           ('dense2', DenseLayer),
    #           ('dropout2', DropoutLayer),
    ('output', DenseLayer)
]

#0.686160
# inDrop=0.2, den0=1000, den0drop=.6, den1=1000, den1drop=0.6

np.random.seed(15)
net0 = NeuralNet(
    layers=layers0,
    input_shape=(None, num_features),
    inputDropout0_p=0.35,
    dense0_num_units=128,
    dense0_W=GlorotNormal(),
    dense0_b=Constant(1.0),
    dropout0_p=0.5,
    dense0_nonlinearity=rectify,
    #                 noise0_sigma=2,
    dense1_num_units=128,
    dense1_W=GlorotNormal(),
    dense1_b=Constant(1.0),
    dense1_nonlinearity=rectify,
    dropout1_p=0.5,
    #                 dense2_num_units=30,
    #                 dense2_W=GlorotUniform(),
    #                 dense2_b = Constant(1.0),
    #                 dense2_nonlinearity=rectify,
    #                 dropout2_p=0.3,
    output_num_units=num_classes,
Example #13
# Assumes the usual module-level aliases, e.g.:
#   from lasagne.layers import (InputLayer, DropoutLayer, Pool2DLayer,
#                               ConcatLayer, Deconv2DLayer, batch_norm)
#   from lasagne.layers import Conv2DLayer as ConvLayer
#   from lasagne.init import GlorotNormal
#   from lasagne.nonlinearities import sigmoid
# RGBtoBGRLayer is a project-specific layer that reorders RGB input to BGR.
def build(inputHeight, inputWidth, input_var, do_dropout=False):
    net = {
        'input':
        InputLayer((None, 3, inputHeight, inputWidth), input_var=input_var)
    }
    print("Input: {}".format(net['input'].output_shape[1:]))

    net['bgr'] = RGBtoBGRLayer(net['input'])

    net['contr_1_1'] = batch_norm(
        ConvLayer(net['bgr'], 64, 3, pad='same', W=GlorotNormal(gain="relu")))
    print("convtr1_1: {}".format(net['contr_1_1'].output_shape[1:]))
    net['contr_1_2'] = batch_norm(
        ConvLayer(net['contr_1_1'],
                  64,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("convtr1_2: {}".format(net['contr_1_2'].output_shape[1:]))
    net['pool1'] = Pool2DLayer(net['contr_1_2'], 2)
    print("pool1: {}".format(net['pool1'].output_shape[1:]))

    net['contr_2_1'] = batch_norm(
        ConvLayer(net['pool1'],
                  128,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("convtr2_1: {}".format(net['contr_2_1'].output_shape[1:]))
    net['contr_2_2'] = batch_norm(
        ConvLayer(net['contr_2_1'],
                  128,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("convtr2_2: {}".format(net['contr_2_2'].output_shape[1:]))
    net['pool2'] = Pool2DLayer(net['contr_2_2'], 2)
    print("pool2: {}".format(net['pool2'].output_shape[1:]))

    net['contr_3_1'] = batch_norm(
        ConvLayer(net['pool2'],
                  256,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("convtr3_1: {}".format(net['contr_3_1'].output_shape[1:]))
    net['contr_3_2'] = batch_norm(
        ConvLayer(net['contr_3_1'],
                  256,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("convtr3_2: {}".format(net['contr_3_2'].output_shape[1:]))
    net['pool3'] = Pool2DLayer(net['contr_3_2'], 2)
    print("pool3: {}".format(net['pool3'].output_shape[1:]))

    net['contr_4_1'] = batch_norm(
        ConvLayer(net['pool3'],
                  512,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("convtr4_1: {}".format(net['contr_4_1'].output_shape[1:]))
    net['contr_4_2'] = batch_norm(
        ConvLayer(net['contr_4_1'],
                  512,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("convtr4_2: {}".format(net['contr_4_2'].output_shape[1:]))
    l = net['pool4'] = Pool2DLayer(net['contr_4_2'], 2)
    print("pool4: {}".format(net['pool4'].output_shape[1:]))
    # the paper does not really describe where and how dropout is added. Feel free to try more options
    if do_dropout:
        l = DropoutLayer(l, p=0.4)

    net['encode_1'] = batch_norm(
        ConvLayer(l, 1024, 3, pad='same', W=GlorotNormal(gain="relu")))
    print("encode_1: {}".format(net['encode_1'].output_shape[1:]))
    net['encode_2'] = batch_norm(
        ConvLayer(net['encode_1'],
                  1024,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("encode_2: {}".format(net['encode_2'].output_shape[1:]))
    net['upscale1'] = batch_norm(
        Deconv2DLayer(net['encode_2'],
                      1024,
                      2,
                      2,
                      crop="valid",
                      W=GlorotNormal(gain="relu")))
    print("upscale1: {}".format(net['upscale1'].output_shape[1:]))

    net['concat1'] = ConcatLayer([net['upscale1'], net['contr_4_2']],
                                 cropping=(None, None, "center", "center"))
    print("concat1: {}".format(net['concat1'].output_shape[1:]))
    net['expand_1_1'] = batch_norm(
        ConvLayer(net['concat1'],
                  512,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("expand_1_1: {}".format(net['expand_1_1'].output_shape[1:]))
    net['expand_1_2'] = batch_norm(
        ConvLayer(net['expand_1_1'],
                  512,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("expand_1_2: {}".format(net['expand_1_2'].output_shape[1:]))
    net['upscale2'] = batch_norm(
        Deconv2DLayer(net['expand_1_2'],
                      512,
                      2,
                      2,
                      crop="valid",
                      W=GlorotNormal(gain="relu")))
    print("upscale2: {}".format(net['upscale2'].output_shape[1:]))

    net['concat2'] = ConcatLayer([net['upscale2'], net['contr_3_2']],
                                 cropping=(None, None, "center", "center"))
    print("concat2: {}".format(net['concat2'].output_shape[1:]))
    net['expand_2_1'] = batch_norm(
        ConvLayer(net['concat2'],
                  256,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("expand_2_1: {}".format(net['expand_2_1'].output_shape[1:]))
    net['expand_2_2'] = batch_norm(
        ConvLayer(net['expand_2_1'],
                  256,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("expand_2_2: {}".format(net['expand_2_2'].output_shape[1:]))
    net['upscale3'] = batch_norm(
        Deconv2DLayer(net['expand_2_2'],
                      256,
                      2,
                      2,
                      crop="valid",
                      W=GlorotNormal(gain="relu")))
    print("upscale3: {}".format(net['upscale3'].output_shape[1:]))

    net['concat3'] = ConcatLayer([net['upscale3'], net['contr_2_2']],
                                 cropping=(None, None, "center", "center"))
    print("concat3: {}".format(net['concat3'].output_shape[1:]))
    net['expand_3_1'] = batch_norm(
        ConvLayer(net['concat3'],
                  128,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("expand_3_1: {}".format(net['expand_3_1'].output_shape[1:]))
    net['expand_3_2'] = batch_norm(
        ConvLayer(net['expand_3_1'],
                  128,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("expand_3_2: {}".format(net['expand_3_2'].output_shape[1:]))
    net['upscale4'] = batch_norm(
        Deconv2DLayer(net['expand_3_2'],
                      128,
                      2,
                      2,
                      crop="valid",
                      W=GlorotNormal(gain="relu")))
    print("upscale4: {}".format(net['upscale4'].output_shape[1:]))

    net['concat4'] = ConcatLayer([net['upscale4'], net['contr_1_2']],
                                 cropping=(None, None, "center", "center"))
    print("concat4: {}".format(net['concat4'].output_shape[1:]))
    net['expand_4_1'] = batch_norm(
        ConvLayer(net['concat4'],
                  64,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("expand_4_1: {}".format(net['expand_4_1'].output_shape[1:]))
    net['expand_4_2'] = batch_norm(
        ConvLayer(net['expand_4_1'],
                  64,
                  3,
                  pad='same',
                  W=GlorotNormal(gain="relu")))
    print("expand_4_2: {}".format(net['expand_4_2'].output_shape[1:]))

    net['output'] = ConvLayer(net['expand_4_2'], 1, 1, nonlinearity=sigmoid)
    print("output: {}".format(net['output'].output_shape[1:]))
    #    net['dimshuffle'] = DimshuffleLayer(net['output_segmentation'], (1, 0, 2, 3))
    #    print "dimshuffle: {}".format(net['dimshuffle'].output_shape[1:])
    #    net['reshapeSeg'] = ReshapeLayer(net['dimshuffle'], (2, -1))
    #    print "reshapeSeg: {}".format(net['reshapeSeg'].output_shape[1:])
    #    net['dimshuffle2'] = DimshuffleLayer(net['reshapeSeg'], (1, 0))
    #    print "dimshuffle2: {}".format(net['dimshuffle2'].output_shape[1:])
    #    net['output_flattened'] = NonlinearityLayer(net['dimshuffle2'], nonlinearity=lasagne.nonlinearities.softmax)
    #    print "output_flattened: {}".format(net['output_flattened'].output_shape[1:])

    return net
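A minimal driver sketch for build (not part of the original), assuming Theano; the tensor name X and the 572x572 input size are illustrative:

import theano.tensor as T
import lasagne

X = T.tensor4('X')        # (batch, 3, height, width)
net = build(572, 572, X)  # prints the output shape of every stage
mask = lasagne.layers.get_output(net['output'])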
Example #14
# Assumes the aliases listed before build() above, plus:
#   from collections import OrderedDict
#   from lasagne.layers import (DimshuffleLayer, ReshapeLayer,
#                               NonlinearityLayer)
#   from lasagne.nonlinearities import elu
#   import lasagne
def build_UNet(n_input_channels=3,
               BATCH_SIZE=None,
               num_output_classes=2,
               pad='same',
               nonlinearity=elu,
               input_dim=(128, 128),
               base_n_filters=64,
               do_dropout=False,
               weights=None):
    net = OrderedDict()
    net['input'] = InputLayer(
        (BATCH_SIZE, n_input_channels, input_dim[0], input_dim[1]))

    net['contr_1_1'] = batch_norm(
        ConvLayer(
            net['input'],
            num_filters=base_n_filters,
            filter_size=3,
            nonlinearity=nonlinearity,
            pad=pad,
            W=GlorotNormal(),
        ))
    net['contr_1_2'] = batch_norm(
        ConvLayer(net['contr_1_1'],
                  num_filters=base_n_filters,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['pool1'] = Pool2DLayer(net['contr_1_2'], pool_size=2)

    net['contr_2_1'] = batch_norm(
        ConvLayer(net['pool1'],
                  num_filters=base_n_filters * 2,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['contr_2_2'] = batch_norm(
        ConvLayer(net['contr_2_1'],
                  num_filters=base_n_filters * 2,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['pool2'] = Pool2DLayer(net['contr_2_2'], pool_size=2)

    net['contr_3_1'] = batch_norm(
        ConvLayer(net['pool2'],
                  num_filters=base_n_filters * 4,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['contr_3_2'] = batch_norm(
        ConvLayer(net['contr_3_1'],
                  num_filters=base_n_filters * 4,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['pool3'] = Pool2DLayer(net['contr_3_2'], pool_size=2)

    net['contr_4_1'] = batch_norm(
        ConvLayer(net['pool3'],
                  num_filters=base_n_filters * 8,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['contr_4_2'] = batch_norm(
        ConvLayer(net['contr_4_1'],
                  num_filters=base_n_filters * 8,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    l = net['pool4'] = Pool2DLayer(net['contr_4_2'], pool_size=2)

    if do_dropout:
        l = DropoutLayer(l, p=0.4)

    net['encode_1'] = batch_norm(
        ConvLayer(l,
                  num_filters=base_n_filters * 16,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['encode_2'] = batch_norm(
        ConvLayer(net['encode_1'],
                  num_filters=base_n_filters * 16,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['upscale1'] = batch_norm(
        Deconv2DLayer(net['encode_2'],
                      num_filters=base_n_filters * 16,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=nonlinearity,
                      W=GlorotNormal()))

    net['concat1'] = ConcatLayer([net['upscale1'], net['contr_4_2']],
                                 cropping=(None, None, "center", "center"))
    net['expand_1_1'] = batch_norm(
        ConvLayer(net['concat1'],
                  num_filters=base_n_filters * 8,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['expand_1_2'] = batch_norm(
        ConvLayer(net['expand_1_1'],
                  num_filters=base_n_filters * 8,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['upscale2'] = batch_norm(
        Deconv2DLayer(net['expand_1_2'],
                      num_filters=base_n_filters * 8,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=nonlinearity,
                      W=GlorotNormal()))

    net['concat2'] = ConcatLayer([net['upscale2'], net['contr_3_2']],
                                 cropping=(None, None, "center", "center"))
    net['expand_2_1'] = batch_norm(
        ConvLayer(net['concat2'],
                  num_filters=base_n_filters * 4,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['expand_2_2'] = batch_norm(
        ConvLayer(net['expand_2_1'],
                  num_filters=base_n_filters * 4,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['upscale3'] = batch_norm(
        Deconv2DLayer(net['expand_2_2'],
                      num_filters=base_n_filters * 4,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=nonlinearity,
                      W=GlorotNormal()))

    net['concat3'] = ConcatLayer([net['upscale3'], net['contr_2_2']],
                                 cropping=(None, None, "center", "center"))
    net['expand_3_1'] = batch_norm(
        ConvLayer(net['concat3'],
                  num_filters=base_n_filters * 2,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['expand_3_2'] = batch_norm(
        ConvLayer(net['expand_3_1'],
                  num_filters=base_n_filters * 2,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['upscale4'] = batch_norm(
        Deconv2DLayer(net['expand_3_2'],
                      num_filters=base_n_filters * 2,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=nonlinearity,
                      W=GlorotNormal()))

    net['concat4'] = ConcatLayer([net['upscale4'], net['contr_1_2']],
                                 cropping=(None, None, "center", "center"))
    net['expand_4_1'] = batch_norm(
        ConvLayer(net['concat4'],
                  num_filters=base_n_filters,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['expand_4_2'] = batch_norm(
        ConvLayer(net['expand_4_1'],
                  num_filters=base_n_filters,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))

    net['output_segmentation'] = ConvLayer(net['expand_4_2'],
                                           num_filters=num_output_classes,
                                           filter_size=1,
                                           nonlinearity=None)
    net['dimshuffle'] = DimshuffleLayer(net['output_segmentation'],
                                        (1, 0, 2, 3))
    net['reshapeSeg'] = ReshapeLayer(net['dimshuffle'],
                                     (num_output_classes, -1))
    net['dimshuffle2'] = DimshuffleLayer(net['reshapeSeg'], (1, 0))
    net['output_flattened'] = NonlinearityLayer(
        net['dimshuffle2'], nonlinearity=lasagne.nonlinearities.softmax)

    if weights is not None:
        lasagne.layers.set_all_param_values(net['output_flattened'], weights)

    return net
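A quick sanity check of the construction (not part of the original), assuming lasagne is imported; count_params walks the whole graph from the flattened output:

net = build_UNet(n_input_channels=3, num_output_classes=2, input_dim=(128, 128))
print(lasagne.layers.count_params(net['output_flattened']))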
def test_glorot_normal_receptive_field():
    from lasagne.init import GlorotNormal

    sample = GlorotNormal().sample((50, 50, 2))
    assert -0.01 < sample.mean() < 0.01
    assert 0.09 < sample.std() < 0.11
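For shapes with more than two dimensions, the trailing axes count as the receptive field, so std = gain * sqrt(2 / ((shape[0] + shape[1]) * prod(shape[2:]))); for (50, 50, 2) that is exactly 0.1, matching the assertion above:

import numpy as np

shape = (50, 50, 2)
print(np.sqrt(2.0 / ((shape[0] + shape[1]) * np.prod(shape[2:]))))  # 0.1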
def test_glorot_1d_not_supported():
    import pytest
    from lasagne.init import GlorotNormal

    with pytest.raises(RuntimeError):
        GlorotNormal().sample((100, ))

def test_glorot_normal():
    from lasagne.init import GlorotNormal

    sample = GlorotNormal().sample((100, 100))
    assert -0.01 < sample.mean() < 0.01
    assert 0.09 < sample.std() < 0.11
Example #19
# Excerpt from an sklearn-style wrapper; assumes module-level imports such as:
#   from numpy.random import seed
#   from lasagne.nonlinearities import (tanh, sigmoid, rectify, softmax,
#                                       LeakyRectify)
#   from lasagne.init import (Orthogonal, Sparse, GlorotNormal, GlorotUniform,
#                             HeNormal, HeUniform)
#   from sklearn.base import BaseEstimator, ClassifierMixin
# and a module-level SEED constant.
seed(SEED)
print('set random seed to {0} while loading NNet'.format(SEED))

nonlinearities = {
    'tanh': tanh,
    'sigmoid': sigmoid,
    'rectify': rectify,
    'leaky2': LeakyRectify(leakiness=0.02),
    'leaky20': LeakyRectify(leakiness=0.2),
    'softmax': softmax,
}

initializers = {
    'orthogonal': Orthogonal(),
    'sparse': Sparse(),
    'glorot_normal': GlorotNormal(),
    'glorot_uniform': GlorotUniform(),
    'he_normal': HeNormal(),
    'he_uniform': HeUniform(),
}
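These lookup tables let the NNet estimator below accept plain strings in its constructor; for example (illustrative names):

W_init = initializers['glorot_normal']  # a lasagne.init.GlorotNormal instance
weights = W_init.sample((100, 100))     # draw an actual (100, 100) weight matrix
act = nonlinearities['leaky20']         # LeakyRectify(leakiness=0.2)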


class NNet(BaseEstimator, ClassifierMixin):
    def __init__(
        self,
        name='nameless_net',  # used for saving, so maybe make it unique
        dense1_size=60,
        dense1_nonlinearity='tanh',
        dense1_init='orthogonal',
        dense2_size=None,
        dense2_nonlinearity=None,  # inherits dense1
Example #20
def test_glorot_normal_c01b():
    from lasagne.init import GlorotNormal

    sample = GlorotNormal(c01b=True).sample((25, 2, 2, 25))
    assert -0.01 < sample.mean() < 0.01
    assert 0.09 < sample.std() < 0.11
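With c01b=True the fans come from the first and last axes and the middle two form the receptive field, so (25, 2, 2, 25) again gives std = 0.1, matching the bounds above:

import numpy as np

shape = (25, 2, 2, 25)  # c01b layout: (channels, rows, cols, filters)
print(np.sqrt(2.0 / ((shape[0] + shape[3]) * shape[1] * shape[2])))  # 0.1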
Example #23
    def create_dadgm_model(self, X, Y, n_dim, n_out, n_chan=1, n_class=10):
        # GaussianSampleLayer and GumbelSoftmaxSampleLayer are project-specific
        # layers; reshape and flatten are assumed to wrap lasagne's
        # ReshapeLayer and FlattenLayer.
        n_cat = 20  # number of categorical distributions
        n_lat = n_class * n_cat  # latent stochastic variables
        n_aux = 10  # number of auxiliary variables
        n_hid = 500  # size of hidden layer in encoder/decoder
        n_in = n_out = n_dim * n_dim * n_chan
        tau = self.tau
        hid_nl = T.nnet.relu
        relu_shift = lambda av: T.nnet.relu(av + 10) - 10

        # create the encoder network
        # - create q(a|x)
        qa_net_in = InputLayer(shape=(None, n_in), input_var=X)
        qa_net = DenseLayer(
            qa_net_in,
            num_units=n_hid,
            W=GlorotNormal('relu'),
            b=Normal(1e-3),
            nonlinearity=hid_nl,
        )
        qa_net_mu = DenseLayer(
            qa_net,
            num_units=n_aux,
            W=GlorotNormal(),
            b=Normal(1e-3),
            nonlinearity=None,
        )
        qa_net_logsigma = DenseLayer(
            qa_net,
            num_units=n_aux,
            W=GlorotNormal(),
            b=Normal(1e-3),
            nonlinearity=relu_shift,
        )
        qa_net_sample = GaussianSampleLayer(qa_net_mu, qa_net_logsigma)
        # - create q(z|a, x)
        qz_net_in = lasagne.layers.InputLayer((None, n_aux))
        qz_net_a = DenseLayer(
            qz_net_in,
            num_units=n_hid,
            nonlinearity=hid_nl,
        )
        qz_net_b = DenseLayer(
            qa_net_in,
            num_units=n_hid,
            nonlinearity=hid_nl,
        )
        qz_net = ElemwiseSumLayer([qz_net_a, qz_net_b])
        qz_net = DenseLayer(qz_net, num_units=n_hid, nonlinearity=hid_nl)
        qz_net_mu = DenseLayer(
            qz_net,
            num_units=n_lat,
            nonlinearity=None,
        )
        qz_net_mu = reshape(qz_net_mu, (-1, n_class))
        qz_net_sample = GumbelSoftmaxSampleLayer(qz_net_mu, tau)
        qz_net_sample = reshape(qz_net_sample, (-1, n_cat, n_class))
        # create the decoder network
        # - create p(x|z)
        px_net_in = lasagne.layers.InputLayer((None, n_cat, n_class))
        # --- rest is created from RBM ---
        # - create p(a|z)
        pa_net = DenseLayer(
            flatten(px_net_in),
            num_units=n_hid,
            W=GlorotNormal('relu'),
            b=Normal(1e-3),
            nonlinearity=hid_nl,
        )
        pa_net_mu = DenseLayer(
            pa_net,
            num_units=n_aux,
            W=GlorotNormal(),
            b=Normal(1e-3),
            nonlinearity=None,
        )
        pa_net_logsigma = DenseLayer(
            pa_net,
            num_units=n_aux,
            W=GlorotNormal(),
            b=Normal(1e-3),
            nonlinearity=relu_shift,
        )
        # save network params
        self.n_cat = n_cat
        self.input_layers = (qa_net_in, qz_net_in, px_net_in)

        return pa_net_mu, pa_net_logsigma, qz_net_mu, \
            qa_net_mu, qa_net_logsigma, qz_net_sample, qa_net_sample
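For intuition, a minimal NumPy sketch of the Gumbel-Softmax relaxation that GumbelSoftmaxSampleLayer applies symbolically to qz_net_mu with temperature tau; the function name and the use of NumPy are illustrative, not the layer's actual implementation:

import numpy as np

def gumbel_softmax_sample(logits, tau, rng=np.random):
    # Add Gumbel(0, 1) noise, divide by the temperature, then softmax:
    g = -np.log(-np.log(rng.uniform(size=logits.shape)))
    y = (logits + g) / tau
    e = np.exp(y - y.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)  # relaxed one-hot samples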