def cond_erg_enc_net(_incoming, _cond, noise_sigma=0.05, drop_ratio_conv=0.1):
    # _noise = L.GaussianNoiseLayer(_incoming, sigma=noise_sigma)  # disabled, so noise_sigma is currently unused
    _drop1 = L.DropoutLayer(_incoming, p=drop_ratio_conv, rescale=True)
    _drop1 = plu.concat_tc(_drop1, _cond)
    _conv1 = batch_norm(
        conv(_drop1,
             num_filters=128,
             filter_size=4,
             stride=2,
             pad=1,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.LeakyRectify(0.02)))

    _drop2 = L.DropoutLayer(_conv1, p=drop_ratio_conv, rescale=True)
    _drop2 = plu.concat_tc(_drop2, _cond)
    _emb = batch_norm(
        conv(_drop2,
             num_filters=256,
             filter_size=4,
             stride=2,
             pad=1,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.LeakyRectify(0.02)))
    return _emb
Example No. 2
def nonlinearity(name):

    nonlinearities = {
        'rectify': nl.rectify,
        'relu': nl.rectify,
        'lrelu': nl.LeakyRectify(0.01),
        'vlrelu': nl.LeakyRectify(0.33),
        'elu': nl.elu,
        'softmax': nl.softmax,
        'sigmoid': nl.sigmoid,
        'identity': nl.identity
    }

    return nonlinearities[name]
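
A minimal usage sketch (not part of the original snippet), assuming the same `nl` alias for lasagne.nonlinearities that the lookup table above relies on:

# Sketch: resolve an activation by name and hand it to a layer.
from lasagne import layers
from lasagne import nonlinearities as nl

l_in = layers.InputLayer((None, 100))
# 'vlrelu' maps to LeakyRectify(0.33), a "very leaky" rectifier.
l_hid = layers.DenseLayer(l_in, num_units=50,
                          nonlinearity=nonlinearity('vlrelu'))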
Example No. 3
def set_rec_net(num_filters,
                filtsize,
                superres=1,
                nonlin=lnl.LeakyRectify(0.2),
                AR=False,
                n_rnn_units=128,
                n_features=13):

    input_l = ll.InputLayer((None, None))
    rec_nn = ll.DimshuffleLayer(input_l, (0, 'x', 1))
    hevar = np.sqrt(np.sqrt(2 / (1 + 0.2**2)))

    if nonlin == lnl.tanh:
        init = lasagne.init.GlorotUniform()
    else:
        init = lasagne.init.HeNormal(hevar)

    for num, size in zip(num_filters, filtsize):
        rec_nn = ll.Conv1DLayer(rec_nn,
                                num_filters=num,
                                filter_size=size,
                                stride=1,
                                pad='same',
                                nonlinearity=nonlin,
                                name='conv1',
                                W=init)

    if not AR:
        prob_nn = ll.Conv1DLayer(rec_nn,
                                 num_filters=superres,
                                 filter_size=11,
                                 stride=1,
                                 pad='same',
                                 nonlinearity=lnl.sigmoid,
                                 name='conv2',
                                 b=lasagne.init.Constant(-3.))
        prob_nn = ll.DimshuffleLayer(prob_nn, (0, 2, 1))
        prob_nn = ll.FlattenLayer(prob_nn)
    else:
        prob_nn = ll.Conv1DLayer(rec_nn,
                                 num_filters=n_features,
                                 filter_size=11,
                                 stride=1,
                                 pad='same',
                                 nonlinearity=nonlin,
                                 name='conv2')
        prob_nn = ll.DimshuffleLayer(prob_nn, (0, 2, 1))

    return {'network': prob_nn,
            'input': input_l,
            'superres': superres,
            'n_features': n_features,
            'rnn_units': n_rnn_units}
Example No. 4
def build_discriminator(input_var=None,
                        nfilters=[64, 128, 256, 512],
                        input_channels=3):

    ###############################
    # Build Network Configuration #
    ###############################

    print('... Building the discriminator')

    leaky = nonlinearities.LeakyRectify(0.2)

    # Input of the network : shape = (batch_size, 3, 64, 64)
    network = layers.InputLayer(shape=(None, input_channels, 64, 64),
                                input_var=input_var)

    # Conv layer : shape = (batch_size, 64, 32, 32)
    network = layers.Conv2DLayer(network,
                                 num_filters=nfilters[0],
                                 filter_size=(5, 5),
                                 stride=2,
                                 pad=2,
                                 nonlinearity=leaky)

    # Conv layer : shape = (batch_size, 128, 16, 16)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[1],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Conv layer : shape = (batch_size, 256, 8, 8)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[2],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Conv layer : shape = (batch_size, 512, 4, 4)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[3],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Flatten layer :shape = (batch_size, 8192)
    network = lasagne.layers.FlattenLayer(network)

    # Dense layer :shape = (batch_size, 1)
    network = lasagne.layers.DenseLayer(
        network, 1, nonlinearity=lasagne.nonlinearities.sigmoid)

    return network
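
A hedged usage sketch, assuming the standard lasagne/theano imports aliased in the snippet above: compile the discriminator into a callable that scores a batch of images.

# Sketch only: wire the discriminator into a Theano function.
import numpy as np
import theano
import theano.tensor as T
import lasagne

images = T.tensor4('images')              # (batch, 3, 64, 64)
disc = build_discriminator(input_var=images)
p_real = lasagne.layers.get_output(disc)  # sigmoid probabilities
predict_fn = theano.function([images], p_real)

batch = np.random.rand(8, 3, 64, 64).astype(theano.config.floatX)
print(predict_fn(batch).shape)            # expected: (8, 1)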
    def init_discriminator(self, first_layer, input_var=None):
        """
        Initialize the DCGAN discriminator network using lasagne
        Returns the network
        """

        lrelu = nonlinearities.LeakyRectify(0.2)
        layers = []

        l_in = lyr.InputLayer((None, 3, 64, 64), input_var)
        layers.append(l_in)

        l_1 = lyr.Conv2DLayer(incoming=l_in,
                              num_filters=first_layer,
                              filter_size=5,
                              stride=2,
                              pad=2,
                              nonlinearity=lrelu)
        layers.append(l_1)

        l_2 = lyr.batch_norm(
            lyr.Conv2DLayer(incoming=l_1,
                            num_filters=first_layer * 2,
                            filter_size=5,
                            stride=2,
                            pad=2,
                            nonlinearity=lrelu))
        layers.append(l_2)

        l_3 = lyr.batch_norm(
            lyr.Conv2DLayer(incoming=l_2,
                            num_filters=first_layer * 4,
                            filter_size=5,
                            stride=2,
                            pad=2,
                            nonlinearity=lrelu))
        layers.append(l_3)

        l_4 = lyr.batch_norm(
            lyr.Conv2DLayer(incoming=l_3,
                            num_filters=first_layer * 8,
                            filter_size=5,
                            stride=2,
                            pad=2,
                            nonlinearity=lrelu))
        l_4 = lyr.FlattenLayer(l_4)
        layers.append(l_4)

        l_out = lyr.DenseLayer(incoming=l_4,
                               num_units=1,
                               nonlinearity=nonlinearities.sigmoid)
        layers.append(l_out)

        if self.verbose:
            for i, layer in enumerate(layers):
                print 'discriminator layer %s output shape:' % i, layer.output_shape

        return l_out
def erg_enc_net(_input, _cond=None):
    if _cond is not None:
        _input = plu.concat_tc(_input, _cond)
    _conv1 = batch_norm(
        conv(_input,
             num_filters=64,
             filter_size=4,
             stride=2,
             pad=1,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.LeakyRectify(0.1)))
    #
    if _cond is not None:
        _conv1 = plu.concat_tc(_conv1, _cond)
    _conv2 = batch_norm(
        conv(_conv1,
             num_filters=128,
             filter_size=4,
             stride=2,
             pad=0,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.LeakyRectify(0.1)))
    #
    if _cond is not None:
        _conv2 = plu.concat_tc(_conv2, _cond)
    _emb = batch_norm(
        conv(_conv2,
             num_filters=256,
             filter_size=3,
             stride=1,
             pad=0,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.LeakyRectify(0.1)))
    return _emb
def erg_dec_net(_emb, _cond):
    if _cond is not None:
        _conv3 = plu.concat_tc(_emb, _cond)
    else:
        _conv3 = _emb
    _deconv1 = batch_norm(
        deconv(_conv3,
               num_filters=128,
               filter_size=3,
               stride=1,
               crop=0,
               W=I.Normal(0.02),
               b=None,
               nonlinearity=NL.LeakyRectify(0.01)))
    #
    if _cond is not None:
        _deconv1 = plu.concat_tc(_deconv1, _cond)
    _deconv2 = deconv(_deconv1,
                      num_filters=64,
                      filter_size=4,
                      stride=2,
                      crop=0,
                      W=I.Normal(0.02),
                      b=None,
                      nonlinearity=NL.LeakyRectify(0.01))
    #
    if _cond is not None:
        _deconv2 = plu.concat_tc(_deconv2, _cond)
    _deconv3 = deconv(_deconv2,
                      num_filters=npc,  # npc (output channel count) is defined in the enclosing module
                      filter_size=4,
                      stride=2,
                      crop=1,
                      W=I.Normal(0.02),
                      b=None,
                      nonlinearity=None)
    return _deconv3
Example No. 8
    def __init__(self,
                 incoming,
                 num_filters,
                 filter_size,
                 noise_std=0.01,
                 stride=(1, 1),
                 untie_biases=False,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.LeakyRectify(0.05),
                 flip_filters=True,
                 convolution=T.nnet.conv2d,
                 seed=12347,
                 **kwargs):
        pad = 'same'
        super(NoisyDiff2DLayer,
              self).__init__(incoming, num_filters, filter_size, noise_std,
                             stride, pad, untie_biases, W, b, nonlinearity,
                             flip_filters, convolution, seed, **kwargs)
Example No. 9
    def __init__(self,
                 incoming,
                 num_units,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.LeakyRectify(0.1),
                 num_leading_axes=1,
                 normalization='global',
                 eps=1.0e-6,
                 **kwargs):
        super(RestrictedDenseLayer,
              self).__init__(incoming, num_units, W, b, nonlinearity,
                             num_leading_axes, **kwargs)

        self.normalization = normalization
        self.eps = eps

        if normalization not in ['unit', 'global']:
            raise ValueError("normalization must be one of ['unit', 'global']")
def cond_erg_dec_net(_emb, _cond):
    _deconv1 = plu.concat_tc(_emb, _cond)
    _deconv1 = deconv(_deconv1,
                      num_filters=128,
                      filter_size=4,
                      stride=2,
                      crop=1,
                      W=I.Normal(0.02),
                      b=I.Constant(0),
                      nonlinearity=NL.LeakyRectify(0.02))

    _deconv2 = plu.concat_tc(_deconv1, _cond)
    _deconv2 = deconv(_deconv2,
                      num_filters=npc,
                      filter_size=4,
                      stride=2,
                      crop=1,
                      W=I.Normal(0.02),
                      b=I.Constant(0),
                      nonlinearity=None)
    return _deconv2
Example No. 11
    def __init__(self,
                 incoming,
                 num_filters,
                 filter_size,
                 noise_std=0.01,
                 stride=(1, 1),
                 pad=0,
                 untie_biases=False,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.LeakyRectify(0.05),
                 flip_filters=True,
                 convolution=T.nnet.conv2d,
                 seed=12347,
                 **kwargs):
        super(NoisyConv2DLayer,
              self).__init__(incoming, num_filters, filter_size, stride, pad,
                             untie_biases, W, b, nonlinearity, flip_filters,
                             convolution, **kwargs)

        self.noise_std = noise_std
        self.srng = MRG_RandomStreams(seed=seed)
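
    # --- Hypothetical sketch, not in the original source ----------------
    # The __init__ above only stores noise_std and a random stream; the
    # forward pass is defined elsewhere. One plausible override would add
    # Gaussian noise to the convolution output at training time only:
    def get_output_for(self, input, deterministic=False, **kwargs):
        activation = super(NoisyConv2DLayer,
                           self).get_output_for(input, **kwargs)
        if deterministic or self.noise_std == 0:
            return activation
        noise = self.srng.normal(activation.shape, std=self.noise_std)
        return activation + noise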
Example No. 12

nnet3 = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('hidden1', layers.DenseLayer),
        ('dropout1', layers.DropoutLayer),
        ('hidden2', layers.DenseLayer),
        ('dropout2', layers.DropoutLayer),
        ('hidden3', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    # layer parameters:
    input_shape=(None, 93),  # 93 input features per example
    hidden1_num_units=800,  # number of units in hidden layer
    hidden1_nonlinearity=nonlinearities.LeakyRectify(leakiness=0.1),
    dropout1_p=0.5,
    hidden2_num_units=800,
    hidden2_nonlinearity=nonlinearities.LeakyRectify(leakiness=0.1),
    dropout2_p=0.5,
    hidden3_num_units=800,

    #hidden5_nonlinearity=nonlinearities.rectify,
    #dropout5_p=0.5,
    output_nonlinearity=nonlinearities.softmax,  # output layer uses softmax
    output_num_units=9,  # 9 target classes
    objective=L2Regularization,
    objective_alpha=1E-9,
    eval_size=0.0,
)
Example No. 13
 dropout1_p=0.5,
 #dense1_nonlinearity=sigmoid,
 dense2_num_units=dense_list[2],
 dropout2_p=0.5,
 #dense2_nonlinearity=sigmoid,
 #dense3_num_units=dense_list[3],
 #dropout3_p=0.5,
 #dense4_num_units=900,
 #dropout4_p=0.5,
 #dropout2_p=0.5,
 #dense3_num_units=550,
 #dropout3_p=0.4,
 #dense4_num_units=512,
 #dropout4_p=0.3,
 #hidden4_nonlinearity=lasagne.nonlinearities.sigmoid,
 dense0_nonlinearity=nonlin.LeakyRectify(
     leakiness=leakness_list[0]),
 dense1_nonlinearity=nonlin.LeakyRectify(
     leakiness=leakness_list[1]),
 dense2_nonlinearity=nonlin.LeakyRectify(
     leakiness=leakness_list[2]),
 #dense3_nonlinearity= nonlin.LeakyRectify(leakiness=leakness_list[3]),
 #dense0_nonlinearity= nonlin.LeakyRectify(0.1),
 #dense1_nonlinearity= nonlin.LeakyRectify(0.1),
 #dense2_nonlinearity= nonlin.LeakyRectify(0.1),
 output_num_units=1,
 output_nonlinearity=sigmoid,
 update=adagrad,
 #update=adadelta,
 #update=nesterov_momentum,
 objective_loss_function=binary_crossentropy,
 y_tensor_type=T.imatrix,
Example No. 14
def set_rec_net(num_filters,
                filtsize,
                nonlin=lnl.LeakyRectify(0.2),
                AR=False,
                FB=False,
                n_rnn_units=64,
                n_features=13,
                n_genparams=0,
                p_sigma=None):
    """
    :param num_filters: Number of filters in each layer for the convolutional network, also sets number of layers
    :param filtsize: Size of the filters in each layer
    :param nonlin: Nonlinearity used
    
    These parameters are only relevant if an RNN is used to obtain a correlated posterior estimate
    
    :param AR: Whether this network is used as the first stage of an auto-regressive network
    :param FB: Whether the auto-regressive network uses a backwards running RNN
    :param n_rnn_units: Number of units in the RNN
    :param n_features: Number of features passed from the CNN to the RNN
    :param n_genparams: Number of generative model parameters inferred by the recognition network
    :param p_sigma: Standard deviation of the prior on the inferred generative model parameters.
    """
    input_l = ll.InputLayer((None, None))
    rec_nn = ll.DimshuffleLayer(input_l, (0, 'x', 1))
    hevar = np.sqrt(np.sqrt(2 / (1 + 0.2**2)))
    convout_nonlin = nonlin
    if n_features == 1: convout_nonlin = lnl.linear

    if nonlin == lnl.tanh:
        init = lasagne.init.GlorotUniform()
    else:
        init = lasagne.init.HeNormal(hevar)

    for num, size in zip(num_filters, filtsize):

        rec_nn = (ll.Conv1DLayer(rec_nn,
                                 num_filters=num,
                                 filter_size=size,
                                 stride=1,
                                 pad='same',
                                 nonlinearity=nonlin,
                                 name='conv_filter',
                                 W=init))

    if not AR:
        prob_nn = (ll.Conv1DLayer(rec_nn,
                                  num_filters=1,
                                  filter_size=11,
                                  stride=1,
                                  pad='same',
                                  nonlinearity=lnl.sigmoid,
                                  name='conv_out',
                                  b=lasagne.init.Constant(-3.)))
        prob_nn = ll.DimshuffleLayer(prob_nn, (0, 2, 1))
        prob_nn = ll.FlattenLayer(prob_nn)
    else:
        prob_nn = (ll.Conv1DLayer(rec_nn,
                                  num_filters=n_features,
                                  filter_size=11,
                                  stride=1,
                                  pad='same',
                                  nonlinearity=convout_nonlin,
                                  name='conv_out'))
        prob_nn = ll.DimshuffleLayer(prob_nn, (0, 2, 1))
        if n_features == 1: prob_nn = ll.FlattenLayer(prob_nn)

    return {
        'network': prob_nn,
        'input': input_l,
        'n_features': n_features,
        'rnn_units': n_rnn_units,
        'n_genparams': n_genparams,
        'p_sigma': p_sigma,
        'forw_backw': FB
    }
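
A usage sketch under stated assumptions (variable-length 1-D input traces; the `ll`/`lnl` aliases and imports used above):

# Sketch: build the recognition network and evaluate it on random traces.
import numpy as np
import theano
import theano.tensor as T
import lasagne.layers as ll

net = set_rec_net(num_filters=[16, 32], filtsize=[31, 11])
traces = T.matrix('traces')  # (batch, time)
probs = ll.get_output(net['network'], inputs={net['input']: traces})
eval_fn = theano.function([traces], probs)

batch = np.random.rand(4, 200).astype(theano.config.floatX)
print(eval_fn(batch).shape)  # expected: (4, 200)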
Example No. 15
from lasagne import layers, nonlinearities

import theano.tensor as T

__all__ = [
    'take', 'minimum', 'maximum', 'concat', 'noise', 'nothing', 'dropout',
    'dense', 'select', 'batch_norm', 'elementwise', 'elementwise_sum',
    'elementwise_mean', 'flatten', 'feature_pool', 'nonlinearity'
]

get_common_nonlinearity = lambda f=None: nonlinearities.LeakyRectify(
    0.1) if f is None else f

minimum = lambda: lambda incomings: layers.ElemwiseMergeLayer(
    incomings, merge_function=T.minimum)
maximum = lambda: lambda incomings: layers.ElemwiseMergeLayer(
    incomings, merge_function=T.maximum)
concat = lambda axis=1: lambda incomings: layers.ConcatLayer(incomings,
                                                             axis=axis)

noise = lambda sigma=0.1: lambda incoming: \
  layers.GaussianNoiseLayer(incoming, sigma=sigma) if sigma is not None and sigma > 0 else incoming

nothing = lambda incoming: incoming

dense = lambda num_units, f=None: lambda incoming: \
  layers.DenseLayer(incoming, num_units=num_units, nonlinearity=(nonlinearities.LeakyRectify(0.05) if f is None else f))

dropout = lambda p=0.1, rescale=True: lambda incoming: \
  layers.DropoutLayer(incoming, p=p, rescale=rescale) if p is not None else incoming
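
A short composition sketch, assuming only the combinators defined above: each factory returns a closure that builds one layer, so a model is plain function application.

# Sketch: chain the factories over an input layer.
l_in = layers.InputLayer((None, 32))
net = noise(0.1)(l_in)                         # GaussianNoiseLayer(sigma=0.1)
net = dense(64)(net)                           # DenseLayer + LeakyRectify(0.05)
net = dropout(0.2)(net)                        # DropoutLayer(p=0.2)
net = dense(1, f=nonlinearities.sigmoid)(net)  # sigmoid output head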
Example No. 16
def build_context_encoder(input_var=None,
                          nfilters=[64, 128, 256, 512, 3000, 512, 256, 128, 3],
                          input_channels=3):
    ###############################
    # Build Network Configuration #
    ###############################

    print('... Building the generator')

    leaky = nonlinearities.LeakyRectify(0.2)

    # Input of the network : shape = (batch_size, 3, 64, 64)
    network = layers.InputLayer(shape=(None, input_channels, 64, 64),
                                input_var=input_var)

    # Conv layer : shape = (batch_size, 64, 32, 32)
    network = layers.Conv2DLayer(network,
                                 num_filters=nfilters[0],
                                 filter_size=(5, 5),
                                 stride=2,
                                 pad=2,
                                 nonlinearity=leaky)

    # Conv layer : shape = (batch_size, 128, 16, 16)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[1],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Conv layer : shape = (batch_size, 256, 8, 8)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[2],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Conv layer : shape = (batch_size, 512, 4, 4)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[3],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Conv layer : shape = (batch_size, 3000, 1, 1)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[4],
                                   filter_size=(4, 4),
                                   stride=2,
                                   nonlinearity=leaky))

    # Transposed conv layer : shape = (batch_size, 512, 4, 4)
    network = layers.batch_norm(
        layers.TransposedConv2DLayer(network,
                                     num_filters=nfilters[5],
                                     filter_size=(4, 4),
                                     stride=(1, 1)))

    # Transposed conv layer : shape = (batch_size, 256, 8, 8)
    network = layers.batch_norm(
        layers.TransposedConv2DLayer(network,
                                     num_filters=nfilters[6],
                                     filter_size=(5, 5),
                                     stride=(2, 2),
                                     crop=2,
                                     output_size=8))

    # Transposed conv layer : shape = (batch_size, 128, 16, 16)
    network = layers.batch_norm(
        layers.TransposedConv2DLayer(network,
                                     num_filters=nfilters[7],
                                     filter_size=(5, 5),
                                     stride=(2, 2),
                                     crop=2,
                                     output_size=16))

    # Transposed conv layer : shape = (batch_size, 3, 32, 32)
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=nfilters[8],
                                           filter_size=5,
                                           stride=2,
                                           crop=2,
                                           output_size=32,
                                           nonlinearity=nonlinearities.sigmoid)

    return network
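
A brief shape check (sketch, assuming the same lasagne imports as above) confirming the documented 32x32 output of the bottleneck-and-upsample path:

# Sketch: verify the advertised output shape without compiling anything.
import lasagne
net = build_context_encoder()
print(lasagne.layers.get_output_shape(net))  # expected: (None, 3, 32, 32)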
Example No. 17
def build_conv_d(batch_size,
                 img_size,
                 n_conv_layer=4,
                 gan_mode=None,
                 wf=1,
                 filter_size=5,
                 fixed_nchan=None,
                 d_nl=ln.LeakyRectify(.2),
                 end_conv_layer=ll.FlattenLayer,
                 d_bn=None,
                 d_dp=None,
                 d_recon_reg=0,
                 bre_loc_mode='',
                 **kwargs):

    if d_bn is None:

        def d_bn(x):
            return x

    if d_dp is None:

        def d_dp(x):
            return x

    conv = ll.Conv2DLayer
    nl_layer = ll.NonlinearityLayer

    d_layers = OrderedDict([])
    d_register = registery_factory(d_layers)

    l_data = ll.InputLayer(shape=(None, 3, img_size, img_size))

    n_channel = fixed_nchan if fixed_nchan is not None else int(
        wf * _base_hidden_nf)

    name = layer_bre_name(bre_loc_mode, 0, n_conv_layer)

    l_h = d_register(
        conv(l_data,
             num_filters=n_channel,
             filter_size=(filter_size, filter_size),
             stride=1,
             pad='same',
             nonlinearity=None), name)
    l_h = d_dp(d_register(nl_layer(l_h, nonlinearity=d_nl)))

    for idx in xrange(1, n_conv_layer):
        if fixed_nchan:
            n_channel = fixed_nchan
        else:
            n_channel *= 2

        print bre_loc_mode

        name = layer_bre_name(bre_loc_mode, idx, n_conv_layer)

        l_h = d_register(
            conv(l_h,
                 num_filters=n_channel,
                 filter_size=(filter_size, filter_size),
                 stride=2,
                 pad='same',
                 nonlinearity=None), name)

        l_h = d_dp(d_bn(d_register(nl_layer(l_h, nonlinearity=d_nl))))

    l_h = d_register(end_conv_layer(l_h), 'l_h_feat')
    l_out_d = critic_output_layer(l_h, gan_mode, register_func=d_register)

    return l_out_d, l_data, d_layers
Example No. 18
    def makeDiscriminator(self, aNBatch, aX, aXShape, aY, aYSize):
        #(D1)
        yb = aY.dimshuffle(0, 1, 'x', 'x')

        #(D2)
        layer_X = ll.InputLayer(shape=aXShape, input_var=aX, name='lX')
        layer_Y = ll.InputLayer(shape=(aNBatch, aYSize),
                                input_var=aY,
                                name='lY')
        dis = self.conv_cond_concat(layer_X, yb, aYSize)

        #(D3), (D4)
        if self.IS_DIS_BIN:
            dis = binary_net_ex.Conv2DLayer(
                dis,
                num_filters=NUM_DIS_FILTERS,
                filter_size=(5, 5),
                stride=(2, 2),
                nonlinearity=ln.LeakyRectify(0.2),  #TODO
                pad=2,
                binary=True,
                stochastic=IS_STOCHASTIC,
                H=H,
                W_LR_scale=W_LR_scale)
        else:
            dis = ll.Conv2DLayer(dis,
                                 num_filters=NUM_DIS_FILTERS,
                                 filter_size=(5, 5),
                                 stride=(2, 2),
                                 nonlinearity=ln.LeakyRectify(0.2),
                                 pad=2)
        print 'D4:', dis.output_shape  # (128, 64, 14, 14)

        #(D5)
        dis = self.conv_cond_concat(dis, yb, aYSize)

        #(D6)
        if self.IS_DIS_BIN:
            dis = binary_net_ex.Conv2DLayer(dis,
                                            num_filters=NUM_DIS_FILTERS * 2,
                                            filter_size=(5, 5),
                                            stride=(2, 2),
                                            nonlinearity=None,
                                            pad=2,
                                            binary=True,
                                            stochastic=IS_STOCHASTIC,
                                            H=H,
                                            W_LR_scale=W_LR_scale)
        else:
            dis = ll.Conv2DLayer(dis,
                                 num_filters=NUM_DIS_FILTERS * 2,
                                 filter_size=(5, 5),
                                 stride=(2, 2),
                                 nonlinearity=None,
                                 pad=2)
        print 'D6:', dis.output_shape  # (128, 128, 7, 7)

        dis = ll.BatchNormLayer(dis, epsilon=EPSILON, alpha=ALPHA)
        dis = ll.NonlinearityLayer(dis,
                                   nonlinearity=ln.LeakyRectify(0.2))  #TODO
        #(D7)
        dis = ll.FlattenLayer(dis, outdim=2)
        print 'D7:', dis.output_shape  # (128, 6272)
        #(D8)
        dis = ll.ConcatLayer([dis, layer_Y], axis=1)
        #(D9)
        if self.IS_DIS_BIN:
            dis = binary_net_ex.DenseLayer(
                dis,
                num_units=NUM_DIS_FC_UNITS,
                binary=True,
                stochastic=IS_STOCHASTIC,
                H=H,
                W_LR_scale=W_LR_scale,
                b=None,  #No Bias
                nonlinearity=None)
        else:
            dis = ll.DenseLayer(dis, num_units=NUM_DIS_FC_UNITS)

        dis = ll.BatchNormLayer(dis, epsilon=EPSILON, alpha=ALPHA)
        dis = ll.NonlinearityLayer(dis,
                                   nonlinearity=ln.LeakyRectify(0.2))  #TODO
        #(D10)
        dis = ll.ConcatLayer([dis, layer_Y], axis=1)

        #(D11) OUTPUT layer
        dis = ll.DenseLayer(dis, num_units=1, nonlinearity=ln.sigmoid)
        print 'D11:', dis.output_shape  # (128, 1)

        self.dis = dis
        return dis, layer_X, layer_Y
Example No. 19
def model_CENTSD_33conv(patch_size, batch_size, FAST_network=False, FAST_imgheight=None, FAST_imgwidth=None, nonlin_func='leaky'):
    """ Describes the main network used in the PatchBatch paper """

    if nonlin_func == 'leaky':
        nonlin = nonlinearities.LeakyRectify(leaky_param)
    elif nonlin_func == 'elu':
        nonlin = nonlinearities.elu
    else:
        raise ValueError('Unsupported non-linearity function: %s' % nonlin_func)

    if FAST_network:
        l_in0 = layers.InputLayer(
            shape=(1, 1, FAST_imgheight, FAST_imgwidth), name='l_in0')
    else:
        l_in0 = layers.InputLayer(
            shape=(batch_size, in_channels, patch_size, patch_size),
            name='l_in0')

    layer_params = {'conv_num_filters': 32,
                    'conv_filter_size': (3, 3),
                    'conv_border_mode': border_mode,
                    'conv_nonlinearity': nonlin,
                    'batch_norm': True,
                    'maxpool': True,
                    'maxpool_ds': (2, 2)}
    layer = layer_factory(in_layer=l_in0, layer_type='conv', **layer_params)

    layer_params = {'conv_num_filters': 64,
                    'conv_filter_size': (3, 3),
                    'conv_border_mode': border_mode,
                    'conv_nonlinearity': nonlin,
                    'batch_norm': True,
                    'maxpool': True,
                    'maxpool_ds': (2, 2)}
    layer = layer_factory(in_layer=layer, layer_type='conv', **layer_params)

    layer_params = {'conv_num_filters': 128,
                    'conv_filter_size': (3, 3),
                    'conv_border_mode': border_mode,
                    'conv_nonlinearity': nonlin,
                    'batch_norm': True,
                    'maxpool': True,
                    'maxpool_ds': (2, 2)}
    layer = layer_factory(in_layer=layer, layer_type='conv', **layer_params)

    layer_params = {'conv_num_filters': 256,
                    'conv_filter_size': (3, 3),
                    'conv_border_mode': border_mode,
                    'conv_nonlinearity': nonlin,
                    'batch_norm': True,
                    'maxpool': True,
                    'maxpool_ds': (2, 2)}
    layer = layer_factory(in_layer=layer, layer_type='conv', **layer_params)

    layer_params = {'conv_num_filters': 512,
                    'conv_filter_size': (2, 2),
                    'conv_border_mode': border_mode,
                    'conv_nonlinearity': nonlin,
                    'batch_norm': True,
                    'maxpool': False,
                    'maxpool_ds': (2, 2)}
    layer = layer_factory(in_layer=layer, layer_type='conv', **layer_params)

    return layer