Example #1
    def build_roadar_full(shape, input_var):
        net = {}
        #Input layer:
        net['input'] = InputLayer((None, shape[0], shape[1], shape[2]),
                                  input_var=input_var)
        #Convolution + Pooling
        net['conv_11'] = ConvLayer(net['input'],
                                   num_filters=64,
                                   filter_size=5,
                                   stride=1)
        net['pool_11'] = PoolLayer(net['conv_11'], 2, stride=2)

        net['conv_12'] = ConvLayer(net['pool_11'],
                                   num_filters=64,
                                   filter_size=5,
                                   stride=1)
        net['pool_12'] = PoolLayer(net['conv_12'], 2, stride=2)

        net['conv_21'] = ConvLayer(net['pool_12'],
                                   num_filters=128,
                                   filter_size=3)
        net['pool_21'] = PoolLayer(net['conv_21'], pool_size=2)
        #Fully-connected + dropout
        net['fc_1'] = DenseLayer(net['pool_21'], num_units=500)
        net['drop_1'] = DropoutLayer(net['fc_1'], p=0.5)
        #Output layer:
        net['out'] = DenseLayer(net['drop_1'],
                                num_units=2,
                                nonlinearity=lasagne.nonlinearities.softmax)
        return net
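
These builders return a dictionary of Lasagne layers rather than a compiled function. A minimal sketch of how such a dictionary is typically turned into a prediction function (the (3, 64, 64) input shape here is an assumption, not part of the original):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')                    # NCHW batch of images
net = build_roadar_full((3, 64, 64), input_var)    # hypothetical input shape

# deterministic=True disables the dropout layer at prediction time
prediction = lasagne.layers.get_output(net['out'], deterministic=True)
predict_fn = theano.function([input_var], prediction)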
Example #2
def build_simple_block(incoming_layer,
                       names,
                       num_filters,
                       filter_size,
                       stride,
                       pad,
                       use_bias=False,
                       nonlin=rectify):
    net = []
    net.append((names[0],
                ConvLayer(incoming_layer,
                          num_filters,
                          filter_size,
                          stride,
                          pad,
                          flip_filters=False,
                          nonlinearity=None)
                if use_bias else ConvLayer(incoming_layer,
                                           num_filters,
                                           filter_size,
                                           stride,
                                           pad,
                                           b=None,
                                           flip_filters=False,
                                           nonlinearity=None)))

    net.append((names[1], BatchNormLayer(net[-1][1])))
    if nonlin is not None:
        net.append((names[2], NonlinearityLayer(net[-1][1],
                                                nonlinearity=nonlin)))

    return dict(net), net[-1][0]
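
Because build_simple_block returns both the layer dictionary and the name of its last layer, blocks can be chained by keeping a running dict. A hedged usage sketch (the layer names and hyperparameters are illustrative, not from the original):

from lasagne.layers import InputLayer

net = {}
net['input'] = InputLayer((None, 3, 224, 224))

# each call appends conv/bn/relu layers and reports the new parent's name
sub_net, parent = build_simple_block(
    net['input'], ['conv1', 'conv1_bn', 'conv1_relu'],
    num_filters=64, filter_size=7, stride=2, pad=3)
net.update(sub_net)

sub_net, parent = build_simple_block(
    net[parent], ['conv2', 'conv2_bn', 'conv2_relu'],
    num_filters=64, filter_size=3, stride=1, pad=1)
net.update(sub_net)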
Example #3
def build_model(nActions):
    net = OrderedDict()
    net['input'] = InputLayer((None, 4, 84, 84))
    net['conv1'] = batch_norm(
        ConvLayer(net['input'],
                  num_filters=32,
                  filter_size=8,
                  stride=4,
                  pad='valid',
                  nonlinearity=ReLU))
    net['conv2'] = batch_norm(
        ConvLayer(net['conv1'],
                  num_filters=64,
                  filter_size=4,
                  stride=2,
                  pad='valid',
                  nonlinearity=ReLU))
    net['conv3'] = batch_norm(
        ConvLayer(net['conv2'],
                  num_filters=64,
                  filter_size=3,
                  stride=1,
                  pad='valid',
                  nonlinearity=ReLU))
    net['fc4'] = batch_norm(
        DenseLayer(net['conv3'], num_units=512, nonlinearity=ReLU))
    net['fc5'] = DenseLayer(net['fc4'],
                            num_units=nActions,
                            nonlinearity=linear)
    return net
Example #4
    def residual_block(l, increase_dim=False, first=False, filters=16):
        if increase_dim:
            first_stride = (2, 2)
        else:
            first_stride = (1, 1)

        if first:
            # hacky solution to keep layers correct
            bn_pre_relu = l
        else:
            # contains the BN -> ReLU portion, steps 1 to 2
            bn_pre_conv = BatchNormLayer(l)
            bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

        # contains the weight -> BN -> ReLU portion, steps 3 to 5
        conv_1 = batch_norm(
            ConvLayer(bn_pre_relu,
                      num_filters=filters,
                      filter_size=(3, 3),
                      stride=first_stride,
                      nonlinearity=rectify,
                      pad='same',
                      W=HeNormal(gain='relu')))

        dropout = DropoutLayer(conv_1, p=0.3)

        # contains the last weight portion, step 6
        conv_2 = ConvLayer(dropout,
                           num_filters=filters,
                           filter_size=(3, 3),
                           stride=(1, 1),
                           nonlinearity=None,
                           pad='same',
                           W=HeNormal(gain='relu'))

        # add shortcut connections
        if increase_dim:
            # projection shortcut, as option B in paper
            projection = ConvLayer(l,
                                   num_filters=filters,
                                   filter_size=(1, 1),
                                   stride=(2, 2),
                                   nonlinearity=None,
                                   pad='same',
                                   b=None)
            block = ElemwiseSumLayer([conv_2, projection])
        elif first:
            # projection shortcut, as option B in paper
            projection = ConvLayer(l,
                                   num_filters=filters,
                                   filter_size=(1, 1),
                                   stride=(1, 1),
                                   nonlinearity=None,
                                   pad='same',
                                   b=None)
            block = ElemwiseSumLayer([conv_2, projection])
        else:
            block = ElemwiseSumLayer([conv_2, l])

        return block
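
A full network is then assembled by stacking these blocks, doubling the filter count each time the resolution is halved. Below is a minimal sketch under assumed CIFAR-style dimensions and assuming residual_block is available at module scope; build_resnet, the depth parameter n, and the 10-class softmax head are illustrative, not part of the original:

from lasagne.layers import (InputLayer, DenseLayer, NonlinearityLayer,
                            BatchNormLayer, GlobalPoolLayer, batch_norm)
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.init import HeNormal
from lasagne.nonlinearities import rectify, softmax

def build_resnet(input_var, n=3):
    # initial 3x3 convolution with 16 feature maps
    l = InputLayer((None, 3, 32, 32), input_var=input_var)
    l = batch_norm(ConvLayer(l, num_filters=16, filter_size=(3, 3),
                             pad='same', W=HeNormal(gain='relu')))

    # first stack: 16 filters at full resolution
    l = residual_block(l, first=True)
    for _ in range(n - 1):
        l = residual_block(l)

    # each later stack halves the resolution and doubles the filters
    l = residual_block(l, increase_dim=True, filters=32)
    for _ in range(n - 1):
        l = residual_block(l, filters=32)
    l = residual_block(l, increase_dim=True, filters=64)
    for _ in range(n - 1):
        l = residual_block(l, filters=64)

    # final BN -> ReLU -> global average pooling -> softmax classifier
    l = NonlinearityLayer(BatchNormLayer(l), rectify)
    l = GlobalPoolLayer(l)
    return DenseLayer(l, num_units=10, nonlinearity=softmax)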
Example #5
def build_model(input_var, dro=0.5):
    net = {}
    net['input'] = InputLayer((None, 3, 299, 299), input_var=input_var)
    print(net['input'])
    net['conv1/7x7_s2'] = ConvLayer(
        net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
    print(net['conv1/7x7_s2'])
    net['pool1/3x3_s2'] = PoolLayer(
        net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(
        net['pool1/norm1'], 64, 1, flip_filters=False)
    net['conv2/3x3'] = ConvLayer(
        net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(
      net['conv2/norm2'], pool_size=3, stride=2, ignore_border=False)

    net.update(build_inception_module('inception_3a',
                                      net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b',
                                      net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(
      net['inception_3b/output'], pool_size=3, stride=2, ignore_border=False)

    net.update(build_inception_module('inception_4a',
                                      net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48], dro))
    net.update(build_inception_module('inception_4b',
                                      net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64], dro))
    net.update(build_inception_module('inception_4c',
                                      net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64], dro))
    net.update(build_inception_module('inception_4d',
                                      net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64], dro))
    net.update(build_inception_module('inception_4e',
                                      net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128], dro))
    net['pool4/3x3_s2'] = PoolLayer(
      net['inception_4e/output'], pool_size=3, stride=2, ignore_border=False)

    net.update(build_inception_module('inception_5a',
                                      net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b',
                                      net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))

    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['pool5/7x7_s1_dropout'] = DropoutLayer(net['pool5/7x7_s1'], p=dro)
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1_dropout'],
                                         num_units=1,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'],
                                    nonlinearity=sigmoid)
    return net
Example #6
    def build_model(self, input_batch):
        filter_size = self.dynamic_filter_size[0]

        ## get inputs
        input = InputLayer(input_var=input_batch[:, [0], :, :],
                           shape=(None, 1, self.npx, self.npx))
        theta = InputLayer(input_var=input_batch[:, [1], :, :],
                           shape=(None, 1, self.npx, self.npx))
        # theta = ReshapeLayer(theta, shape=(self.batch_size, 1, 1, 1))

        output = ConvLayer(theta,
                           num_filters=64,
                           filter_size=(1, 1),
                           stride=(1, 1),
                           pad='same',
                           nonlinearity=leaky_rectify)
        output = ConvLayer(output,
                           num_filters=128,
                           filter_size=(1, 1),
                           stride=(1, 1),
                           pad='same',
                           nonlinearity=leaky_rectify)
        filters = ConvLayer(output,
                            num_filters=filter_size**2,
                            filter_size=(1, 1),
                            stride=(1, 1),
                            pad='same',
                            nonlinearity=identity)

        image = SliceLayer(input, indices=slice(0, 1), axis=1)
        output = DynamicFilterLayer([image, filters],
                                    filter_size=(filter_size, filter_size, 1),
                                    pad=(filter_size // 2, filter_size // 2))

        return output, [output], filters
Example #7
    def __init__(self, weights=None, augmentation=False):
        super(GoogLeNet, self).__init__(weights, augmentation)

        def build_inception_module(name, input_layer, nfilters):
            # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
            net = {}
            net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
            net['pool_proj'] = ConvLayer(net['pool'], nfilters[0], 1, flip_filters=False)

            net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)

            net['3x3_reduce'] = ConvLayer(input_layer, nfilters[2], 1, flip_filters=False)
            net['3x3'] = ConvLayer(net['3x3_reduce'], nfilters[3], 3, pad=1, flip_filters=False)

            net['5x5_reduce'] = ConvLayer(input_layer, nfilters[4], 1, flip_filters=False)
            net['5x5'] = ConvLayer(net['5x5_reduce'], nfilters[5], 5, pad=2, flip_filters=False)

            net['output'] = lasagne.layers.ConcatLayer([
                net['1x1'],
                net['3x3'],
                net['5x5'],
                net['pool_proj']])

            return {'{}/{}'.format(name, k): v for k, v in net.items()}

        net = {}
        net['input'] = lasagne.layers.InputLayer((None, 3, 224, 224))
        net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
        net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
        net['pool1/norm1'] = lasagne.layers.LocalResponseNormalization2DLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
        net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1, flip_filters=False)
        net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
        net['conv2/norm2'] = lasagne.layers.LocalResponseNormalization2DLayer(net['conv2/3x3'], alpha=0.00002, k=1)
        net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2, ignore_border=False)

        net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'], [32, 64, 96, 128, 16, 32]))
        net.update(build_inception_module('inception_3b', net['inception_3a/output'], [64, 128, 128, 192, 32, 96]))
        net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3, stride=2, ignore_border=False)

        net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'], [64, 192, 96, 208, 16, 48]))

        net.update(build_inception_module('inception_4b', net['inception_4a/output'], [64, 160, 112, 224, 24, 64]))
        net.update(build_inception_module('inception_4c', net['inception_4b/output'], [64, 128, 128, 256, 24, 64]))
        net.update(build_inception_module('inception_4d', net['inception_4c/output'], [64, 112, 144, 288, 32, 64]))
        net.update(build_inception_module('inception_4e', net['inception_4d/output'], [128, 256, 160, 320, 32, 128]))
        net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3, stride=2, ignore_border=False)

        net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'], [128, 256, 160, 320, 32, 128]))
        net.update(build_inception_module('inception_5b', net['inception_5a/output'], [128, 384, 192, 384, 48, 128]))

        net['pool5/7x7_s1'] = lasagne.layers.GlobalPoolLayer(net['inception_5b/output'])
        net['dropout5'] = lasagne.layers.DropoutLayer(net['pool5/7x7_s1'], p=0.4)

        self.net = net
        self.out_layer = net['dropout5']

        if self.weights is not None:
            init_weights = self._get_weights_from_file(self.weights, 'param values')
            # the last two layers of the network (loss3/classifier and prob)
            # have been chopped off, so we won't need their weights
            init_weights = init_weights[:-2]
            lasagne.layers.set_all_param_values(self.out_layer, init_weights)
Example #8
    def residual_block(l, increase_dim=False, projection=False):
        input_num_filters = l.output_shape[1]
        if increase_dim:
            first_stride = (2,2)
            out_num_filters = input_num_filters*2
        else:
            first_stride = (1,1)
            out_num_filters = input_num_filters

        stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
        stack_2 = batch_norm(ConvLayer(stack_1, num_filters=out_num_filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))

        # add shortcut connections
        if increase_dim:
            if projection:
                # projection shortcut, as option B in paper
                projection = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None, flip_filters=False))
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, projection]),nonlinearity=rectify)
            else:
                # identity shortcut, as option A in paper
                identity = ExpressionLayer(l, lambda X: X[:, :, ::2, ::2], lambda s: (s[0], s[1], s[2]//2, s[3]//2))
                padding = PadLayer(identity, [out_num_filters//4,0,0], batch_ndim=1)
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, padding]),nonlinearity=rectify)
        else:
            block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]),nonlinearity=rectify)

        return block
Example #9
def build_Discriminator(input_var=None, img_size=[128, 128], base_units=64):
    discr = {}

    if input_var is None:
        discr["input"] = InputLayer((None, 3, img_size[0], img_size[1]))
    else:
        discr["input"] = InputLayer((None, 3, img_size[0], img_size[1]),
                                    input_var)

    discr["conv_1"] = ConvLayer(discr["input"],
                                base_units,
                                5,
                                nonlinearity=lref)
    discr["pool_1"] = Pool2DLayer(discr["input"], 2)

    discr["conv_2"] = ConvLayer(discr["pool_1"],
                                base_units * 2,
                                5,
                                nonlinearity=lref)
    discr["pool_2"] = Pool2DLayer(discr["conv_2"], 2)

    discr["dense_1"] = DenseLayer(discr["pool_2"], 1024, nonlinearity=lref)
    discr["output"] = DenseLayer(discr["dense_1"], 1, nonlinearity=sigmoid)

    return discr
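
A discriminator of this shape is typically trained with a binary cross-entropy objective on its sigmoid output. A minimal, hedged training-setup sketch (variable names and the learning rate are illustrative):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
targets = T.matrix('targets')            # 1 for real images, 0 for fakes

discr = build_Discriminator(input_var)
prediction = lasagne.layers.get_output(discr['output'])
loss = lasagne.objectives.binary_crossentropy(prediction, targets).mean()

params = lasagne.layers.get_all_params(discr['output'], trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=2e-4)
train_fn = theano.function([input_var, targets], loss, updates=updates)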
Example #10
def build_model():
    #################
    # Regular model #
    #################
    l0 = InputLayer(data_sizes["sliced:data:ax"])
    l0r = reshape(l0, (
        -1,
        1,
    ) + data_sizes["sliced:data:ax"][-2:])

    # first do the segmentation steps
    l1a = ConvLayer(
        l0r,
        num_filters=32,
        filter_size=(3, 3),
        pad='same',
        W=lasagne.init.Orthogonal(),
        b=lasagne.init.Constant(0.1),
    )
    l1b = ConvLayer(
        l1a,
        num_filters=32,
        filter_size=(3, 3),
        pad='same',
        W=lasagne.init.Orthogonal(),
        b=lasagne.init.Constant(0.1),
    )
    l1c = ConvLayer(
        l1b,
        num_filters=64,
        filter_size=(3, 3),
        pad='same',
        W=lasagne.init.Orthogonal(),
        b=lasagne.init.Constant(0.1),
    )
    l1f = ConvLayer(l1c,
                    num_filters=1,
                    filter_size=(3, 3),
                    pad='same',
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1),
                    nonlinearity=lasagne.nonlinearities.sigmoid)

    l_1r = reshape(l1f, data_sizes["sliced:data:ax"])

    l_d3 = lasagne.layers.DenseLayer(l_1r, num_units=2)
    l_systole = MuLogSigmaErfLayer(l_d3)

    l_d3b = lasagne.layers.DenseLayer(l_1r, num_units=2)
    l_diastole = MuLogSigmaErfLayer(l_d3b)

    return {
        "inputs": {
            "sliced:data:ax": l0,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole
        }
    }
Example #11
def makeNeuralNet(input_var=None):
    net = {}
    net['input'] = InputLayer(shape=(None, 3, 224, 224), input_var=input_var)
    net['bnorm'] = BatchNormLayer(net['input'])
    net['conv1'] = ConvLayer(net['bnorm'],
                             num_filters=96,
                             filter_size=5,
                             stride=2)  #96*112*112
    net['norm1'] = NormLayer(
        net['conv1'], alpha=0.0001)  # caffe has alpha = alpha * pool_size
    net['pool1'] = PoolLayer(net['norm1'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)  #96*37...approx
    net['conv2'] = ConvLayer(net['pool1'],
                             num_filters=256,
                             filter_size=5,
                             pad=1)
    net['pool2'] = PoolLayer(net['conv2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool2'], num_units=1024)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.2)
    net['_fc7'] = DenseLayer(net['drop6'], num_units=256)
    net['_drop7'] = DropoutLayer(net['_fc7'], p=0.2)
    net['_fc8out'] = DenseLayer(net['_drop7'],
                                num_units=1,
                                nonlinearity=lasagne.nonlinearities.sigmoid)
    output_layer_driver = net['_fc8out']
    return output_layer_driver, net
Example #12
def inc_dim_layer(l_in, num_filters):
    """
    Increase the number of filters and halve the spatial size.

    Args:
        l_in: incoming Lasagne layer
        num_filters: number of filters for both stacked convolutions

    Returns:
        Output layer of two stacked batch-normalized 3x3 convolutions.
    """
    l = batch_norm(
        ConvLayer(l_in,
                  num_filters=num_filters,
                  filter_size=(3, 3),
                  stride=(2, 2),
                  nonlinearity=rectify,
                  pad='same',
                  W=lasagne.init.HeNormal(gain='relu'))
    )  # 128 x 16 x 16 (1 highway block) (2 conv layers)

    l = batch_norm(
        ConvLayer(l,
                  num_filters=num_filters,
                  filter_size=(3, 3),
                  stride=(1, 1),
                  nonlinearity=rectify,
                  pad='same',
                  W=lasagne.init.HeNormal(gain='relu')))

    return l
Example #13
def fan_module_improved(inp,
                        net,
                        prefix,
                        features,
                        nb_filter,
                        scale,
                        upsampling_strategy="repeat"):
    r""" Implementation for simple LSTM block for feature based manipulation

    Takes input x and features and performs pixelwise manipulation of inp:
    $$
    y = x \sigma(f(z)) + \tanh(g(z)) (1 - \sigma(f(z)))
    $$

    $f$ and $g$ are functions implemented by 1x1 convolutions followed by upsampling
    to match the dimension of $x$.

    """

    # Input gate directly derived from feature representation. Sigmoid rescales to 0...1
    input_gate = ConvLayer(features,
                           nb_filter,
                           1,
                           pad=0,
                           flip_filters=False,
                           nonlinearity=sigmoid,
                           b=nn.init.Constant(0.5))

    # Addition gate uses inverse activation from input gate
    addition = ConvLayer(features,
                         nb_filter,
                         1,
                         pad=0,
                         flip_filters=False,
                         nonlinearity=rectify)

    input_gate_upsampled = upsample(input_gate,
                                    scale,
                                    mode=upsampling_strategy)
    addition_gate_upsampled = upsample(addition,
                                       scale,
                                       mode=upsampling_strategy)

    x_forget = ElemwiseProdLayer([inp, input_gate_upsampled],
                                 cropping=(None, None, "center", "center"))
    x_added = ElemwiseSumLayer([x_forget, addition_gate_upsampled],
                               cropping=(None, None, "center", "center"))

    ll = [
        input_gate, addition, input_gate_upsampled, addition_gate_upsampled,
        x_forget, x_added
    ]
    layers = locals()
    net.update({
        prefix + "/" + k: layers[k]
        for k in layers.keys() if layers[k] in ll
    })

    return x_added
Example #14
 def build_simple_block(self, incoming_layer, names,
                        num_filters, filter_size, stride, pad,
                        use_bias=False, nonlin=rectify):
     """Creates stacked Lasagne layers ConvLayer -> BN -> (ReLu)
 
     Parameters:
     ----------
     incoming_layer : instance of Lasagne layer
         Parent layer
 
     names : list of string
         Names of the layers in block
 
     num_filters : int
         Number of filters in convolution layer
 
     filter_size : int
         Size of filters in convolution layer
 
     stride : int
         Stride of convolution layer
 
     pad : int
         Padding of convolution layer
 
     use_bias : bool
         Whether to use bias in convolution layer
 
     nonlin : function
         Nonlinearity type of Nonlinearity layer
 
     Returns
     -------
     tuple: (net, last_layer_name)
         net : dict
             Dictionary with stacked layers
         last_layer_name : string
             Last layer name
     """
     net = []
     net.append((
             names[0],
             ConvLayer(incoming_layer, num_filters, filter_size, stride, pad,
                       flip_filters=False, nonlinearity=None) if use_bias
             else ConvLayer(incoming_layer, num_filters, filter_size, stride,
                            pad, b=None, flip_filters=False, nonlinearity=None)
         ))
 
     net.append((
             names[1],
             BatchNormLayer(net[-1][1])
         ))
     if nonlin is not None:
         net.append((
             names[2],
             NonlinearityLayer(net[-1][1], nonlinearity=nonlin)
         ))
 
     return dict(net), net[-1][0]
Example #15
    def lasagne_layers_method(self):
        '''
        INPUT: None
        OUTPUT: Dict

        Creates dictionary of vgg_cnn_s model Lasagne layer objects. Here the
        original output layer (softmax, 1000 classes) has been removed and
        the output layer returns a vector of shape (1,4096).
        '''
        # Create dictionary of VGG_CNN_S model layers
        self.lasagne_layers = {}
        self.lasagne_layers['input'] = InputLayer((None, 3, 224, 224))
        self.lasagne_layers['conv1'] = ConvLayer(self.lasagne_layers['input'],
                                                 num_filters=96,
                                                 filter_size=7,
                                                 stride=2,
                                                 flip_filters=False)
        self.lasagne_layers['norm1'] = NormLayer(self.lasagne_layers['conv1'],
                                                 alpha=0.0001)
        self.lasagne_layers['pool1'] = PoolLayer(self.lasagne_layers['norm1'],
                                                 pool_size=3,
                                                 stride=3,
                                                 ignore_border=False)
        self.lasagne_layers['conv2'] = ConvLayer(self.lasagne_layers['pool1'],
                                                 num_filters=256,
                                                 filter_size=5,
                                                 flip_filters=False)
        self.lasagne_layers['pool2'] = PoolLayer(self.lasagne_layers['conv2'],
                                                 pool_size=2,
                                                 stride=2,
                                                 ignore_border=False)
        self.lasagne_layers['conv3'] = ConvLayer(self.lasagne_layers['pool2'],
                                                 num_filters=512,
                                                 filter_size=3,
                                                 pad=1,
                                                 flip_filters=False)
        self.lasagne_layers['conv4'] = ConvLayer(self.lasagne_layers['conv3'],
                                                 num_filters=512,
                                                 filter_size=3,
                                                 pad=1,
                                                 flip_filters=False)
        self.lasagne_layers['conv5'] = ConvLayer(self.lasagne_layers['conv4'],
                                                 num_filters=512,
                                                 filter_size=3,
                                                 pad=1,
                                                 flip_filters=False)
        self.lasagne_layers['pool5'] = PoolLayer(self.lasagne_layers['conv5'],
                                                 pool_size=3,
                                                 stride=3,
                                                 ignore_border=False)
        self.lasagne_layers['fc6'] = DenseLayer(self.lasagne_layers['pool5'],
                                                num_units=4096)
        self.lasagne_layers['drop6'] = DropoutLayer(self.lasagne_layers['fc6'],
                                                    p=0.5)
        self.lasagne_layers['fc7'] = DenseLayer(self.lasagne_layers['drop6'],
                                                num_units=4096)
Example #16
def build_baseline2_feats(input_var, nb_filter=96):
    """ Slightly more complex model. Transform x to a feature space first
    """
    net = OrderedDict()

    # Input, standardization
    last = net['input'] = InputLayer(
        (None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))

    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last,
                                      nb_filter,
                                      1,
                                      pad=0,
                                      flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = BatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last,
                                      nb_filter,
                                      1,
                                      pad=0,
                                      flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = BatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)

    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)

    # Decoder as before
    last = net["deconv1_2"] = TransposedConv2DLayer(
        last,
        net["conv1_2"].input_shape[1],
        net["conv1_2"].filter_size,
        stride=net["conv1_2"].stride,
        crop=net["conv1_2"].pad,
        W=net["conv1_2"].W,
        flip_filters=not net["conv1_2"].flip_filters,
        nonlinearity=None)
    last = net["deconv1_1"] = TransposedConv2DLayer(
        last,
        net["conv1_1"].input_shape[1],
        net["conv1_1"].filter_size,
        stride=net["conv1_1"].stride,
        crop=net["conv1_1"].pad,
        W=net["conv1_1"].W,
        flip_filters=not net["conv1_1"].flip_filters,
        nonlinearity=None)

    last = net["bn"] = BatchNormLayer(last,
                                      beta=nn.init.Constant(128.),
                                      gamma=nn.init.Constant(25.))

    return last, net
Example #17
def network_sm(ip_size, input_var):
    net = {}
    net['input'] = lasagne.layers.InputLayer(shape=(None, 1, ip_size[0], ip_size[1]), input_var=input_var)
    net['conv1'] = ConvLayer(net['input'], 16, 3, pad=0)
    net['pool1'] = PoolLayer(net['conv1'], 2)
    net['conv2'] = ConvLayer(net['pool1'], 16, 3, pad=0)
    net['pool2'] = PoolLayer(net['conv2'], 2)
    net['fc1'] = DenseLayer(lasagne.layers.dropout(net['pool2'], p=0.5), num_units=64, nonlinearity=lasagne.nonlinearities.rectify)
    net['prob'] = DenseLayer(lasagne.layers.dropout(net['fc1'], p=0.5), num_units=2, nonlinearity=lasagne.nonlinearities.softmax)
    return net
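
For a softmax classifier like this one, the usual Lasagne training setup pairs get_output with categorical cross-entropy and an SGD-style update rule. A hedged sketch (the input size and hyperparameters are assumptions):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
target_var = T.ivector('targets')

net = network_sm((64, 64), input_var)    # hypothetical patch size
prediction = lasagne.layers.get_output(net['prob'])
loss = lasagne.objectives.categorical_crossentropy(prediction,
                                                   target_var).mean()

params = lasagne.layers.get_all_params(net['prob'], trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params,
                                            learning_rate=0.01, momentum=0.9)
train_fn = theano.function([input_var, target_var], loss, updates=updates)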
Example #18
def build_bottleneck_sb_residual_layer(prev_layer, n_out, stride,
                                       remaining_stick):
    size = n_out // 4

    # conv 1x1
    prev_layer_preact = batch_norm(prev_layer)
    prev_layer_preact = NonlinearityLayer(prev_layer_preact, nonlinearity=ReLU)
    layer = ConvLayer(prev_layer_preact,
                      num_filters=size,
                      nonlinearity=None,
                      filter_size=(1, 1),
                      stride=stride,
                      pad=(0, 0),
                      W=lasagne.init.HeNormal(gain='relu'))

    # conv 3x3
    layer = batch_norm(layer)
    layer = NonlinearityLayer(layer, nonlinearity=ReLU)
    layer = ConvLayer(layer,
                      num_filters=size,
                      nonlinearity=None,
                      filter_size=(3, 3),
                      stride=(1, 1),
                      pad=(1, 1),
                      W=lasagne.init.HeNormal(gain='relu'))

    # conv 1x1
    layer = batch_norm(layer)
    layer = NonlinearityLayer(layer, nonlinearity=ReLU)
    layer = ConvLayer(layer,
                      num_filters=n_out,
                      nonlinearity=None,
                      filter_size=(1, 1),
                      stride=(1, 1),
                      pad=(0, 0),
                      W=lasagne.init.HeNormal(gain='relu'))

    # Weigh layer by the remaining stick length.
    remaining_stick = RemainingStickLengthLayer(prev_layer, remaining_stick)
    layer = WeightedByStickLengthLayer(layer, remaining_stick)

    if prev_layer.output_shape[1] == n_out:
        shortcut_layer = prev_layer  # Identity shortcut.
    else:
        # Projection shortcut.
        shortcut_layer = ConvLayer(prev_layer_preact,
                                   num_filters=n_out,
                                   nonlinearity=None,
                                   filter_size=(1, 1),
                                   stride=stride,
                                   pad=(0, 0),
                                   W=lasagne.init.HeNormal(gain='relu'))

    output_layer = ElemwiseSumLayer([layer, shortcut_layer])
    return output_layer, remaining_stick
Example #19
def conv_pool_cnn_c():
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32))
    net['drop_in'] = DropoutLayer(net['input'], p=0.2)

    net['conv1_1'] = ConvLayer(net['drop_in'],
                               num_filters=96,
                               filter_size=3,
                               pad=1,
                               flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'],
                               num_filters=96,
                               filter_size=3,
                               pad=1,
                               flip_filters=False)
    net['conv1_3'] = ConvLayer(net['conv1_2'],
                               num_filters=96,
                               filter_size=3,
                               pad=1,
                               flip_filters=False)

    net['conv2_1'] = PoolLayer(net['conv1_3'], pool_size=3, stride=2)
    net['drop2_1'] = DropoutLayer(net['conv2_1'], p=0.5)

    net['conv3_1'] = ConvLayer(net['drop2_1'],
                               num_filters=192,
                               filter_size=3,
                               pad=1,
                               flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'],
                               num_filters=192,
                               filter_size=3,
                               pad=1,
                               flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'],
                               num_filters=192,
                               filter_size=3,
                               pad=1,
                               flip_filters=False)

    net['conv4_1'] = PoolLayer(net['conv3_3'], pool_size=3, stride=2)
    net['drop4_1'] = DropoutLayer(net['conv4_1'], p=0.5)

    net['conv5_1'] = ConvLayer(net['drop4_1'],
                               num_filters=192,
                               filter_size=3,
                               pad=1,
                               flip_filters=False)
    net['conv6_1'] = ConvLayer(net['conv5_1'],
                               num_filters=192,
                               filter_size=1,
                               flip_filters=False)
    net['conv7_1'] = ConvLayer(net['conv6_1'],
                               num_filters=10,
                               filter_size=1,
                               flip_filters=False)
    net['global_avg'] = GlobalPoolLayer(net['conv7_1'])
    net['output'] = NonlinearityLayer(net['global_avg'], softmax)

    return net
Example #20
def mycnn1(library, img_channels, img_rows, img_cols, nb_classes):
	nb_filters = 32
	nb_pool = 2
	nb_conv = 3

	if library == 'keras':
		model = Sequential()

		# First convolutional layer
		model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
		                        border_mode='valid',
		                        input_shape=(img_channels, img_rows, img_cols)))
		model.add(Activation('relu'))

		# Second convolutional layer
		model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
		model.add(Activation('relu'))
		model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
		model.add(Dropout(0.25))

		# Fully connected layer
		model.add(Flatten())
		model.add(Dense(128))
		model.add(Activation('relu'))
		model.add(Dropout(0.5))

		# Softmax prediction layer
		model.add(Dense(nb_classes))
		model.add(Activation('softmax'))

		return model
	else:
		net = {}
		net['input'] = InputLayer((None, img_channels, img_rows, img_cols))

		# First convolutional layer
		net['conv1'] = ConvLayer(
				net['input'], nb_filters, nb_conv, flip_filters=False)

		# Second convolutional layer
		net['conv2'] = ConvLayer(
				net['conv1'], nb_filters, nb_conv, flip_filters=False)
		net['pool2'] = PoolLayer(net['conv2'], nb_pool)
		net['pool2_dropout'] = DropoutLayer(net['pool2'], p=0.25)

		# Fully connected layer
		net['fc1'] = DenseLayer(net['pool2_dropout'], num_units=128)
		net['fc1_dropout'] = DropoutLayer(net['fc1'], p=0.5)

		# Softmax prediction layer
		net['fc2'] = DenseLayer(
				net['fc1_dropout'], num_units=nb_classes, nonlinearity=None)
		net['prob'] = NonlinearityLayer(net['fc2'], softmax)

		return net
Example #21
def residual_block(l, increase_dim=False, projection=True, first=False):
    """
    Create a residual learning building block with two stacked 3x3 convlayers as in paper
    'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)
    """
    input_num_filters = l.output_shape[1]
    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters

    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(
        ConvLayer(bn_pre_relu,
                  num_filters=out_num_filters,
                  filter_size=(3, 3),
                  stride=first_stride,
                  nonlinearity=rectify,
                  pad='same',
                  W=he_norm))

    # contains the last weight portion, step 6
    conv_2 = ConvLayer(conv_1,
                       num_filters=out_num_filters,
                       filter_size=(3, 3),
                       stride=(1, 1),
                       nonlinearity=None,
                       pad='same',
                       W=he_norm)

    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l,
                               num_filters=out_num_filters,
                               filter_size=(1, 1),
                               stride=(2, 2),
                               nonlinearity=None,
                               pad='same',
                               b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    else:
        block = ElemwiseSumLayer([conv_2, l])

    return block
Example #22
    def _residual_block_(self,
                         l,
                         increase_dim=False,
                         projection=True,
                         first=False):
        input_num_filters = l.output_shape[1]
        if increase_dim:
            first_stride = (2, 2)
            out_num_filters = input_num_filters * 2
        else:
            first_stride = (1, 1)
            out_num_filters = input_num_filters

        if first:
            # hacky solution to keep layers correct
            bn_pre_relu = l
        else:
            # contains the BN -> ReLU portion, steps 1 to 2
            bn_pre_conv = BatchNormLayer(l)
            bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

        # contains the weight -> BN -> ReLU portion, steps 3 to 5
        conv_1 = batch_norm(
            ConvLayer(bn_pre_relu,
                      num_filters=out_num_filters,
                      filter_size=(3, 3),
                      stride=first_stride,
                      nonlinearity=rectify,
                      pad='same',
                      W=he_norm))

        # contains the last weight portion, step 6
        conv_2 = ConvLayer(conv_1,
                           num_filters=out_num_filters,
                           filter_size=(3, 3),
                           stride=(1, 1),
                           nonlinearity=None,
                           pad='same',
                           W=he_norm)

        # add shortcut connections
        if increase_dim:
            # projection shortcut, as option B in paper
            projection = ConvLayer(bn_pre_relu,
                                   num_filters=out_num_filters,
                                   filter_size=(1, 1),
                                   stride=(2, 2),
                                   nonlinearity=None,
                                   pad='same',
                                   b=None)
            block = ElemwiseSumLayer([conv_2, projection])
        else:
            block = ElemwiseSumLayer([conv_2, l])

        return block
Example #23
def build_model(input_shape):

    net = {}
    net['input'] = InputLayer(input_shape)
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=96,
                             filter_size=7,
                             stride=2,
                             flip_filters=False)
    net['norm1'] = LRNLayer(
        net['conv1'], alpha=0.0001)  # caffe has alpha = alpha * pool_size
    net['pool1'] = PoolLayer(net['norm1'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'],
                             num_filters=256,
                             filter_size=5,
                             flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['conv4'] = ConvLayer(net['conv3'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['conv5'] = ConvLayer(net['conv4'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'],
                            num_units=1000,
                            nonlinearity=lasagne.nonlinearities.softmax)

    for layer in net.values():
        print(lasagne.layers.get_output_shape(layer))

    return net
Example #24
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, None, None))
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'],
                                    3,
                                    stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], 3, stride=2)

    net.update(
        build_inception_module('inception_3a', net['pool2/3x3_s2'],
                               [32, 64, 96, 128, 16, 32]))
    net.update(
        build_inception_module('inception_3b', net['inception_3a/output'],
                               [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], 3, stride=2)

    net.update(
        build_inception_module('inception_4a', net['pool3/3x3_s2'],
                               [64, 192, 96, 208, 16, 48]))
    net.update(
        build_inception_module('inception_4b', net['inception_4a/output'],
                               [64, 160, 112, 224, 24, 64]))
    net.update(
        build_inception_module('inception_4c', net['inception_4b/output'],
                               [64, 128, 128, 256, 24, 64]))
    net.update(
        build_inception_module('inception_4d', net['inception_4c/output'],
                               [64, 112, 144, 288, 32, 64]))
    net.update(
        build_inception_module('inception_4e', net['inception_4d/output'],
                               [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], 3, stride=2)

    net.update(
        build_inception_module('inception_5a', net['pool4/3x3_s2'],
                               [128, 256, 160, 320, 32, 128]))
    net.update(
        build_inception_module('inception_5b', net['inception_5a/output'],
                               [128, 384, 192, 384, 48, 128]))

    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'],
                                         num_units=1000,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'],
                                    nonlinearity=softmax)
    return net
Example #25
def residual_block(l, transition=False, first=False, filters=16):
    if transition:
        first_stride = (2, 2)
    else:
        first_stride = (1, 1)

    if first:
        bn_pre_relu = l
    else:
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    conv_1 = NonlinearityLayer(
        BatchNormLayer(
            ConvLayer(bn_pre_relu,
                      num_filters=filters,
                      filter_size=(3, 3),
                      stride=first_stride,
                      nonlinearity=None,
                      pad='same',
                      W=he_norm)),
        nonlinearity=rectify)

    #dropout = DropoutLayer(conv_1, p=0.3)
    conv_2 = ConvLayer(conv_1,
                       num_filters=filters,
                       filter_size=(3, 3),
                       stride=(1, 1),
                       nonlinearity=None,
                       pad='same',
                       W=he_norm)

    # add shortcut connections
    if transition:
        # projection shortcut, as option B in paper
        projection = ConvLayer(bn_pre_relu,
                               num_filters=filters,
                               filter_size=(1, 1),
                               stride=(2, 2),
                               nonlinearity=None,
                               pad='same',
                               b=None)
    elif conv_2.output_shape == l.output_shape:
        projection = l
    else:
        projection = ConvLayer(bn_pre_relu,
                               num_filters=filters,
                               filter_size=(1, 1),
                               stride=(1, 1),
                               nonlinearity=None,
                               pad='same',
                               b=None)

    return ElemwiseSumLayer([conv_2, projection])
Example #26
def build_model2(input_var, nOutput):
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32), input_var=input_var)
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=32,
                             filter_size=5,
                             pad=2,
                             flip_filters=False,
                             W=lasagne.init.Normal(std=0.1),
                             nonlinearity=lasagne.nonlinearities.rectify)
    net['pool1'] = PoolLayer(net['conv1'],
                             pool_size=3,
                             stride=2,
                             mode='max',
                             ignore_border=False)
    net['norm1'] = lasagne.layers.LocalResponseNormalization2DLayer(
        net['pool1'], n=3, alpha=5e-5)
    net['conv2'] = ConvLayer(net['norm1'],
                             num_filters=32,
                             filter_size=5,
                             pad=2,
                             flip_filters=False,
                             W=lasagne.init.Normal(std=0.1),
                             nonlinearity=lasagne.nonlinearities.rectify)
    net['pool2'] = PoolLayer(net['conv2'],
                             pool_size=3,
                             stride=2,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['norm2'] = lasagne.layers.LocalResponseNormalization2DLayer(
        net['pool2'], n=3, alpha=5e-5)

    net['conv3'] = ConvLayer(net['norm2'],
                             num_filters=64,
                             filter_size=5,
                             pad=2,
                             flip_filters=False,
                             W=lasagne.init.Normal(std=0.1),
                             nonlinearity=lasagne.nonlinearities.rectify)
    net['pool3'] = PoolLayer(net['conv3'],
                             pool_size=3,
                             stride=2,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['output'] = lasagne.layers.DenseLayer(
        net['pool3'],
        num_units=nOutput,
        W=lasagne.init.Normal(std=0.1),
        nonlinearity=lasagne.nonlinearities.softmax)

    return net
Example #27
def CNN_model():
    net = {}
    net['input'] = InputLayer((None, 3, 64, 64)) 
    net['conv1/3x3_s2'] = ConvLayer(net['input'], 16, 3, stride=2, pad='same', W=lasagne.init.HeNormal(gain='relu'))   # 16*32*32
    # net['pool1/3x3_s2'] = PoolLayer(net['conv1/3x3_s1'], pool_size=3, stride=2, ignore_border=False) #  16*32*32
    net['pool1/norm1'] = LRNLayer(net['conv1/3x3_s2'], alpha=0.00002, k=1)

    net['conv2/3x3_s1'] = ConvLayer(net['pool1/norm1'], 32, 3, stride=1, pad='same', W=lasagne.init.HeNormal(gain='relu')) # 32*32*32
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/3x3_s1'], pool_size=3, stride=2, ignore_border=False)     # 32*16*16
    net['pool2/norm1'] = LRNLayer(net['pool2/3x3_s2'], alpha=0.00002, k=1)


    net['conv3/3x3_s1'] = ConvLayer(net['pool2/norm1'], 64, 3, stride=1, pad='same', W=lasagne.init.HeNormal(gain='relu')) # 64*16*16
    net['pool3/3x3_s2'] = PoolLayer(net['conv3/3x3_s1'], pool_size=3, stride=2, ignore_border=False)     # 64*8*8
    net['pool3/norm1'] = LRNLayer(net['pool3/3x3_s2'], alpha=0.00002, k=1)

    net['conv4/3x3_s1'] = ConvLayer(net['pool3/norm1'], 128, 3, stride=1, pad='same', W=lasagne.init.HeNormal(gain='relu')) # 128*8*8
    net['pool4/3x3_s2'] = PoolLayer(net['conv4/3x3_s1'], pool_size=3, stride=2, ignore_border=False)      # 128*4*4
    net['pool4/norm1'] = LRNLayer(net['pool4/3x3_s2'], alpha=0.00002, k=1)

    net['conv5/3x3_s1'] = ConvLayer(net['pool4/norm1'], 128, 3, stride=1, pad='same', W=lasagne.init.HeNormal(gain='relu'))
    net['pool5/norm1'] = LRNLayer(net['conv5/3x3_s1'], alpha=0.00002, k=1)    

    net['conv6/3x3_s1'] = ConvLayer(net['pool5/norm1'], 128, 3, stride=1, pad='same', W=lasagne.init.HeNormal(gain='relu'))
    net['pool6/norm1'] = LRNLayer(net['conv6/3x3_s1'], alpha=0.00002, k=1)    

    net['conv7/3x3_s1'] = ConvLayer(net['pool6/norm1'], 128, 3, stride=1, pad='same', W=lasagne.init.HeNormal(gain='relu'))
    net['pool7/norm1'] = LRNLayer(net['conv7/3x3_s1'], alpha=0.00002, k=1)    


    net['conv8/3x3_s1'] = ConvLayer(net['pool7/norm1'], 256, 3, stride=1, pad='same', W=lasagne.init.HeNormal(gain='relu'))  # 256*4*4
    net['pool8/4x4_s1'] = GlobalPoolLayer(net['conv8/3x3_s1'])  # 256

    return net
Example #28
def build_model(input_var):
    net = {}

    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=96,
                             filter_size=7,
                             stride=2,
                             flip_filters=False)
    # caffe has alpha = alpha * pool_size
    net['norm1'] = NormLayer(net['conv1'], alpha=0.0001)
    net['pool1'] = PoolLayer(net['norm1'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'],
                             num_filters=256,
                             filter_size=5,
                             flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['conv4'] = ConvLayer(net['conv3'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['conv5'] = ConvLayer(net['conv4'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)

    return net
Example #29
def makeNeuralNet():
    net = {}
    net['input'] = InputLayer(shape=(None, 3, 224, 224))
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=96,
                             filter_size=7,
                             stride=2)
    net['norm1'] = NormLayer(
        net['conv1'], alpha=0.0001)  # caffe has alpha = alpha * pool_size
    net['pool1'] = PoolLayer(net['norm1'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5)
    net['pool2'] = PoolLayer(net['conv2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'],
                             num_filters=512,
                             filter_size=3,
                             pad=1)
    net['conv4'] = ConvLayer(net['conv3'],
                             num_filters=512,
                             filter_size=3,
                             pad=1)
    net['conv5'] = ConvLayer(net['conv4'],
                             num_filters=512,
                             filter_size=3,
                             pad=1)
    net['pool5'] = PoolLayer(net['conv5'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    #     net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    #     net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    #     net['fc8'] = DenseLayer(net['drop7'], num_units=1000, nonlinearity=lasagne.nonlinearities.softmax)
    #     output_layer_vgg = net['fc8']
    ini = lasagne.init.HeUniform()
    net['_fc7'] = DenseLayer(net['drop6'], num_units=4096, W=ini)
    net['_drop7'] = DropoutLayer(net['_fc7'], p=0.5)
    net['_fc8out'] = DenseLayer(net['_drop7'],
                                num_units=6,
                                nonlinearity=lasagne.nonlinearities.softmax,
                                W=ini)
    output_layer_driver = net['_fc8out']
    return net['drop6'], output_layer_driver
Example #30
def build_baseline5_fan(input_var):
    """ Using Baseline 1 with the novel FAN layer.

    VGG conv4_1 is used for feature extraction.
    """
    # TODO remove these imports + move relevant parts to layers.py once
    # everything is up and running
    import theano.tensor as T
    import numpy as np
    net = OrderedDict()

    # Input, standardization
    last = net['input'] = InputLayer(
        (None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))

    net['features_s8'] = get_features(last)["conv4_1"]
    net['features'] = Upscale2DLayer(net["features_s8"], 8)
    net['mask'] = ExpressionLayer(
        net["features"], lambda x: 1. * T.eq(x, x.max(axis=1, keepdims=True)))

    last = net["middle"] = ConvLayer(last, 3, 1, nonlinearity=linear)
    last = net["fan"] = FeatureAwareNormLayer(
        (last, net['mask']),
        beta=nn.init.Constant(np.float32(128.)),
        gamma=nn.init.Constant(np.float32(25.)))

    return last, net