Code example #1
def __1viewPair_SurfaceNet__(input_var_5D, input_var_shape = (None,3*2)+(64,)*3,\
        N_predicts_perGroup = 6):
    """
    from the 5D input (N_cubePair, 2rgb, h, w, d) of the colored cubePairs 
    to predicts occupancy probability map (N_cubePair, 1, h, w, d)
    """
    input_var = input_var_5D
    net={}
    net["input"] = lasagne.layers.InputLayer(input_var_shape, input_var)
    input_chunk_len = input_var.shape[0] // N_predicts_perGroup

    conv_nonlinearity = lasagne.nonlinearities.rectify
    nonlinearity_sigmoid = lasagne.nonlinearities.sigmoid

    #---------------------------    
    net["conv1_1"] = batch_norm(Conv3DDNNLayer(net["input"],32,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv1_2"] = batch_norm(Conv3DDNNLayer(net["conv1_1"],32,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv1_3"] = batch_norm(Conv3DDNNLayer(net["conv1_2"],32,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))

    net["pool1"] = Pool3DDNNLayer(net["conv1_3"], (2,2,2), stride=2)
    net["side_op1"] = batch_norm(Conv3DDNNLayer(net["conv1_3"],16,(1,1,1),nonlinearity=nonlinearity_sigmoid,untie_biases=False,pad='same'))
    net["side_op1_deconv"] = net["side_op1"]

    #---------------------------
    net["conv2_1"] = batch_norm(Conv3DDNNLayer(net["pool1"],80,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv2_2"] = batch_norm(Conv3DDNNLayer(net["conv2_1"],80,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv2_3"] = batch_norm(Conv3DDNNLayer(net["conv2_2"],80,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))

    net["pool2"] = Pool3DDNNLayer(net["conv2_3"], (2,2,2), stride=2)  
    net["side_op2"] = batch_norm(Conv3DDNNLayer(net["conv2_3"],16,(1,1,1),nonlinearity=nonlinearity_sigmoid,untie_biases=False,pad='same'))
    net["side_op2_deconv"] = Bilinear_3DInterpolation(net["side_op2"], upscale_factor=2, untie_biases=False, nonlinearity=None, pad='same')
                                                    
    #---------------------------
    net["conv3_1"] = batch_norm(Conv3DDNNLayer(net["pool2"],160,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv3_2"] = batch_norm(Conv3DDNNLayer(net["conv3_1"],160,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv3_3"] = batch_norm(Conv3DDNNLayer(net["conv3_2"],160,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same') )

    ##pool3 = Pool3DDNNLayer(conv3_3, (2,2,2), stride=2)  
    net["side_op3"] = batch_norm(Conv3DDNNLayer(net["conv3_3"],16,(1,1,1),nonlinearity=nonlinearity_sigmoid,untie_biases=False,pad='same'))
    net["side_op3_deconv"] = Bilinear_3DInterpolation(net["side_op3"], upscale_factor=4, untie_biases=False, nonlinearity=None, pad='same')
    
    #---------------------------
    net["conv3_3_pad"] = PadLayer(net["conv3_3"], width=2, val=0, batch_ndim=2)
    net["conv4_1"] = batch_norm(DilatedConv3DLayer(net["conv3_3_pad"],300,(3,3,3),dilation=(2,2,2),nonlinearity=conv_nonlinearity,untie_biases=False))
    net["conv4_1_pad"] = PadLayer(net["conv4_1"], width=2, val=0, batch_ndim=2)
    net["conv4_2"] = batch_norm(DilatedConv3DLayer(net["conv4_1_pad"],300,(3,3,3),dilation=(2,2,2),nonlinearity=conv_nonlinearity,untie_biases=False))
    net["conv4_2_pad"] = PadLayer(net["conv4_2"], width=2, val=0, batch_ndim=2)
    net["conv4_3"] = batch_norm(DilatedConv3DLayer(net["conv4_2_pad"],300,(3,3,3),dilation=(2,2,2),nonlinearity=conv_nonlinearity,untie_biases=False) )
    net["conv4_3_pad"] = PadLayer(net["conv4_3"], width=0, val=0, batch_ndim=2)
    net["side_op4"] = batch_norm(DilatedConv3DLayer(net["conv4_3_pad"],16,(1,1,1),dilation=(2,2,2),nonlinearity=nonlinearity_sigmoid,untie_biases=False))
    net["side_op4_deconv"] = Bilinear_3DInterpolation(net["side_op4"], upscale_factor=4, untie_biases=False, nonlinearity=None, pad='same')
                                
    #---------------------------
    net["fuse_side_outputs"] = ConcatLayer([net["side_op1_deconv"],net["side_op2_deconv"],net["side_op3_deconv"],net["side_op4_deconv"]], axis=1)
    net["merge_conv"] = batch_norm(Conv3DDNNLayer(net["fuse_side_outputs"],100,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["merge_conv"] = batch_norm(Conv3DDNNLayer(net["merge_conv"],100,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["merge_conv3"] = batch_norm(Conv3DDNNLayer(net["merge_conv"],1,(1,1,1),nonlinearity=nonlinearity_sigmoid,untie_biases=False,pad='same')) # linear output for regression
    net["output_SurfaceNet"] = net["merge_conv3"]
    return net
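A minimal usage sketch for the builder above: it assumes Theano, Lasagne with cuDNN, and the custom layers referenced in the snippet (Conv3DDNNLayer, DilatedConv3DLayer, Bilinear_3DInterpolation) are importable; all variable names here are illustrative.

import theano
import theano.tensor as T
import lasagne

# 5D float32 input: (N_cubePair, 2 * 3 rgb channels, h, w, d)
cube_pairs = T.TensorType('float32', (False,) * 5)('cube_pairs')

net = __1viewPair_SurfaceNet__(cube_pairs)
occupancy = lasagne.layers.get_output(net["output_SurfaceNet"], deterministic=True)
predict_fn = theano.function([cube_pairs], occupancy)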
Code example #2
    def residual_block(l, increase_dim=False, projection=False):
        input_num_filters = l.output_shape[1]
        if increase_dim:
            first_stride = (2,2)
            out_num_filters = input_num_filters*2
        else:
            first_stride = (1,1)
            out_num_filters = input_num_filters

        stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
        stack_2 = batch_norm(ConvLayer(stack_1, num_filters=out_num_filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))

        # add shortcut connections
        if increase_dim:
            if projection:
                # projection shortcut, as option B in paper
                projection = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None, flip_filters=False))
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, projection]),nonlinearity=rectify)
            else:
                # identity shortcut, as option A in paper
                identity = ExpressionLayer(l, lambda X: X[:, :, ::2, ::2], lambda s: (s[0], s[1], s[2]//2, s[3]//2))
                padding = PadLayer(identity, [out_num_filters//4,0,0], batch_ndim=1)
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, padding]),nonlinearity=rectify)
        else:
            block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]),nonlinearity=rectify)

        return block
Code example #3
    def nn_fn(self):

        l_in_z = InputLayer((None, self.z_dim))
        l_in_x = InputLayer((None, self.max_length, self.emb_dim))

        l_in_z_reshape = ReshapeLayer(l_in_z, ([0], 1, [1]))
        l_in_z_rep = TileLayer(l_in_z_reshape, (1, self.max_length, 1))

        l_x_pre_pad = SliceLayer(PadLayer(l_in_x, [(1, 0), (0, 0)],
                                          batch_ndim=1),
                                 indices=slice(0, -1),
                                 axis=1)
        l_in_x_pre_pad_drop = DropoutLayer(l_x_pre_pad,
                                           self.nn_word_drop,
                                           shared_axes=(-1, ))

        l_concat = ConcatLayer((l_in_z_rep, l_in_x_pre_pad_drop), axis=-1)

        l_h = LSTMLayer(l_concat, num_units=self.nn_hid_units)

        if self.nn_skip:
            l_h = ConcatLayer((l_h, l_in_z_rep), axis=-1)

        l_out = DenseLayer(l_h,
                           num_units=self.emb_dim,
                           num_leading_axes=2,
                           nonlinearity=None)

        return (l_in_z, l_in_x), l_out
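A shape sketch of the pad-and-slice trick used above (illustrative sizes): padding one zero step at the front of the time axis and dropping the last step shifts the sequence right by one, so the decoder at position t only sees the embedding of position t-1.

from lasagne.layers import InputLayer, PadLayer, SliceLayer, get_output_shape

l_x = InputLayer((None, 40, 300))    # (batch, max_length, emb_dim); sizes are made up
l_shift = SliceLayer(PadLayer(l_x, [(1, 0), (0, 0)], batch_ndim=1),
                     indices=slice(0, -1), axis=1)
print(get_output_shape(l_shift))     # (None, 40, 300): same shape, shifted by one step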
Code example #4
    def __init__(self,
                 incoming,
                 num_filters,
                 filter_size,
                 dilation=1,
                 untie_biases=False,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 flip_filters=False,
                 convolution=conv.conv1d_mc0,
                 **kwargs):

        self.dilation = dilation

        pre_pad = (filter_size - 1) * dilation

        filter_size += (filter_size - 1) * (dilation - 1)

        l_pad = PadLayer(incoming, batch_ndim=2, width=[(pre_pad, 0)])

        super(DilatedConv1DLayer, self).__init__(incoming=l_pad,
                                                 num_filters=num_filters,
                                                 filter_size=filter_size,
                                                 stride=1,
                                                 pad=0,
                                                 untie_biases=untie_biases,
                                                 W=W,
                                                 b=b,
                                                 nonlinearity=nonlinearity,
                                                 flip_filters=flip_filters,
                                                 convolution=convolution,
                                                 **kwargs)
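A hypothetical shape check for the causal layer above, assuming the class subclasses lasagne.layers.Conv1DLayer: PadLayer adds (filter_size - 1) * dilation zeros on the left only, and the stored filter size is expanded to its dilated extent, so the output length equals the input length.

from lasagne.layers import InputLayer, get_output_shape

# pre_pad = (3 - 1) * 4 = 8, effective filter size = 3 + 2 * 3 = 9,
# so (100 + 8) - 9 + 1 = 100 time steps are preserved and the padding is causal.
l_in = InputLayer((None, 16, 100))   # (batch, channels, time)
l_dil = DilatedConv1DLayer(l_in, num_filters=32, filter_size=3, dilation=4)
print(get_output_shape(l_dil))       # (None, 32, 100)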
Code example #5
    def shortcut(self, incoming, residual, type=None):
        """Create a shortcut from ``incoming`` to ``residual``."""
        type = type or self.type
        in_shape = getattr(incoming, 'output_shape', incoming)
        out_shape = getattr(residual, 'output_shape', residual)
        in_filters = in_shape[1]
        out_filters = out_shape[1]
        stride = (in_shape[-2] // out_shape[-2], in_shape[-1] // out_shape[-1])

        if type == 'C':
            # all shortcuts are projections
            return self.projection(incoming, out_filters, stride=stride)
        elif in_filters == out_filters:
            # A and B use identity shortcuts (if the dimensions stay)
            return incoming
        elif type == 'B':
            # if dimensions increase, B uses projections
            return self.projection(incoming, out_filters, stride=stride)
        elif type == 'A':
            if not numpy.all(in_shape[2:] == out_shape[2:]):
                shortcut = ExpressionLayer(
                    incoming, lambda x: x[:, :, ::stride[0], ::stride[1]],
                    in_shape[:2] + out_shape[2:])
            else:
                shortcut = incoming
            side = (out_filters - in_filters) // 2
            return PadLayer(shortcut, [side, 0, 0], batch_ndim=1)
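A small shape sketch of the option-A zero padding used above (shapes are illustrative, not taken from the snippet): with batch_ndim=1, PadLayer pads the channel axis symmetrically, so increasing the filter count only needs (out_filters - in_filters) // 2 extra channels on each side.

from lasagne.layers import InputLayer, PadLayer, get_output_shape

l_small = InputLayer((None, 16, 8, 8))
side = (32 - 16) // 2                       # 8 zero channels on each side
l_wide = PadLayer(l_small, [side, 0, 0], batch_ndim=1)
print(get_output_shape(l_wide))             # (None, 32, 8, 8)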
Code example #6
    def pool_2d_layer(cls, cur_layer, name, pool_size, dilation,
                      pool_layers_to_expand, pool_layers_to_remove):
        new_dilation = dilation

        if name in pool_layers_to_expand or name in pool_layers_to_remove:
            new_dilation *= pool_size
            stride = 1
        else:
            stride = pool_size

        if name not in pool_layers_to_remove:
            pool_size *= dilation

            if stride < pool_size:
                pad0 = (pool_size - stride) // 2
                pad1 = (pool_size - stride) - pad0
                cur_layer = PadLayer(cur_layer,
                                     width=[(pad0, pad1), (pad0, pad1)],
                                     name='{}_pad'.format(name))

            cur_layer = Pool2DLayer(cur_layer,
                                    pool_size=pool_size,
                                    stride=stride,
                                    name=name)

        return cur_layer, new_dilation
Code example #7
    def conv_2d_layer(cls,
                      cur_layer,
                      name,
                      num_filters,
                      filter_size,
                      dilation=1,
                      pad=1):
        if dilation == 1:
            cur_layer = Conv2DLayer(cur_layer,
                                    num_filters=num_filters,
                                    filter_size=filter_size,
                                    pad=pad,
                                    flip_filters=False,
                                    name=name)
        else:
            if pad == 0:
                pass
            elif pad >= 1:
                cur_layer = PadLayer(cur_layer,
                                     width=pad * dilation,
                                     name='{}_pad'.format(name))
            else:
                raise ValueError(
                    'Only padding of 0 or >= 1 supported, not {}'.format(pad))
            cur_layer = DilatedConv2DLayer(cur_layer,
                                           num_filters=num_filters,
                                           filter_size=filter_size,
                                           flip_filters=False,
                                           dilation=dilation,
                                           name=name)

        return cur_layer, dilation
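A shape sketch of the pad-then-dilate pattern above (illustrative values): padding by pad * dilation before a DilatedConv2DLayer with its own padding left at zero reproduces 'same'-style output for a 3x3 kernel.

from lasagne.layers import InputLayer, PadLayer, DilatedConv2DLayer, get_output_shape

# padded size 32 + 2 * (1 * 2) = 36, dilated 3x3 kernel spans 1 + 2 * 2 = 5,
# so 36 - 5 + 1 = 32 and the spatial size is preserved.
l_in = InputLayer((None, 64, 32, 32))
l_pad = PadLayer(l_in, width=1 * 2)                     # pad * dilation
l_dil = DilatedConv2DLayer(l_pad, num_filters=64, filter_size=3,
                           dilation=2, flip_filters=False)
print(get_output_shape(l_dil))                          # (None, 64, 32, 32)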
Code example #8
def build_model(height, width):
	net = OrderedDict()
	net['input'] = InputLayer((None, 3, height, width), name='input')
	net['conv1'] = ConvLayer(net['input'], num_filters=32, filter_size=7, pad='same', name='conv1')
	net['conv2'] = ConvLayer(net['conv1'], num_filters=32, filter_size=5, pad='same', name='conv2')
	net['conv3'] = ConvLayer(net['conv2'], num_filters=64, filter_size=3, pad='same', name='conv3')
	net['conv4'] = ConvLayer(net['conv3'], num_filters=64, filter_size=3, pad='same', name='conv4')

	net['pad5'] = PadLayer(net['conv4'], width=1, val=0, name='pad5')
	net['conv_dil5'] = DilatedConv2DLayer(net['pad5'], num_filters=64, filter_size=3, dilation=(1,1), name='conv_dil5')

	net['pad6'] = PadLayer(net['conv_dil5'], width=2, val=0, name='pad6')
	net['conv_dil6'] = DilatedConv2DLayer(net['pad6'], num_filters=64, filter_size=3, dilation=(2,2), name='conv_dil6')

	net['pad7'] = PadLayer(net['conv_dil6'], width=4, val=0, name='pad7')
	net['conv_dil7'] = DilatedConv2DLayer(net['pad7'], num_filters=64, filter_size=3, dilation=(4,4), name='conv_dil7')

	net['pad8'] = PadLayer(net['conv_dil7'], width=8, val=0, name='pad8')
	net['conv_dil8'] = DilatedConv2DLayer(net['pad8'], num_filters=64, filter_size=3, dilation=(8,8), name='conv_dil8')

	net['pad9'] = PadLayer(net['conv_dil8'], width=16, val=0, name='pad9')
	net['conv_dil9'] = DilatedConv2DLayer(net['pad9'], num_filters=64, filter_size=3, dilation=(16,16), name='conv_dil9')

	net['pad10'] = PadLayer(net['conv_dil9'], width=1, val=0, name='pad10')
	net['l_out'] = DilatedConv2DLayer(net['pad10'], num_filters=2, filter_size=3, dilation=(1,1), name='l_out')

	for layer in lasagne.layers.get_all_layers(net['l_out']):
		print(layer.name, layer.output_shape)
	print("output shape", net['l_out'].output_shape)

	net['l_in'] = net['input']
	return net
Code example #9
    def nn_fn(self):

        l_in_z = InputLayer((None, self.z_dim))
        l_in_x = InputLayer((None, self.max_length, self.emb_dim))

        l_in_z_reshape = ReshapeLayer(l_in_z, (
            [0],
            [1],
            1,
        ))
        l_in_z_rep = TileLayer(l_in_z_reshape, (1, 1, self.max_length))

        l_x_pre_pad = SliceLayer(PadLayer(l_in_x, [(1, 0), (0, 0)],
                                          batch_ndim=1),
                                 indices=slice(0, -1),
                                 axis=1)
        l_x_pre_pad = DimshuffleLayer(l_x_pre_pad, (0, 2, 1))
        l_x_pre_pad_drop = DropoutLayer(l_x_pre_pad,
                                        self.nn_word_drop,
                                        shared_axes=(1, ))

        l_concat = ConcatLayer((l_in_z_rep, l_x_pre_pad_drop), axis=1)

        l_in_d = Conv1DLayer(l_concat,
                             num_filters=self.nn_channels_external,
                             pad='same',
                             filter_size=1,
                             nonlinearity=None)

        for d in self.nn_dilations:
            l_cnn1 = Conv1DLayer(l_in_d,
                                 filter_size=1,
                                 num_filters=self.nn_channels_internal)
            l_dcnn = DilatedConv1DLayer(l_cnn1,
                                        filter_size=self.nn_filter_size,
                                        num_filters=self.nn_channels_internal,
                                        dilation=d)
            l_cnn2 = Conv1DLayer(l_dcnn,
                                 filter_size=1,
                                 num_filters=self.nn_channels_external)

            l_in_d = ElemwiseSumLayer([l_in_d, l_cnn2])

        l_final = Conv1DLayer(l_in_d,
                              filter_size=1,
                              num_filters=self.emb_dim,
                              nonlinearity=None)

        l_out = DimshuffleLayer(l_final, (0, 2, 1))

        return (l_in_z, l_in_x), l_out
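A dimshuffle sketch for the decoder above (illustrative sizes): Conv1DLayer expects (batch, channels, time), so the sequence is transposed before the convolution stack and transposed back at the end.

from lasagne.layers import InputLayer, DimshuffleLayer, get_output_shape

l_seq = InputLayer((None, 40, 300))          # (batch, max_length, emb_dim)
l_cwt = DimshuffleLayer(l_seq, (0, 2, 1))    # (batch, emb_dim, max_length)
print(get_output_shape(l_cwt))               # (None, 300, 40)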
Code example #10
def construct_gen(noise_1, noise_2, batch_size=10):
    # There are two time steps considered for this model, so two LSTMs
    # Reshape noises
    noise1_rshp = noise_1.dimshuffle(0, 'x', 1)
    noise2_rshp = noise_2.dimshuffle(0, 'x', 1)
    lstm1_inp = InputLayer((None, 1, 100), input_var=noise1_rshp)
    lstm2_inp = InputLayer((None, 1, 100), input_var=noise2_rshp)

    num_units = 100
    lstm2 = ExposedLSTMLayer(lstm2_inp, num_units)
    lstm2_h = SliceLayer(lstm2, indices=slice(num_units, None), axis=-1)
    lstm2_reshape = ReshapeLayer(lstm2_h, (batch_size, 100))

    print("LSTM2's output is " + str(lstm2_reshape.output_shape))

    build_bg = gen_bg(lstm1_inp)
    build_gfc = gen_fc(lstm2_reshape)
    build_gif = gen_fi(build_gfc)
    build_gfmask = gen_fmask(build_gfc)

    # Affine transformation and pasting with bg
    a_t = DenseLayer(lstm2_reshape, num_units=6, W=w1)  # 6-dim affine parameters; w1 is assumed to be defined elsewhere in the original module
    m_t_hat = NonlinearityLayer(PadLayer(
        TransformerLayer(build_gfmask, a_t, downsample_factor=2), 8),
                                nonlinearity=tanh)
    f_t_hat = NonlinearityLayer(PadLayer(
        TransformerLayer(build_gif, a_t, downsample_factor=2), 8),
                                nonlinearity=tanh)

    prior = ElemwiseMergeLayer([m_t_hat, f_t_hat],
                               merge_function=tensor.mul,
                               broadcastable=1)
    posterior = ElemwiseMergeLayer([ComplimentLayer(m_t_hat), build_bg],
                                   merge_function=tensor.mul,
                                   broadcastable=1)

    gen_image = ElemwiseSumLayer([prior, posterior])

    return gen_image
Code example #11
    def nn_fn(self):

        l_in_z = InputLayer((None, self.z_dim))
        l_in_x = InputLayer((None, self.max_length, self.emb_dim))

        l_z_rep = TileLayer(ReshapeLayer(l_in_z, ([0], 1, [1])),
                            (1, self.max_length, 1))

        l_h = None

        l_h_all = []

        for h in range(self.nn_depth):

            if h > 0:
                l_in_h = ConcatLayer((l_h, l_z_rep), axis=-1)
            else:
                l_in_h = l_z_rep

            l_h = LSTMLayer(l_in_h, num_units=self.nn_hid_units)
            l_h_all.append(l_h)

        l_h = ElemwiseSumLayer(l_h_all)

        l_x_pre_pad = []

        for l in range(1, self.nn_look_back + 1):
            l_x_pre_pad_l = SliceLayer(PadLayer(l_in_x, [(l, 0), (0, 0)],
                                                batch_ndim=1),
                                       indices=slice(0, -l),
                                       axis=1)
            l_x_pre_pad.append(l_x_pre_pad_l)

        l_x_pre_pad = ConcatLayer(l_x_pre_pad, axis=-1)

        for d in range(self.nn_look_back_depth - 1):
            l_x_pre_pad = DenseLayer(l_x_pre_pad,
                                     num_units=self.nn_hid_units,
                                     num_leading_axes=2)

        l_concat = ConcatLayer([l_h, l_x_pre_pad], axis=-1)

        l_out = DenseLayer(l_concat,
                           num_units=self.emb_dim,
                           nonlinearity=None,
                           num_leading_axes=2)

        return (l_in_z, l_in_x), l_out
Code example #12
    def build_pad_model(self, previous_layer):
        from lasagne.layers import PadLayer
        padnet = {}
        padnet['input'] = previous_layer
        padnet['pad'] = PadLayer(padnet['input'], (224 - 32) // 2)
        return padnet, padnet['pad']
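A shape sketch for the helper above (assumed 32x32 input): with width = (224 - 32) // 2 = 96, PadLayer pads both spatial axes by 96 on each side, growing a 32x32 feature map to 224x224.

from lasagne.layers import InputLayer, PadLayer, get_output_shape

l_prev = InputLayer((None, 3, 32, 32))
l_pad = PadLayer(l_prev, (224 - 32) // 2)
print(get_output_shape(l_pad))   # (None, 3, 224, 224)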
Code example #13
    def residual_block3(l, base_dim, increase_dim=False, projection=False):
        if increase_dim:
            layer_1 = batch_norm(
                ConvLayer(l,
                          num_filters=base_dim,
                          filter_size=(1, 1),
                          stride=(2, 2),
                          nonlinearity=None,
                          pad='same',
                          W=nn.init.HeNormal(gain='relu')))
        else:
            layer_1 = batch_norm(
                ConvLayer(l,
                          num_filters=base_dim,
                          filter_size=(1, 1),
                          stride=(1, 1),
                          nonlinearity=rectify,
                          pad='same',
                          W=nn.init.HeNormal(gain='relu')))
        layer_2 = batch_norm(
            ConvLayer(layer_1,
                      num_filters=base_dim,
                      filter_size=(3, 3),
                      stride=(1, 1),
                      nonlinearity=rectify,
                      pad='same',
                      W=nn.init.HeNormal(gain='relu')))
        layer_3 = batch_norm(
            ConvLayer(layer_2,
                      num_filters=4 * base_dim,
                      filter_size=(1, 1),
                      stride=(1, 1),
                      nonlinearity=rectify,
                      pad='same',
                      W=nn.init.HeNormal(gain='relu')))

        # add shortcut connection
        if increase_dim:
            if projection:
                # projection shortcut (option B in paper)
                projection = batch_norm(
                    ConvLayer(l,
                              num_filters=4 * base_dim,
                              filter_size=(1, 1),
                              stride=(2, 2),
                              nonlinearity=None,
                              pad='same',
                              b=None))
                block = NonlinearityLayer(ElemwiseSumLayer(
                    [layer_3, projection]),
                                          nonlinearity=rectify)
            else:
                # identity shortcut (option A in paper)
                # we use a pooling layer to get identity with strides,
                # since identity layers with stride don't exist in Lasagne
                identity = PoolLayer(l,
                                     pool_size=1,
                                     stride=(2, 2),
                                     mode='average_exc_pad')
                padding = PadLayer(identity, [4 * base_dim, 0, 0],
                                   batch_ndim=1)
                block = NonlinearityLayer(ElemwiseSumLayer([layer_3, padding]),
                                          nonlinearity=rectify)

        else:
            block = NonlinearityLayer(ElemwiseSumLayer([layer_3, l]),
                                      nonlinearity=rectify)

        return block
Code example #14
    def residual_block(l, increase_dim=False, projection=False):
        input_num_filters = l.output_shape[1]
        if increase_dim:
            first_stride = (2, 2)
            out_num_filters = input_num_filters * 2
        else:
            first_stride = (1, 1)
            out_num_filters = input_num_filters

        #print(l.output_shape)
        l_l = DenseLayer(l,
                         num_units=l.output_shape[3],
                         num_leading_axes=-1,
                         nonlinearity=None)
        #print(l.output_shape[3])
        #print("l_1.output_shape", l_l.output_shape)
        #stride=first_stride
        stack_left_1 = batch_norm(
            ConvLayer(l_l,
                      num_filters=out_num_filters,
                      filter_size=(3, 3),
                      stride=first_stride,
                      nonlinearity=rectify,
                      pad='same',
                      W=lasagne.init.HeNormal(gain='relu'),
                      flip_filters=False))
        stack_left_2 = batch_norm(
            ConvLayer(stack_left_1,
                      num_filters=out_num_filters,
                      filter_size=(3, 3),
                      stride=(1, 1),
                      nonlinearity=None,
                      pad='same',
                      W=lasagne.init.HeNormal(gain='relu'),
                      flip_filters=False))

        #stack_right_1 = batch_norm(ConvLayer(ElemwiseSumLayer([l, NegativeLayer(l_l)]), num_filters=out_num_filters, filter_size=(2,2), stride=first_stride, nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
        #stack_right_2 = batch_norm(ConvLayer(stack_right_1, num_filters=out_num_filters, filter_size=(2,2), stride=(1,1), nonlinearity=None, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
        print("first stack: ", stack_left_2.output_shape)

        # add shortcut connections
        if increase_dim:
            if projection:
                # projection shortcut, as option B in paper
                projection = batch_norm(
                    ConvLayer(l,
                              num_filters=out_num_filters,
                              filter_size=(1, 1),
                              stride=(2, 2),
                              nonlinearity=None,
                              pad='same',
                              b=None,
                              flip_filters=False))
                print("projection shape: ", projection.output_shape)
                ##block = NonlinearityLayer(ElemwiseSumLayer([stack_left_2, stack_right_2, projection]),nonlinearity=rectify)
                block = NonlinearityLayer(ElemwiseSumLayer(
                    [stack_left_2, projection]),
                                          nonlinearity=rectify)
            else:
                # identity shortcut, as option A in paper
                #print(l.output_shape[2])
                if (l.output_shape[2] % 2 == 0 and l.output_shape[3] % 2 == 0):
                    identity = ExpressionLayer(
                        l, lambda X: X[:, :, ::2, ::2], lambda s:
                        (s[0], s[1], s[2] // 2, s[3] // 2))
                elif (l.output_shape[2] % 2 == 0
                      and l.output_shape[3] % 2 == 1):
                    identity = ExpressionLayer(
                        l, lambda X: X[:, :, ::2, ::2], lambda s:
                        (s[0], s[1], s[2] // 2, s[3] // 2 + 1))
                elif (l.output_shape[2] % 2 == 1
                      and l.output_shape[3] % 2 == 0):
                    identity = ExpressionLayer(
                        l, lambda X: X[:, :, ::2, ::2], lambda s:
                        (s[0], s[1], s[2] // 2 + 1, s[3] // 2))
                else:
                    identity = ExpressionLayer(
                        l, lambda X: X[:, :, ::2, ::2], lambda s:
                        (s[0], s[1], s[2] // 2 + 1, s[3] // 2 + 1))
                padding = PadLayer(identity,
                                   [out_num_filters // 4, 0, 0],
                                   batch_ndim=1)
                print('------------------')
                print(stack_left_2.output_shape)
                #print(stack_right_2.output_shape)
                print(identity.output_shape)
                print(padding.output_shape)
                #block = NonlinearityLayer(ElemwiseSumLayer([stack_left_2, stack_right_2, padding]),nonlinearity=rectify)
                block = NonlinearityLayer(ElemwiseSumLayer(
                    [stack_left_2, padding]),
                                          nonlinearity=rectify)
        else:
            #block = NonlinearityLayer(ElemwiseSumLayer([stack_left_2, stack_right_2, l]),nonlinearity=rectify)
            print("l output shape: ", l.output_shape)
            block = NonlinearityLayer(ElemwiseSumLayer([stack_left_2, l]),
                                      nonlinearity=rectify)

        return block
Code example #15
def network(image, p):
    input_image = InputLayer(input_var = image,
                             shape     = (None, 128, 256, 3))

    input_image = DimshuffleLayer(input_image,
                                  pattern = (0,3,1,2))

    conv1        = batch_norm(Conv2DLayer(input_image,
                                          num_filters  = 16,
                                          filter_size  = (3,3),
                                          stride       = (1,1),
                                          nonlinearity = rectify,
                                          pad          = 'same'))

    conv1        = batch_norm(Conv2DLayer(conv1,
                                          num_filters  = 16,
                                          filter_size  = (3,3),
                                          stride       = (1,1),
                                          nonlinearity = rectify,
                                          pad          = 'same'))

    conv1        = DropoutLayer(conv1, p=p)

    conv1        = ConcatLayer([input_image,
                                conv1], axis = 1)

    conv2        = batch_norm(Conv2DLayer(conv1,
                                          num_filters  = 32,
                                          filter_size  = (3,3),
                                          stride       = (1,1),
                                          nonlinearity = rectify,
                                          pad          = 'same'))

    conv2        = batch_norm(Conv2DLayer(conv2,
                                          num_filters  = 32,
                                          filter_size  = (3,3),
                                          stride       = (1,1),
                                          nonlinearity = rectify,
                                          pad          = 'same'))

    conv2        = DropoutLayer(conv2, p=p)

    conv2        = batch_norm(ConcatLayer([conv2,
                                           conv1], axis = 1))

    atr1         = DilatedConv2DLayer(PadLayer(conv2, width = 1),
                                      num_filters  = 16,
                                      filter_size  = (3,3),
                                      dilation     = (1,1),
                                      pad          = 0,
                                      nonlinearity = rectify)

    atr2         = DilatedConv2DLayer(PadLayer(conv2, width = 2),
                                      num_filters  = 16,
                                      filter_size  = (3,3),
                                      dilation     = (2,2),
                                      pad          = 0,
                                      nonlinearity = rectify)

    atr4         = DilatedConv2DLayer(PadLayer(conv2, width = 4),
                                      num_filters  = 16,
                                      filter_size  = (3,3),
                                      dilation     = (4,4),
                                      pad          = 0,
                                      nonlinearity = rectify)

    atr8         = DilatedConv2DLayer(PadLayer(conv2, width = 8),
                                      num_filters  = 16,
                                      filter_size  = (3,3),
                                      dilation     = (8,8),
                                      pad          = 0,
                                      nonlinearity = rectify)

    sumblock    = ConcatLayer([conv2,atr1,atr2,atr4,atr8], axis = 1)

    crp         = MaxPool2DLayer(PadLayer(sumblock, width = 1),
                                 pool_size     = (3,3),
                                 stride        = (1,1),
                                 ignore_border = False)

    crp         = batch_norm(Conv2DLayer(crp,
                                         num_filters  = 115,
                                         filter_size  = (3,3),
                                         stride       = (1,1),
                                         nonlinearity = rectify,
                                         pad          = 'same'))

    sumblock    = ElemwiseSumLayer([sumblock,
                                    crp])

    ground      = batch_norm(Conv2DLayer(sumblock,
                                         num_filters  = 1,
                                         filter_size  = (3,3),
                                         stride       = (1,1),
                                         nonlinearity = output_layer_nonlinearity,
                                         pad          = 'same'))

    ground        = ReshapeLayer(ground,
                                 shape = ([0],128,256))

    return ground
Code example #16
File: networks.py, Project: sebastian-schlecht/im2vol
    def residual_block_up(l,
                          decrease_dim=False,
                          projection=True,
                          padding="same",
                          conv_filter=(5, 5),
                          proj_filter=(5, 5)):
        input_num_filters = l.output_shape[1]

        if decrease_dim:
            out_num_filters = input_num_filters // 2
            # Upsample
            l = Upscale2DLayer(l, 2)
        else:
            out_num_filters = input_num_filters
        # Now we can use a simple "normal" residual block
        stack_1 = batch_norm(
            ConvLayer(l,
                      num_filters=out_num_filters,
                      filter_size=conv_filter,
                      stride=(1, 1),
                      nonlinearity=rectify,
                      pad=padding,
                      W=lasagne.init.HeNormal(gain='relu'),
                      flip_filters=False))
        stack_2 = batch_norm(
            ConvLayer(stack_1,
                      num_filters=out_num_filters,
                      filter_size=(3, 3),
                      stride=(1, 1),
                      nonlinearity=None,
                      pad='same',
                      W=lasagne.init.HeNormal(gain='relu'),
                      flip_filters=False))

        # add shortcut connections
        if decrease_dim:
            if projection:
                # projection shortcut, as option B in paper
                projection = batch_norm(
                    ConvLayer(l,
                              num_filters=out_num_filters,
                              filter_size=proj_filter,
                              stride=(1, 1),
                              nonlinearity=None,
                              pad=padding,
                              b=None,
                              flip_filters=False))
                block = NonlinearityLayer(ElemwiseSumLayer(
                    [stack_2, projection]),
                                          nonlinearity=rectify)
            ## NOT IMPLEMENTED
            else:
                raise NotImplementedError()
                # identity shortcut, as option A in paper
                identity = ExpressionLayer(
                    l, lambda X: X[:, :, ::2, ::2], lambda s:
                    (s[0], s[1], s[2] // 2, s[3] // 2))
                padding = PadLayer(identity, [out_num_filters // 4, 0, 0],
                                   batch_ndim=1)
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, padding]),
                                          nonlinearity=rectify)
        else:
            block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]),
                                      nonlinearity=rectify)
        return block
Code example #17
def buildDAE_contextmod(input_concat_h_vars,
                        input_mask_var,
                        n_classes,
                        path_weights='/Tmp/romerosa/itinf/models/',
                        model_name='dae_model.npz',
                        trainable=False,
                        load_weights=False,
                        out_nonlin=linear,
                        concat_h=['input'],
                        noise=0.1):
    '''
    Build context module

    Parameters
    ----------
    input_concat_h_vars: list of theano tensors, variables to concatenate
    input_mask_var: theano tensor, input to context module
    n_classes: int, number of classes
    path_weights: string, path to the weights directory
    model_name: string, name of the weights file to load
    trainable: bool, whether the model is trainable (if False, parameters are frozen)
    load_weights: bool, whether to load pretrained weights
    out_nonlin: output nonlinearity
    concat_h: list of strings, names of the layers we want to concatenate
    noise: float, standard deviation of the Gaussian noise added to the input
    '''

    # context module does not reduce the image resolution
    assert all([el in ['input'] for el in concat_h])
    net = {}
    pos = 0

    # Contracting path
    net['input'] = InputLayer((None, n_classes, None, None), input_mask_var)

    # Noise
    if noise > 0:
        # net['noisy_input'] = GaussianNoiseLayerSoftmax(net['input'],
        #                                                sigma=noise)
        net['noisy_input'] = GaussianNoiseLayer(net['input'], sigma=noise)
        in_next = 'noisy_input'
    else:
        in_next = 'input'

    pos, out = model_helpers.concatenate(net, in_next, concat_h,
                                         input_concat_h_vars, pos, 3)

    class IdentityInit(Initializer):
        """ We adapt the same initializiation method than in the paper"""
        def sample(self, shape):
            n_filters, n_filters2, filter_size, filter_size2 = shape
            assert ((n_filters == n_filters2) & (filter_size == filter_size2))
            assert (filter_size % 2 == 1)

            W = np.zeros(shape, dtype='float32')
            for i in range(n_filters):
                W[i, i, filter_size // 2, filter_size // 2] = 1.
            return W

    net['conv1'] = Conv2DLayer(net[out],
                               n_classes,
                               3,
                               pad='same',
                               nonlinearity=rectify,
                               flip_filters=False)
    net['pad1'] = PadLayer(net['conv1'], width=32, val=0, batch_ndim=2)
    net['dilconv1'] = DilatedConv2DLayer(net['pad1'],
                                         n_classes,
                                         3,
                                         1,
                                         W=IdentityInit(),
                                         nonlinearity=rectify)
    net['dilconv2'] = DilatedConv2DLayer(net['dilconv1'],
                                         n_classes,
                                         3,
                                         2,
                                         W=IdentityInit(),
                                         nonlinearity=rectify)
    net['dilconv3'] = DilatedConv2DLayer(net['dilconv2'],
                                         n_classes,
                                         3,
                                         4,
                                         W=IdentityInit(),
                                         nonlinearity=rectify)
    net['dilconv4'] = DilatedConv2DLayer(net['dilconv3'],
                                         n_classes,
                                         3,
                                         8,
                                         W=IdentityInit(),
                                         nonlinearity=rectify)
    net['dilconv5'] = DilatedConv2DLayer(net['dilconv4'],
                                         n_classes,
                                         3,
                                         16,
                                         W=IdentityInit(),
                                         nonlinearity=rectify)
    net['dilconv6'] = DilatedConv2DLayer(net['dilconv5'],
                                         n_classes,
                                         3,
                                         1,
                                         W=IdentityInit(),
                                         nonlinearity=rectify)
    net['dilconv7'] = DilatedConv2DLayer(net['dilconv6'],
                                         n_classes,
                                         1,
                                         1,
                                         W=IdentityInit(),
                                         nonlinearity=linear)

    # Final dimshuffle, reshape and softmax
    net['final_dimshuffle'] = DimshuffleLayer(net['dilconv7'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = ReshapeLayer(net['final_dimshuffle'],
                                        (T.prod(laySize[0:3]), laySize[3]))
    net['probs'] = NonlinearityLayer(net['final_reshape'],
                                     nonlinearity=out_nonlin)

    # Go back to 4D
    net['probs_reshape'] = ReshapeLayer(
        net['probs'], (laySize[0], laySize[1], laySize[2], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'],
                                              (0, 3, 1, 2))
    # print('Input to last layer: ', net['probs_dimshuffle'].input_shape)
    print(net.keys())

    # Load weights
    if load_weights:
        with np.load(os.path.join(path_weights, model_name)) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]

        lasagne.layers.set_all_param_values(net['probs_dimshuffle'],
                                            param_values)

    # Do not train
    if not trainable:
        model_helpers.freezeParameters(net['probs_dimshuffle'], single=False)

    return net['probs_dimshuffle']
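A quick check of the IdentityInit initializer defined above (illustrative numbers): each filter has a single 1 at its spatial centre on the matching input channel, so every dilated convolution starts out as an identity mapping.

import numpy as np

shape = (4, 4, 3, 3)                 # (n_filters, n_filters, filter_size, filter_size)
W = np.zeros(shape, dtype='float32')
for i in range(shape[0]):
    W[i, i, 3 // 2, 3 // 2] = 1.

# Filter i only passes channel i through, untouched.
assert np.allclose(W[0, 0], [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert np.allclose(W[0, 1], np.zeros((3, 3)))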