Example #1
def broadcast_dot_layer(l_pred, l_targets, feature_dim, id_tag):
    l_broadcast = dimshuffle(l_pred, (0, 1, 'x'), name=id_tag + 'dot_broadcast')
    l_forget = ForgetSizeLayer(l_broadcast, axis=2, name=id_tag + 'dot_nosize')
    l_merge = ElemwiseMergeLayer((l_forget, l_targets), T.mul, name=id_tag + 'dot_elemwise_mul')
    l_pool = FeaturePoolLayer(l_merge, pool_size=feature_dim, axis=1,
                              pool_function=T.sum, name=id_tag + 'dot_pool')
    return reshape(l_pool, ([0], [2]), name=id_tag + 'broadcast_dot')
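A minimal usage sketch, assuming Lasagne/Theano plus the dimshuffle, reshape and ForgetSizeLayer helpers used above; shapes and names are illustrative, not from the original example:

from lasagne.layers import InputLayer

# l_pred: (batch, feature_dim), l_targets: (batch, feature_dim, n_targets)
# result: (batch, n_targets), i.e. one dot product per target column
l_pred_in = InputLayer((None, 64))
l_targets_in = InputLayer((None, 64, 10))
l_scores = broadcast_dot_layer(l_pred_in, l_targets_in, feature_dim=64, id_tag='ex_')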
Example #2
    def create_attention(self, gru_con, in_con_mask, condition, batch_size,
                         n_hidden_con, **kwargs):

        # (batch_size, n_attention)
        gru_cond2 = non_flattening_dense_layer(gru_con,
                                               self.in_con_mask,
                                               self.n_attention,
                                               nonlinearity=None)
        gru_que2 = DenseLayer(condition, self.n_attention, nonlinearity=None)
        gru_que2 = dimshuffle(gru_que2, (0, 'x', 1))

        att = ElemwiseSumLayer([gru_cond2, gru_que2])
        att = NonlinearityLayer(att, T.tanh)
        att = SliceLayer(non_flattening_dense_layer(att,
                                                    self.in_con_mask,
                                                    1,
                                                    nonlinearity=None),
                         indices=0,
                         axis=2)

        att_softmax = SequenceSoftmax(att, self.in_con_mask)

        rep = ElemwiseMergeLayer(
            [ForgetSizeLayer(dimshuffle(att_softmax,
                                        (0, 1, 'x'))), gru_con], T.mul)

        return ExpressionLayer(rep, lambda x: T.sum(x, axis=1), lambda s:
                               (s[0], ) + s[2:])
Example #3
def build_convpool_max(input_vars, input_shape=None):
    """
  Builds the complete network with maxpooling layer in time.
  :param input_vars: list of EEG images (one image per time window)
  :return: a pointer to the output of last layer
  """
    convnets = []
    W_init = None

    # Build parallel CNNs with shared weights (one per time window)
    for i in range(input_shape[0]):
        if i == 0:
            convnet, W_init = build_cnn(input_vars[i], input_shape)
        else:
            convnet, _ = build_cnn(input_vars[i], input_shape, W_init)

        convnets.append(convnet)
    # convpooling using Max pooling over frames
    convpool = ElemwiseMergeLayer(convnets, theano.tensor.maximum)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=512,
                          nonlinearity=lasagne.nonlinearities.rectify)

    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(convpool, p=.5),
        num_units=num_classes,  # num_classes must be defined in the enclosing scope
        nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
Example #4
def build_convpool_max(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=3):
    """
    Builds the complete network with maxpooling layer in time.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build parallel CNNs with shared weights (one per time window)
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(convnet)
    # convpooling using Max pooling over frames
    convpool = ElemwiseMergeLayer(convnets, theano.tensor.maximum)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = lasagne.layers.DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
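A hedged usage sketch for build_convpool_max, assuming build_cnn from the same file and one Theano tensor per time window; the class count and names are illustrative:

import theano.tensor as T

n_timewin = 3
input_vars = [T.tensor4('eeg_win{}'.format(i)) for i in range(n_timewin)]
# network points at the softmax output layer over nb_classes classes
network = build_convpool_max(input_vars, nb_classes=4,
                             imsize=32, n_colors=3, n_timewin=n_timewin)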
Example #5
def broadcast_sub_layer(l_pred, l_targets, feature_dim, id_tag):
    l_broadcast = dimshuffle(l_pred, (0, 1, 'x'),
                             name=id_tag + 'sub_broadcast')
    l_forget = ForgetSizeLayer(l_broadcast, axis=2, name=id_tag + 'sub_nosize')
    return ElemwiseMergeLayer((l_forget, l_targets),
                              T.sub,
                              name=id_tag + 'broadcast_sub')
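Unlike broadcast_dot_layer in Example #1, no pooling follows the merge, so the output keeps the full broadcast shape. A minimal sketch with illustrative shapes, assuming the same dimshuffle/ForgetSizeLayer helpers:

from lasagne.layers import InputLayer

l_pred_in = InputLayer((None, 64))         # (batch, feature_dim)
l_targets_in = InputLayer((None, 64, 10))  # (batch, feature_dim, n_targets)
# l_diff: (batch, feature_dim, n_targets) = prediction minus each target column
l_diff = broadcast_sub_layer(l_pred_in, l_targets_in, feature_dim=64, id_tag='ex_')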
Example #6
def apply_mask(layer_seq, layer_seq_mask):
    """
    seq: layer of shape (batch_size, length_seq, n_features)
    seq_mask: layer of shape (batch_size, length_seq)
    """
    return ElemwiseMergeLayer(
        [ForgetSizeLayer(dimshuffle(layer_seq_mask,
                                    (0, 1, 'x'))), layer_seq], T.mul)
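A minimal usage sketch, assuming the dimshuffle and ForgetSizeLayer helpers used by apply_mask are importable; shapes follow the docstring:

from lasagne.layers import InputLayer

seq = InputLayer((None, 20, 128))  # (batch_size, length_seq, n_features)
seq_mask = InputLayer((None, 20))  # (batch_size, length_seq), 1 = keep, 0 = padding
masked_seq = apply_mask(seq, seq_mask)  # padded time steps are zeroed across features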
Example #7
def add(*args, **kwargs):
    """Element-wise sum of layers"""
    inp_names = [
        layer.name or "layer" + str(i) for i, layer in enumerate(args)
    ]
    kwargs["name"] = kwargs.get("name", "sum(%s)" % (', '.join(inp_names)))

    return ElemwiseMergeLayer(args, T.add, **kwargs)
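A minimal usage sketch; the merged layers must share a common shape, and the layer name defaults to a readable sum(...) label:

from lasagne.layers import InputLayer

a = InputLayer((None, 32), name='a')
b = InputLayer((None, 32), name='b')
s = add(a, b)  # ElemwiseMergeLayer over T.add, named "sum(a, b)"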
Example #8
    def cnn_fn(self):

        l_in = InputLayer((None, self.max_length, self.vocab_size))

        l_in_T = DimshuffleLayer(l_in, (0, 2, 1))

        l_causal_conv = DilatedConv1DLayer(
            l_in_T,
            num_filters=self.nn_residual_channels,
            dilation=1,
            nonlinearity=None)

        l_prev = l_causal_conv

        skip_layers = []

        for h in range(len(self.nn_dilations)):

            l_filter = DilatedConv1DLayer(
                l_prev,
                num_filters=self.nn_dilation_channels,
                dilation=self.nn_dilations[h],
                nonlinearity=tanh)

            l_gate = DilatedConv1DLayer(l_prev,
                                        num_filters=self.nn_dilation_channels,
                                        dilation=self.nn_dilations[h],
                                        nonlinearity=sigmoid)

            l_merge = ElemwiseMergeLayer([l_filter, l_gate],
                                         merge_function=T.mul)

            l_dense = Conv1DLayer(l_merge,
                                  num_filters=self.nn_residual_channels,
                                  filter_size=1,
                                  nonlinearity=None)

            l_residual = ElemwiseSumLayer([l_prev, l_dense])

            l_skip = Conv1DLayer(l_merge,
                                 num_filters=self.nn_residual_channels,
                                 filter_size=1,
                                 nonlinearity=None)

            skip_layers.append(l_skip)

            l_prev = l_residual

        l_skip_sum = NonlinearityLayer(ElemwiseSumLayer(skip_layers),
                                       nonlinearity=elu)

        l_final = DimshuffleLayer(l_skip_sum, (0, 2, 1))

        return l_final
Example #9
def UNet_decoder_3(LR_conv1, LR_conv2, LR_conv3, LR_conv4, warp_conv1, warp_conv2, warp_conv3, warp_conv4):
    # 80
    mask4 = Conv2DLayer(ConcatLayer([LR_conv4, warp_conv4]), 64, 5, pad=2, W = W_init_SELU, b=Constant(0.), nonlinearity=sigmoid)
    warp_conv4_m = ElemwiseMergeLayer([warp_conv4, mask4], T.mul)
    warp_deconv4 = Deconv2DLayer(ConcatLayer([LR_conv4, warp_conv4_m]), num_filters=64, filter_size=4, stride=2, crop=1, W = W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)

    # 160
    mask3 = Conv2DLayer(ConcatLayer([warp_deconv4, LR_conv3, warp_conv3]), 64, 5, pad=2, W = W_init_SELU, b=Constant(0.), nonlinearity=sigmoid)
    warp_conv3_m = ElemwiseMergeLayer([warp_conv3, mask3], T.mul)
    warp_deconv3 = Deconv2DLayer(ConcatLayer([warp_deconv4, LR_conv3, warp_conv3_m]), num_filters=64, filter_size=4, stride=2, crop=1, W = W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)
    # 320
    mask2 = Conv2DLayer(ConcatLayer([warp_deconv3, LR_conv2, warp_conv2]), 64, 5, pad=2, W = W_init_SELU, b=Constant(0.), nonlinearity=sigmoid)
    warp_conv2_m = ElemwiseMergeLayer([warp_conv2, mask2], T.mul)
    warp_deconv2 = Deconv2DLayer(ConcatLayer([warp_deconv3, LR_conv2, warp_conv2_m]), num_filters=64, filter_size=4, stride=2, crop=1, W = W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)
    # final 
    mask1 = Conv2DLayer(ConcatLayer([warp_deconv2, LR_conv1, warp_conv1]), 64, 5, pad=2, W = W_init_SELU, b=Constant(0.), nonlinearity=sigmoid)
    warp_conv1_m = ElemwiseMergeLayer([warp_conv1, mask1], T.mul)
    post_fusion1 =  Conv2DLayer(ConcatLayer([warp_deconv2, LR_conv1, warp_conv1_m]), 64, 5, pad=2, W = W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)

    post_fusion2 = Conv2DLayer(post_fusion1, 64, 5, pad=2, W = W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)
    # feed the second fusion convolution into the output (post_fusion2 was otherwise unused)
    final = Conv2DLayer(post_fusion2, 3, 5, pad=2, W = W_init_linear, b=Constant(0.), nonlinearity=linear)

    test = Conv2DLayer(final, 3, 5, pad=2, W = W_init_linear, b=Constant(0.), nonlinearity=linear)
    return final
Example #10
def build_test_model():
    T_net = {}
    T_net['input'] = InputLayer((None, 4, 224, 224))

    #slice the input to get image and feat map part
    T_net['input_map'] = SliceLayer(T_net['input'],
                                    indices=slice(3, 4),
                                    axis=1)
    T_net['map112'] = PoolLayer(T_net['input_map'], 2)
    T_net['map56'] = PoolLayer(T_net['map112'], 2)
    T_net['map28'] = PoolLayer(T_net['map56'], 2)
    T_net_buff56 = [T_net['map56'] for i in range(256)]
    T_net['map56x256'] = concat(T_net_buff56)
    T_net_buff28 = [T_net['map28'] for i in range(512)]
    T_net['map28x512'] = concat(T_net_buff28)

    T_net['input_im'] = SliceLayer(T_net['input'], indices=slice(0, 3), axis=1)
    T_net['conv1_1'] = ConvLayer(T_net['input_im'],
                                 64,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv1_2'] = ConvLayer(T_net['conv1_1'],
                                 64,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['pool1'] = PoolLayer(T_net['conv1_2'], 2)
    T_net['conv2_1'] = ConvLayer(T_net['pool1'],
                                 128,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv2_2'] = ConvLayer(T_net['conv2_1'],
                                 128,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['pool2'] = PoolLayer(T_net['conv2_2'], 2)
    T_net['conv3_1'] = ConvLayer(T_net['pool2'],
                                 256,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv3_2'] = ConvLayer(T_net['conv3_1'],
                                 256,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv3_3'] = ConvLayer(T_net['conv3_2'],
                                 256,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv3_4'] = ConvLayer(T_net['conv3_3'],
                                 256,
                                 3,
                                 pad=1,
                                 flip_filters=False)

    T_net['conv3_map'] = ElemwiseMergeLayer(
        [T_net['conv3_1'], T_net['map56x256']], merge_function=T.mul)
    T_net['conv3_all'] = ElemwiseSumLayer(
        [T_net['conv3_4'], T_net['conv3_map']])

    T_net['pool3'] = PoolLayer(T_net['conv3_all'], 2)
    T_net['conv4_1'] = ConvLayer(T_net['pool3'],
                                 512,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv4_2'] = ConvLayer(T_net['conv4_1'],
                                 512,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv4_3'] = ConvLayer(T_net['conv4_2'],
                                 512,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv4_4'] = ConvLayer(T_net['conv4_3'],
                                 512,
                                 3,
                                 pad=1,
                                 flip_filters=False)

    T_net['conv4_map'] = ElemwiseMergeLayer(
        [T_net['conv4_1'], T_net['map28x512']], merge_function=T.mul)
    T_net['conv4_all'] = ElemwiseSumLayer(
        [T_net['conv4_4'], T_net['conv4_map']])

    T_net['pool4'] = PoolLayer(T_net['conv4_all'], 2)
    T_net['conv5_1'] = ConvLayer(T_net['pool4'],
                                 512,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv5_2'] = ConvLayer(T_net['conv5_1'],
                                 512,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv5_3'] = ConvLayer(T_net['conv5_2'],
                                 512,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['conv5_4'] = ConvLayer(T_net['conv5_3'],
                                 512,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    T_net['pool5'] = PoolLayer(T_net['conv5_4'], 2)
    T_net['fc6'] = DenseLayer(T_net['pool5'], num_units=4096)
    T_net['fc6_dropout'] = DropoutLayer(T_net['fc6'], p=0.)
    T_net['fc7'] = DenseLayer(T_net['fc6_dropout'], num_units=4096)
    T_net['fc7_dropout'] = DropoutLayer(T_net['fc7'], p=0.5)
    T_net['fc8'] = DenseLayer(T_net['fc7_dropout'],
                              num_units=1000,
                              nonlinearity=None)
    T_net['prob'] = NonlinearityLayer(T_net['fc8'], softmax)

    # T_net['pos_fc_layer']=DenseLayer(T_net['fc6_dropout'],num_units=2048)
    # T_net['pos_drop']=DropoutLayer(T_net['pos_fc_layer'],p=0.)
    # T_net['pred_pos_layer']=DenseLayer(T_net['pos_drop'],num_units=40,nonlinearity=sigmoid)

    #AU detection part
    T_net['au_fc_layer'] = DenseLayer(T_net['fc6_dropout'], num_units=2048)
    T_net['au_drop'] = DropoutLayer(T_net['au_fc_layer'], p=0.)
    T_net['output_layer'] = DenseLayer(T_net['au_drop'],
                                       num_units=12,
                                       nonlinearity=sigmoid)
    # T_net['final']=concat([T_net['pred_pos_layer'],T_net['output_layer']])

    return T_net
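A hedged sketch of compiling a forward pass from the returned layer dictionary; the 4-channel input stacks the RGB image with the feature map that the function slices off at the top:

import theano
import lasagne

T_net = build_test_model()
probs = lasagne.layers.get_output(T_net['prob'], deterministic=True)
aus = lasagne.layers.get_output(T_net['output_layer'], deterministic=True)
predict_fn = theano.function([T_net['input'].input_var], [probs, aus])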
Example #11
    def fused_convnets(self,
                       fusion_level,
                       fusion_type,
                       input_var1=None,
                       input_var2=None,
                       bottleneck_W=None,
                       weights_dir=None):

        net = OrderedDict()
        net['input_rgb'] = InputLayer((None, 4, 128, 128),
                                      input_var=input_var1)
        layer = 0
        for i in range(fusion_level):
            # Add convolution layers
            net['conv_rgb{0:d}'.format(i + 1)] = Conv2DLayer(
                net.values()[layer],
                num_filters=self._net_specs_dict['num_conv_filters'][i],
                filter_size=(self._net_specs_dict['conv_filter_size'][i], ) *
                2,
                pad='same')
            layer += 1
            if self._net_specs_dict['num_conv_layers'] <= 2 and\
                    i != fusion_level - 1:
                # Add pooling layers
                net['pool_rgb{0:d}'.format(i + 1)] = MaxPool2DLayer(
                    net.values()[layer], pool_size=(3, 3))
                layer += 1
            else:
                if i < 4:
                    if (i + 1) % 2 == 0 and i != fusion_level - 1:
                        # Add pooling layers
                        net['pool_rgb{0:d}'.format(i + 1)] = MaxPool2DLayer(
                            net.values()[layer], pool_size=(3, 3))
                        layer += 1
                else:
                    if (i + 1) == 7 and i != fusion_level - 1:
                        # Add pooling layers
                        net['pool_rgb{0:d}'.format(i + 1)] = MaxPool2DLayer(
                            net.values()[layer], pool_size=(3, 3))
                        layer += 1

        net['input_depth'] = InputLayer((None, 1, 128, 128),
                                        input_var=input_var2)
        layer += 1
        for i in range(fusion_level):
            # Add convolution layers
            net['conv_depth{0:d}'.format(i + 1)] = Conv2DLayer(
                net.values()[layer],
                num_filters=self._net_specs_dict['num_conv_filters'][i],
                filter_size=(self._net_specs_dict['conv_filter_size'][i], ) *
                2,
                pad='same')
            layer += 1
            if self._net_specs_dict['num_conv_layers'] <= 2 and\
                    i != fusion_level - 1:
                # Add pooling layers
                net['pool_depth{0:d}'.format(i + 1)] = MaxPool2DLayer(
                    net.values()[layer], pool_size=(3, 3))
                layer += 1
            else:
                if i < 4:
                    if (i + 1) % 2 == 0 and i != fusion_level - 1:
                        # Add pooling layers
                        net['pool_depth{0:d}'.format(i+1)] =\
                            MaxPool2DLayer(net.values()[layer],
                                           pool_size=(3, 3))
                        layer += 1
                else:
                    if (i + 1) == 7 and i != fusion_level - 1:
                        # Add pooling layers
                        net['pool_depth{0:d}'.format(i+1)] =\
                            MaxPool2DLayer(net.values()[layer],
                                           pool_size=(3, 3))
                        layer += 1
        # Fuse ConvNets by fusion_level and fusion_type
        if fusion_type == self.MAX:
            net['merge'] =\
                ElemwiseMergeLayer([net['conv_rgb{0:d}'.format(fusion_level)],
                                    net['conv_depth{0:d}'.format(fusion_level)]
                                    ], T.maximum)
            layer += 1
        elif fusion_type == self.SUM:
            net['merge'] =\
                ElemwiseMergeLayer([net['conv_rgb{0:d}'.format(fusion_level)],
                                    net['conv_depth{0:d}'.format(fusion_level)]
                                    ], T.add)
            layer += 1
        elif fusion_type == self.CONCAT:
            net['merge'] = concat([
                net['conv_rgb{0:d}'.format(fusion_level)],
                net['conv_depth{0:d}'.format(fusion_level)]
            ])
            layer += 1
        elif fusion_type == self.CONCATCONV:
            net['concat'] = concat([
                net['conv_rgb{0:d}'.format(fusion_level)],
                net['conv_depth{0:d}'.format(fusion_level)]
            ])
            layer += 1
            net['merge'] = Conv2DLayer(
                net['concat'],
                num_filters=self._net_specs_dict['num_conv_filters'][
                    fusion_level - 1],
                filter_size=(1, 1),
                nonlinearity=None)
            layer += 1
        # Max-pooling to the merged
        if fusion_level in [2, 4, 7]:
            net['pool_merged'] = MaxPool2DLayer(net['merge'], pool_size=(3, 3))
            layer += 1
        # Continue the rest of the convolutional part of the network,
        # if the fusion took place before the last convolutional layer,
        # else just connect the convolutional part with the fully connected
        # part
        if self._net_specs_dict['num_conv_layers'] > fusion_level:
            for i in range(fusion_level,
                           self._net_specs_dict['num_conv_layers']):
                # Add convolution layers
                net['conv_merged{0:d}'.format(i + 1)] = Conv2DLayer(
                    net.values()[layer],
                    num_filters=self._net_specs_dict['num_conv_filters'][i],
                    filter_size=(self._net_specs_dict['conv_filter_size'][i], )
                    * 2,
                    pad='same')
                layer += 1
                if self._net_specs_dict['num_conv_layers'] <= 2:
                    # Add pooling layers
                    net['pool_merged{0:d}'.format(i + 1)] = MaxPool2DLayer(
                        net.values()[layer], pool_size=(3, 3))
                    layer += 1
                else:
                    if i < 4:
                        if (i + 1) % 2 == 0:
                            # Add pooling layers
                            net['pool_merged{0:d}'.format(i+1)] =\
                                MaxPool2DLayer(net.values()[layer],
                                               pool_size=(3, 3))
                            layer += 1
                    else:
                        if (i + 1) == 7:
                            # Add pooling layers
                            net['pool_merged{0:d}'.format(i+1)] =\
                                MaxPool2DLayer(net.values()[layer],
                                               pool_size=(3, 3))
                            layer += 1
        # Fc-layers
        net['fc1'] = DenseLayer(net.values()[layer],
                                self._net_specs_dict['num_fc_units'][0])
        # Add dropout layer
        net['dropout1'] = dropout(net['fc1'], p=self._model_hp_dict['p'])
        net['fc2'] = DenseLayer(net['dropout1'],
                                self._net_specs_dict['num_fc_units'][1])
        # Add dropout layer
        net['dropout2'] = dropout(net['fc2'], p=self._model_hp_dict['p'])
        if bottleneck_W is not None:
            # Add bottleneck layer
            net['bottleneck'] = DenseLayer(net['dropout2'], 30)
            # Add output layer (regression output; tanh nonlinearity)
            net['output'] = DenseLayer(
                net['bottleneck'],
                3 * self._num_joints,
                W=bottleneck_W[0:30],
                nonlinearity=lasagne.nonlinearities.tanh)
        else:
            # Add output layer (regression output; tanh nonlinearity)
            net['output'] = DenseLayer(
                net['dropout2'],
                3 * self._num_joints,
                nonlinearity=lasagne.nonlinearities.tanh)
        if weights_dir is not None:
            lw = LoadWeights(weights_dir, net)
            lw.load_weights_numpy()
        return net
Example #12
    def dense_fused_convnets(self,
                             fusion_level,
                             fusion_type,
                             input_var1=None,
                             input_var2=None,
                             bottleneck_W=None,
                             weights_dir=None):

        net = OrderedDict()
        net['input_rgb'] = InputLayer((None, 4, 128, 128),
                                      input_var=input_var1)
        layer = 0
        for i in range(self._net_specs_dict['num_conv_layers']):
            # Add convolution layers
            net['conv_rgb{0:d}'.format(i + 1)] = Conv2DLayer(
                net.values()[layer],
                num_filters=self._net_specs_dict['num_conv_filters'][i],
                filter_size=(self._net_specs_dict['conv_filter_size'][i], ) *
                2,
                pad='same')
            layer += 1
            if self._net_specs_dict['num_conv_layers'] <= 2:
                # Add pooling layers
                net['pool_rgb{0:d}'.format(i + 1)] = MaxPool2DLayer(
                    net.values()[layer], pool_size=(3, 3))
                layer += 1
            else:
                if i < 4:
                    if (i + 1) % 2 == 0:
                        # Add pooling layers
                        net['pool_rgb{0:d}'.format(i + 1)] = MaxPool2DLayer(
                            net.values()[layer], pool_size=(3, 3))
                        layer += 1
                else:
                    if (i + 1) == 7:
                        # Add pooling layers
                        net['pool_rgb{0:d}'.format(i + 1)] = MaxPool2DLayer(
                            net.values()[layer], pool_size=(3, 3))
                        layer += 1
        # Fc-layers
        net['fc1_rgb'] = DenseLayer(net.values()[layer],
                                    self._net_specs_dict['num_fc_units'][0])
        layer += 1
        if fusion_level == 2:
            # Add dropout layer
            net['dropout1_rgb'] = dropout(net['fc1_rgb'],
                                          p=self._model_hp_dict['p'])
            layer += 1
            net['fc2_rgb'] = DenseLayer(
                net['dropout1_rgb'], self._net_specs_dict['num_fc_units'][1])
            layer += 1

        net['input_depth'] = InputLayer((None, 1, 128, 128),
                                        input_var=input_var2)
        layer += 1
        for i in range(self._net_specs_dict['num_conv_layers']):
            # Add convolution layers
            net['conv_depth{0:d}'.format(i + 1)] = Conv2DLayer(
                net.values()[layer],
                num_filters=self._net_specs_dict['num_conv_filters'][i],
                filter_size=(self._net_specs_dict['conv_filter_size'][i], ) *
                2,
                pad='same')
            layer += 1
            if self._net_specs_dict['num_conv_layers'] <= 2:
                # Add pooling layers
                net['pool_depth{0:d}'.format(i + 1)] = MaxPool2DLayer(
                    net.values()[layer], pool_size=(3, 3))
                layer += 1
            else:
                if i < 4:
                    if (i + 1) % 2 == 0:
                        # Add pooling layers
                        net['pool_depth{0:d}'.format(i+1)] =\
                            MaxPool2DLayer(net.values()[layer],
                                           pool_size=(3, 3))
                        layer += 1
                else:
                    if (i + 1) == 7:
                        # Add pooling layers
                        net['pool_depth{0:d}'.format(i+1)] =\
                            MaxPool2DLayer(net.values()[layer],
                                           pool_size=(3, 3))
                        layer += 1
        # Fc-layers
        net['fc1_depth'] = DenseLayer(net.values()[layer],
                                      self._net_specs_dict['num_fc_units'][0])
        layer += 1
        if fusion_level == 2:
            # Add dropout layer
            net['dropout1_depth'] = dropout(net['fc1_depth'],
                                            p=self._model_hp_dict['p'])
            layer += 1
            net['fc2_depth'] = DenseLayer(
                net['dropout1_depth'], self._net_specs_dict['num_fc_units'][1])
            layer += 1

        # Fuse ConvNets by fusion_level and fusion_type
        if fusion_type == self.MAX:
            net['merge'] =\
                ElemwiseMergeLayer([net['fc%i_rgb' % fusion_level],
                                    net['fc%i_depth' % fusion_level]],
                                   T.maximum)
            layer += 1
        elif fusion_type == self.SUM:
            net['merge'] =\
                ElemwiseMergeLayer([net['fc%i_rgb' % fusion_level],
                                    net['fc%i_depth' % fusion_level]],
                                   T.add)
            layer += 1
        elif fusion_type == self.CONCAT:
            net['merge'] = concat([
                net['fc%i_rgb' % fusion_level],
                net['fc%i_depth' % fusion_level]
            ])
            layer += 1
        elif fusion_type == self.CONCATCONV:
            net['fc%i_rgb_res' % fusion_level] =\
                reshape(net['fc%i_rgb' % fusion_level], ([0], 1, [1]))
            layer += 1
            net['fc%i_depth_res' % fusion_level] =\
                reshape(net['fc%i_depth' % fusion_level], ([0], 1, [1]))
            layer += 1
            net['concat'] = concat([
                net['fc%i_rgb_res' % fusion_level],
                net['fc%i_depth_res' % fusion_level]
            ])
            layer += 1
            net['merge_con'] = Conv1DLayer(net['concat'],
                                           num_filters=1,
                                           filter_size=(1, ),
                                           nonlinearity=None)
            layer += 1
            net['merge'] = reshape(net['merge_con'], ([0], [2]))
            layer += 1

        if fusion_level == 1:
            # Add dropout layer
            net['dropout1'] = dropout(net['merge'], p=self._model_hp_dict['p'])
            layer += 1
            net['fc2'] = DenseLayer(net['dropout1'],
                                    self._net_specs_dict['num_fc_units'][1])
            layer += 1
            # Add dropout layer
            net['dropout2'] = dropout(net['fc2'], p=self._model_hp_dict['p'])
            layer += 1
        else:
            # Add dropout layer
            net['dropout2'] = dropout(net['merge'], p=self._model_hp_dict['p'])
            layer += 1
        # Add output layer (regression)
        if bottleneck_W is not None:
            # Add bottleneck layer
            net['bottleneck'] = DenseLayer(net['dropout2'], 30)
            # Add output layer (regression output; tanh nonlinearity)
            net['output'] = DenseLayer(
                net['bottleneck'],
                3 * self._num_joints,
                W=bottleneck_W[0:30],
                nonlinearity=lasagne.nonlinearities.tanh)
        else:
            # Add output layer (regression output; tanh nonlinearity)
            net['output'] = DenseLayer(
                net['dropout2'],
                3 * self._num_joints,
                nonlinearity=lasagne.nonlinearities.tanh)
        if weights_dir is not None:
            lw = LoadWeights(weights_dir, net)
            lw.load_weights_numpy()
        return net
Example #13
    def __init__(self,
                 input_shape=(None, 3, None, None),
                 n_filters=48,
                 n_pool=4,
                 n_layers_per_block=5,
                 dropout_p=0.2):
        """
        This code implements the Fully Convolutional DenseNet described in https://arxiv.org/abs/1611.09326
        The network consist of a downsampling path, where dense blocks and transition down are applied, followed
        by an upsampling path where transition up and dense blocks are applied.
        Skip connections are used between the downsampling path and the upsampling path
        Each layer is a composite function of BN - ReLU - Conv and the last layer is a softmax layer.

        :param input_shape: shape of the input batch. Only the first dimension (n_channels) is needed
        :param n_classes: number of classes
        :param n_filters_first_conv: number of filters for the first convolution applied
        :param n_pool: number of pooling layers = number of transition down = number of transition up
        :param growth_rate: number of new feature maps created by each layer in a dense block
        :param n_layers_per_block: number of layers per block. Can be an int or a list of size 2 * n_pool + 1
        :param dropout_p: dropout rate applied after each convolution (0. for not using)
        """

        if type(n_layers_per_block) == list:
            assert (len(n_layers_per_block) == 2 * n_pool + 1)
        elif type(n_layers_per_block) == int:
            n_layers_per_block = [n_layers_per_block] * (2 * n_pool + 1)
        else:
            raise ValueError

        # Theano variables
        self.input_var = T.tensor4('input_var', dtype='float32')  # input image
        self.target_var = T.tensor4('target_var', dtype='float32')  # target

        #####################
        # First Convolution #
        #####################

        inputs = InputLayer(input_shape, self.input_var)

        # We perform a first convolution. All the feature maps will be stored in the tensor called stack (the Tiramisu)
        stack = Conv2DLayer(inputs,
                            n_filters[0],
                            filter_size=1,
                            pad='same',
                            W=HeUniform(gain='relu'),
                            nonlinearity=linear,
                            flip_filters=False)

        #####################
        # Downsampling path #
        #####################

        skip_connection_list = []

        for i in range(n_pool):
            # Dense Block
            for j in range(n_layers_per_block[i]):
                # Compute new feature maps
                l = BN_ReLU_Conv(stack, n_filters[i], dropout_p=dropout_p)
                # add new outputs
                stack = ElemwiseMergeLayer([stack, l], T.add)
            # At the end of the block, the current stack is stored in the skip_connections list
            skip_connection_list.append(stack)

            # Transition Down
            stack = TransitionDown(stack, n_filters[i + 1], dropout_p)

        skip_connection_list = skip_connection_list[::-1]

        #####################
        #     Bottleneck    #
        #####################

        # We store now the output of the next dense block in a list. We will only upsample these new feature maps
        block_to_upsample = []

        # Dense Block
        for j in range(n_layers_per_block[n_pool]):
            l = BN_ReLU_Conv(stack, n_filters[n_pool], dropout_p=dropout_p)
            stack = ElemwiseMergeLayer([stack, l], T.add)

        #######################
        #   Upsampling path   #
        #######################

        for i in range(n_pool):
            # Transition Up ( Upsampling + concatenation with the skip connection)
            stack = TransitionUpRes(skip_connection_list[i],
                                    stack,
                                    n_filters[n_pool + i + 1],
                                    dropout_p=dropout_p)

            # Dense Block
            block_to_upsample = []
            for j in range(n_layers_per_block[n_pool + i + 1]):
                l = BN_ReLU_Conv(stack,
                                 n_filters[n_pool + i + 1],
                                 dropout_p=dropout_p)
                stack = ElemwiseMergeLayer([stack, l], T.add)

        #####################
        #      Softmax      #
        #####################

        self.output_layer = SpatialSoftmaxLayer(stack)
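A self-contained sketch of the residual accumulation used in the dense blocks above, with BN_ReLU_Conv simplified to a plain convolution for illustration:

import theano.tensor as T
from lasagne.layers import InputLayer, Conv2DLayer, ElemwiseMergeLayer

stack = InputLayer((None, 48, 64, 64))
for _ in range(3):
    l = Conv2DLayer(stack, num_filters=48, filter_size=3, pad='same')
    # element-wise addition requires l and stack to keep identical shapes
    stack = ElemwiseMergeLayer([stack, l], T.add)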
Example #14
	set4 = get_multiple_block(set3,num_filt=128, pooling_size=(8,8))

	# Dense Layers follow.
	h_flat = flatten(set4)

	## 5 Way Max-Out Layer (DenseMaxout)

	'''
	Reference - https://github.com/fchollet/keras/pull/3128
	'''
	
	h_dense = []
	for _ in xrange(5):
		h_dense.append( DenseLayer(h_flat,500,W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.linear))
	
	h17 = ElemwiseMergeLayer(h_dense, merge_function=T.maximum)

	h17 = BatchNormLayer(h17)
	h17_drop = dropout(h17,0.2)

	# Softmax Layer
	network = DenseLayer(h17_drop,nb_classes, nonlinearity = lasagne.nonlinearities.softmax, W = lasagne.init.GlorotUniform())

	net_output = lasagne.layers.get_output(network)
	true_output = T.matrix()

	all_params = lasagne.layers.get_all_params(network,trainable=True)
	loss = T.mean(lasagne.objectives.categorical_crossentropy(net_output,true_output))
	updates = lasagne.updates.adam(loss,all_params)

	train = theano.function(inputs= [l_in.input_var,true_output] , outputs=[net_output,loss], updates = updates)
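A self-contained sketch of the 5-way max-out pattern used above; note that the merge function itself (T.maximum), not a call to it, is passed to ElemwiseMergeLayer:

import theano.tensor as T
import lasagne
from lasagne.layers import InputLayer, DenseLayer, ElemwiseMergeLayer

l_in = InputLayer((None, 100))
pieces = [DenseLayer(l_in, 500, nonlinearity=lasagne.nonlinearities.linear)
          for _ in range(5)]
maxout = ElemwiseMergeLayer(pieces, merge_function=T.maximum)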
Example #15
File: kdnet.py Project: Alobal123/diplomka
    def __init__(self, config):
        self.clouds = T.tensor3(dtype='float32')
        self.norms = [
            T.tensor3(dtype='float32') for step in xrange(config['steps'])
        ]
        self.target = T.vector(dtype='int64')
        KDNet = {}
        if config['input_features'] == 'no':
            KDNet['input'] = InputLayer((None, 1, 2**config['steps']),
                                        input_var=self.clouds)
        else:
            KDNet['input'] = InputLayer((None, 3, 2**config['steps']),
                                        input_var=self.clouds)
        for i in xrange(config['steps']):
            KDNet['norm{}_r'.format(i + 1)] = InputLayer(
                (None, 3, 2**(config['steps'] - 1 - i)),
                input_var=self.norms[i])
            KDNet['norm{}_l'.format(i + 1)] = ExpressionLayer(
                KDNet['norm{}_r'.format(i + 1)], lambda X: -X)
            KDNet['norm{}_l_X-'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_l'.format(i + 1)], '-', 0, config['n_f'][i + 1])
            KDNet['norm{}_l_Y-'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_l'.format(i + 1)], '-', 1, config['n_f'][i + 1])
            KDNet['norm{}_l_Z-'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_l'.format(i + 1)], '-', 2, config['n_f'][i + 1])
            KDNet['norm{}_l_X+'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_l'.format(i + 1)], '+', 0, config['n_f'][i + 1])
            KDNet['norm{}_l_Y+'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_l'.format(i + 1)], '+', 1, config['n_f'][i + 1])
            KDNet['norm{}_l_Z+'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_l'.format(i + 1)], '+', 2, config['n_f'][i + 1])
            KDNet['norm{}_r_X-'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_r'.format(i + 1)], '-', 0, config['n_f'][i + 1])
            KDNet['norm{}_r_Y-'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_r'.format(i + 1)], '-', 1, config['n_f'][i + 1])
            KDNet['norm{}_r_Z-'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_r'.format(i + 1)], '-', 2, config['n_f'][i + 1])
            KDNet['norm{}_r_X+'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_r'.format(i + 1)], '+', 0, config['n_f'][i + 1])
            KDNet['norm{}_r_Y+'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_r'.format(i + 1)], '+', 1, config['n_f'][i + 1])
            KDNet['norm{}_r_Z+'.format(i + 1)] = SPTNormReshapeLayer(
                KDNet['norm{}_r'.format(i + 1)], '+', 2, config['n_f'][i + 1])
            KDNet['cloud{}'.format(i+1)] = SharedDotLayer(KDNet['input'], config['n_f'][i]) if i == 0 else \
                                    ElemwiseSumLayer([KDNet['cloud{}_l_X-_masked'.format(i)],
                                                     KDNet['cloud{}_l_Y-_masked'.format(i)],
                                                     KDNet['cloud{}_l_Z-_masked'.format(i)],
                                                     KDNet['cloud{}_l_X+_masked'.format(i)],
                                                     KDNet['cloud{}_l_Y+_masked'.format(i)],
                                                     KDNet['cloud{}_l_Z+_masked'.format(i)],
                                                     KDNet['cloud{}_r_X-_masked'.format(i)],
                                                     KDNet['cloud{}_r_Y-_masked'.format(i)],
                                                     KDNet['cloud{}_r_Z-_masked'.format(i)],
                                                     KDNet['cloud{}_r_X+_masked'.format(i)],
                                                     KDNet['cloud{}_r_Y+_masked'.format(i)],
                                                     KDNet['cloud{}_r_Z+_masked'.format(i)]])
            KDNet['cloud{}_bn'.format(i + 1)] = BatchNormDNNLayer(
                KDNet['cloud{}'.format(i + 1)])
            KDNet['cloud{}_relu'.format(i + 1)] = NonlinearityLayer(
                KDNet['cloud{}_bn'.format(i + 1)], rectify)
            KDNet['cloud{}_r'.format(i + 1)] = ExpressionLayer(
                KDNet['cloud{}_relu'.format(i + 1)], lambda X: X[:, :, 1::2],
                (None, config['n_f'][i], 2**(config['steps'] - i - 1)))
            KDNet['cloud{}_l'.format(i + 1)] = ExpressionLayer(
                KDNet['cloud{}_relu'.format(i + 1)], lambda X: X[:, :, ::2],
                (None, config['n_f'][i], 2**(config['steps'] - i - 1)))

            KDNet['cloud{}_l_X-'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_l'.format(i + 1)], config['n_f'][i + 1])
            KDNet['cloud{}_l_Y-'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_l'.format(i + 1)], config['n_f'][i + 1])
            KDNet['cloud{}_l_Z-'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_l'.format(i + 1)], config['n_f'][i + 1])
            KDNet['cloud{}_l_X+'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_l'.format(i + 1)], config['n_f'][i + 1])
            KDNet['cloud{}_l_Y+'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_l'.format(i + 1)], config['n_f'][i + 1])
            KDNet['cloud{}_l_Z+'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_l'.format(i + 1)], config['n_f'][i + 1])

            KDNet['cloud{}_r_X-'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_r'.format(i + 1)],
                config['n_f'][i + 1],
                W=KDNet['cloud{}_l_X-'.format(i + 1)].W,
                b=KDNet['cloud{}_l_X-'.format(i + 1)].b)
            KDNet['cloud{}_r_Y-'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_r'.format(i + 1)],
                config['n_f'][i + 1],
                W=KDNet['cloud{}_l_Y-'.format(i + 1)].W,
                b=KDNet['cloud{}_l_Y-'.format(i + 1)].b)
            KDNet['cloud{}_r_Z-'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_r'.format(i + 1)],
                config['n_f'][i + 1],
                W=KDNet['cloud{}_l_Z-'.format(i + 1)].W,
                b=KDNet['cloud{}_l_Z-'.format(i + 1)].b)
            KDNet['cloud{}_r_X+'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_r'.format(i + 1)],
                config['n_f'][i + 1],
                W=KDNet['cloud{}_l_X+'.format(i + 1)].W,
                b=KDNet['cloud{}_l_X+'.format(i + 1)].b)
            KDNet['cloud{}_r_Y+'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_r'.format(i + 1)],
                config['n_f'][i + 1],
                W=KDNet['cloud{}_l_Y+'.format(i + 1)].W,
                b=KDNet['cloud{}_l_Y+'.format(i + 1)].b)
            KDNet['cloud{}_r_Z+'.format(i + 1)] = SharedDotLayer(
                KDNet['cloud{}_r'.format(i + 1)],
                config['n_f'][i + 1],
                W=KDNet['cloud{}_l_Z+'.format(i + 1)].W,
                b=KDNet['cloud{}_l_Z+'.format(i + 1)].b)

            KDNet['cloud{}_l_X-_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_l_X-'.format(i + 1)],
                KDNet['norm{}_l_X-'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_l_Y-_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_l_Y-'.format(i + 1)],
                KDNet['norm{}_l_Y-'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_l_Z-_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_l_Z-'.format(i + 1)],
                KDNet['norm{}_l_Z-'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_l_X+_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_l_X+'.format(i + 1)],
                KDNet['norm{}_l_X+'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_l_Y+_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_l_Y+'.format(i + 1)],
                KDNet['norm{}_l_Y+'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_l_Z+_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_l_Z+'.format(i + 1)],
                KDNet['norm{}_l_Z+'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_r_X-_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_r_X-'.format(i + 1)],
                KDNet['norm{}_r_X-'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_r_Y-_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_r_Y-'.format(i + 1)],
                KDNet['norm{}_r_Y-'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_r_Z-_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_r_Z-'.format(i + 1)],
                KDNet['norm{}_r_Z-'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_r_X+_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_r_X+'.format(i + 1)],
                KDNet['norm{}_r_X+'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_r_Y+_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_r_Y+'.format(i + 1)],
                KDNet['norm{}_r_Y+'.format(i + 1)]
            ], T.mul)
            KDNet['cloud{}_r_Z+_masked'.format(i + 1)] = ElemwiseMergeLayer([
                KDNet['cloud{}_r_Z+'.format(i + 1)],
                KDNet['norm{}_r_Z+'.format(i + 1)]
            ], T.mul)

        KDNet['cloud_fin'] = ElemwiseSumLayer([
            KDNet['cloud{}_l_X-_masked'.format(config['steps'])],
            KDNet['cloud{}_l_Y-_masked'.format(config['steps'])],
            KDNet['cloud{}_l_Z-_masked'.format(config['steps'])],
            KDNet['cloud{}_l_X+_masked'.format(config['steps'])],
            KDNet['cloud{}_l_Y+_masked'.format(config['steps'])],
            KDNet['cloud{}_l_Z+_masked'.format(config['steps'])],
            KDNet['cloud{}_r_X-_masked'.format(config['steps'])],
            KDNet['cloud{}_r_Y-_masked'.format(config['steps'])],
            KDNet['cloud{}_r_Z-_masked'.format(config['steps'])],
            KDNet['cloud{}_r_X+_masked'.format(config['steps'])],
            KDNet['cloud{}_r_Y+_masked'.format(config['steps'])],
            KDNet['cloud{}_r_Z+_masked'.format(config['steps'])]
        ])

        KDNet['cloud_fin_bn'] = BatchNormDNNLayer(KDNet['cloud_fin'])
        KDNet['cloud_fin_relu'] = NonlinearityLayer(KDNet['cloud_fin_bn'],
                                                    rectify)
        KDNet['cloud_fin_reshape'] = ReshapeLayer(KDNet['cloud_fin_relu'],
                                                  (-1, config['n_f'][-1]))
        KDNet['output'] = DenseLayer(KDNet['cloud_fin_reshape'],
                                     config['num_classes'],
                                     nonlinearity=softmax)

        prob = get_output(KDNet['output'])
        prob_det = get_output(KDNet['output'], deterministic=True)

        weights = get_all_params(KDNet['output'], trainable=True)
        l2_pen = regularize_network_params(KDNet['output'], l2)

        loss = categorical_crossentropy(
            prob, self.target).mean() + config['l2'] * l2_pen
        accuracy = categorical_accuracy(prob, self.target).mean()

        lr = theano.shared(np.float32(config['learning_rate']))
        updates = adam(loss, weights, learning_rate=lr)

        self.train_fun = theano.function([self.clouds] + self.norms +
                                         [self.target], [loss, accuracy],
                                         updates=updates)
        self.prob_fun = theano.function([self.clouds] + self.norms +
                                        [self.target], [loss, prob_det])

        self.KDNet = KDNet
Example #16
def build_model():
    net = {}
    net['input'] = InputLayer((None, 4, 224, 224))

    # slice the input to get image and feat map part
    net['input_map'] = SliceLayer(net['input'], indices=slice(3, 4), axis=1)
    net['map112'] = PoolLayer(net['input_map'], 2)
    net['map56'] = PoolLayer(net['map112'], 2)
    net['map28'] = PoolLayer(net['map56'], 2)
    net_buff56 = [net['map56'] for i in range(256)]
    net['map56x256'] = concat(net_buff56)
    net_buff28 = [net['map28'] for i in range(512)]
    net['map28x512'] = concat(net_buff28)

    net['input_im'] = SliceLayer(net['input'], indices=slice(0, 3), axis=1)
    net['conv1_1'] = ConvLayer(
        net['input_im'], 64, 3, pad=1, flip_filters=False,trainable=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad=1, flip_filters=False,trainable=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(
        net['pool1'], 128, 3, pad=1, flip_filters=False,trainable=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad=1, flip_filters=False,trainable=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(
        net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_4'] = ConvLayer(
        net['conv3_3'], 256, 3, pad=1, flip_filters=False)

    net['conv3_map'] = ElemwiseMergeLayer([net['conv3_1'], net['map56x256']], merge_function=T.mul)
    net['conv3_all'] = ElemwiseSumLayer([net['conv3_4'], net['conv3_map']])

    net['pool3'] = PoolLayer(net['conv3_all'], 2)
    net['conv4_1'] = ConvLayer(
        net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['conv4_4'] = ConvLayer(
        net['conv4_3'], 512, 3, pad=1, flip_filters=False)

    net['conv4_map'] = ElemwiseMergeLayer([net['conv4_1'], net['map28x512']], merge_function=T.mul)
    net['conv4_all'] = ElemwiseSumLayer([net['conv4_4'], net['conv4_map']])

    net['pool4'] = PoolLayer(net['conv4_all'], 2)
    net['conv5_1'] = ConvLayer(
        net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['conv5_4'] = ConvLayer(
        net['conv5_3'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_4'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)


    return net
Example #17
def buildFCN8(nb_in_channels,
              input_var,
              path_weights='/Tmp/romerosa/itinf/models/' +
              'camvid/fcn8_model.npz',
              n_classes=21,
              load_weights=False,
              void_labels=[],
              trainable=True,
              layer=['probs_dimshuffle'],
              pascal=False,
              temperature=1.0):
    '''
    Build fcn8 model
    '''

    net = {}

    # Contracting path
    net['input'] = InputLayer((None, nb_in_channels, None, None), input_var)

    # pool 1
    net['conv1_1'] = ConvLayer(net['input'],
                               64,
                               3,
                               pad=100,
                               flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'],
                               64,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)

    # pool 2
    net['conv2_1'] = ConvLayer(net['pool1'],
                               128,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'],
                               128,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)

    # pool 3
    net['conv3_1'] = ConvLayer(net['pool2'],
                               256,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'],
                               256,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'],
                               256,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)

    # pool 4
    net['conv4_1'] = ConvLayer(net['pool3'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)

    # pool 5
    net['conv5_1'] = ConvLayer(net['pool4'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)

    # fc6
    net['fc6'] = ConvLayer(net['pool5'],
                           4096,
                           7,
                           pad='valid',
                           flip_filters=False)
    net['fc6_dropout'] = DropoutLayer(net['fc6'])

    # fc7
    net['fc7'] = ConvLayer(net['fc6_dropout'],
                           4096,
                           1,
                           pad='valid',
                           flip_filters=False)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)

    net['score_fr'] = ConvLayer(net['fc7_dropout'],
                                n_classes,
                                1,
                                pad='valid',
                                flip_filters=False)

    # Upsampling path

    # Unpool
    net['score2'] = DeconvLayer(net['score_fr'],
                                n_classes,
                                4,
                                stride=2,
                                crop='valid',
                                nonlinearity=linear)
    net['score_pool4'] = ConvLayer(net['pool4'], n_classes, 1, pad='same')
    net['score_fused'] = ElemwiseSumLayer(
        (net['score2'], net['score_pool4']),
        cropping=[None, None, 'center', 'center'])

    # Unpool
    net['score4'] = DeconvLayer(net['score_fused'],
                                n_classes,
                                4,
                                stride=2,
                                crop='valid',
                                nonlinearity=linear)
    net['score_pool3'] = ConvLayer(net['pool3'], n_classes, 1, pad='valid')
    net['score_final'] = ElemwiseSumLayer(
        (net['score4'], net['score_pool3']),
        cropping=[None, None, 'center', 'center'])
    # Unpool
    net['upsample'] = DeconvLayer(net['score_final'],
                                  n_classes,
                                  16,
                                  stride=8,
                                  crop='valid',
                                  nonlinearity=linear)
    upsample_shape = lasagne.layers.get_output_shape(net['upsample'])[1]
    net['input_tmp'] = InputLayer((None, upsample_shape, None, None),
                                  input_var)

    net['score'] = ElemwiseMergeLayer(
        (net['input_tmp'], net['upsample']),
        merge_function=lambda input, deconv: deconv,
        cropping=[None, None, 'center', 'center'])

    # Final dimshuffle, reshape and softmax
    net['final_dimshuffle'] = \
        lasagne.layers.DimshuffleLayer(net['score'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = \
        lasagne.layers.ReshapeLayer(net['final_dimshuffle'],
                                    (T.prod(laySize[0:3]),
                                     laySize[3]))
    net['probs'] = lasagne.layers.NonlinearityLayer(net['final_reshape'],
                                                    nonlinearity=softmax)

    # Do not train
    if not trainable:
        model_helpers.freezeParameters(net['probs'])

    # Go back to 4D
    net['probs_reshape'] = ReshapeLayer(
        net['probs'], (laySize[0], laySize[1], laySize[2], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'],
                                              (0, 3, 1, 2))

    # Apply temperature
    if load_weights:
        soft_value = net['upsample'].W.get_value() / temperature
        net['upsample'].W.set_value(soft_value)
        soft_value = net['upsample'].b.get_value() / temperature
        net['upsample'].b.set_value(soft_value)

    return [net[el] for el in layer]
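The `temperature` block above rescales the parameters of the final `upsample` layer. Because everything between that layer and the softmax is linear (the merge simply passes the deconvolution output through, and the dimshuffle/reshape only rearrange axes), dividing its `W` and `b` by `temperature` is equivalent to computing `softmax(logits / temperature)`, which flattens the class distribution for temperatures above 1. A NumPy-only sketch of the effect (values are illustrative, not taken from the model):

import numpy as np

def softmax(z):
    # numerically stable softmax over a 1-D array
    e = np.exp(z - z.max())
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.1])
print(softmax(logits))        # ~[0.66, 0.24, 0.10]  -> temperature = 1
print(softmax(logits / 2.0))  # ~[0.50, 0.30, 0.19]  -> temperature = 2, softer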
def buildFCN8_DAE(input_concat_h_vars, input_mask_var, n_classes, nb_in_channels=3,
                  path_weights='/Tmp/romerosa/itinf/models/',
                  model_name='fcn8_model.npz', trainable=False,
                  load_weights=False, pretrained=False, freeze=False,
                  pretrained_path='/data/lisatmp4/romerosa/itinf/models/camvid/',
                  pascal=False, return_layer='probs_dimshuffle',
                  concat_h=['input'], noise=0.1, dropout=0.5):

    '''
    Build an FCN-8 network used as a denoising autoencoder (DAE)
    '''

    net = {}
    pos = 0

    assert all([el in ['pool1', 'pool2', 'pool3', 'pool4', 'input']
                for el in concat_h])

    # Contracting path
    net['input'] = InputLayer((None, nb_in_channels, None, None),
                              input_mask_var)
    # Optionally corrupt the input with Gaussian noise
    if noise > 0:
        # net['noisy_input'] = GaussianNoiseLayerSoftmax(net['input'],
        #                                                sigma=noise)
        net['noisy_input'] = GaussianNoiseLayer(net['input'], sigma=noise)
        in_layer = 'noisy_input'
    else:
        in_layer = 'input'

    pos, out = model_helpers.concatenate(net, in_layer, concat_h,
                                         input_concat_h_vars, pos,
                                         net['input'].output_shape[1])

    # pool 1
    net['conv1_1'] = ConvLayer(
        net[out], 64, 3, pad=100, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad='same', flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)

    pos, out = model_helpers.concatenate(net, 'pool1', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool1'].output_shape[1])

    # pool 2
    net['conv2_1'] = ConvLayer(
        net[out], 128, 3, pad='same', flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad='same', flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)

    pos, out = model_helpers.concatenate(net, 'pool2', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool2'].output_shape[1])

    # pool 3
    net['conv3_1'] = ConvLayer(
        net[out], 256, 3, pad='same', flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad='same', flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad='same', flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)

    pos, out = model_helpers.concatenate(net, 'pool3', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool3'].output_shape[1])

    # pool 4
    net['conv4_1'] = ConvLayer(
        net[out], 512, 3, pad='same', flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad='same', flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad='same', flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)

    pos, out = model_helpers.concatenate(net, 'pool4', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool4'].output_shape[1])

    # pool 5
    net['conv5_1'] = ConvLayer(
        net[out], 512, 3, pad='same', flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad='same', flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad='same', flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)

    pos, out = model_helpers.concatenate(net, 'pool5', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool5'].output_shape[1])

    # fc6
    net['fc6'] = ConvLayer(
        net[out], 4096, 7, pad='valid', flip_filters=False)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=dropout)

    # fc7
    net['fc7'] = ConvLayer(
        net['fc6_dropout'], 4096, 1, pad='valid', flip_filters=False)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=dropout)

    net['score_fr'] = ConvLayer(
        net['fc7_dropout'], n_classes, 1, pad='valid', flip_filters=False)

    # Upsampling path

    # Unpool
    net['score2'] = DeconvLayer(net['score_fr'], n_classes, 4, stride=2,
                                crop='valid', nonlinearity=linear)
    net['score_pool4'] = ConvLayer(net['pool4'], n_classes, 1,
                                   pad='same')
    net['score_fused'] = ElemwiseSumLayer((net['score2'],
                                           net['score_pool4']),
                                          cropping=[None, None, 'center',
                                                    'center'])

    # Unpool
    net['score4'] = DeconvLayer(net['score_fused'], n_classes, 4,
                                stride=2, crop='valid', nonlinearity=linear)
    net['score_pool3'] = ConvLayer(net['pool3'], n_classes, 1,
                                   pad='valid')
    net['score_final'] = ElemwiseSumLayer((net['score4'],
                                           net['score_pool3']),
                                          cropping=[None, None, 'center',
                                                    'center'])
    # Unpool
    net['upsample'] = DeconvLayer(net['score_final'], n_classes, 16,
                                  stride=8, crop='valid', nonlinearity=linear)
    upsample_shape = lasagne.layers.get_output_shape(net['upsample'])[1]
    net['input_tmp'] = InputLayer((None, upsample_shape, None,
                                   None), input_mask_var)

    net['score'] = ElemwiseMergeLayer((net['input_tmp'], net['upsample']),
                                      merge_function=lambda input, deconv:
                                      deconv,
                                      cropping=[None, None, 'center',
                                                'center'])

    # Final dimshuffle, reshape and softmax
    net['final_dimshuffle'] = \
        lasagne.layers.DimshuffleLayer(net['score'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = \
        lasagne.layers.ReshapeLayer(net['final_dimshuffle'],
                                    (T.prod(laySize[0:3]),
                                     laySize[3]))
    net['probs'] = lasagne.layers.NonlinearityLayer(net['final_reshape'],
                                                    nonlinearity=softmax)

    # Load weights
    if load_weights:
        pretrained = False
        with np.load(os.path.join(path_weights, model_name)) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        lasagne.layers.set_all_param_values(net['probs'], param_values)

    # In case we want to re-use the weights of an FCN8 model pretrained
    # from images (not GT)
    if pretrained:
        print('Loading pretrained weights')
        if pascal:
            path_weights = '/data/lisatmp4/romerosa/itinf/models/camvid/pascal-fcn8s-tvg-dag.mat'
            if 'tvg' in path_weights:
                str_filter = 'f'
                str_bias = 'b'
            else:
                str_filter = '_filter'
                str_bias = '_bias'

            W = sio.loadmat(path_weights)

            # Load the parameter values into the net
            num_params = W.get('params').shape[1]
            str_ind = [''.join(x for x in concat if x.isdigit()) for concat in concat_h]
            list_of_lays = ['conv' + str(int(x)+1) + '_1' for x in str_ind if x]
            list_of_lays += ['conv1_1'] if nb_in_channels != 3 or 'input' in concat_h else []
            print(list_of_lays)

            for i in range(num_params):
                # Get layer name from the saved model
                name = str(W.get('params')[0][i][0])[3:-2]
                # Get parameter value
                param_value = W.get('params')[0][i][1]

                # Load weights
                if name.endswith(str_filter):
                    raw_name = name[:-len(str_filter)]

                    if raw_name not in list_of_lays:
                        print('Copying weights for ' + raw_name)
                        if 'score' not in raw_name and \
                           'upsample' not in raw_name and \
                           'final' not in raw_name and \
                           'probs' not in raw_name:

                            # print 'Initializing layer ' + raw_name
                            param_value = param_value.T
                            param_value = np.swapaxes(param_value, 2, 3)
                            net[raw_name].W.set_value(param_value)
                    else:
                        print('Ignoring ' + raw_name)

                # Load bias terms
                if name.endswith(str_bias):
                    raw_name = name[:-len(str_bias)]
                    if 'score' not in raw_name and \
                       'upsample' not in raw_name and \
                       'final' not in raw_name and \
                       'probs' not in raw_name:

                        param_value = np.squeeze(param_value)
                        net[raw_name].b.set_value(param_value)

        else:
            with np.load(os.path.join(pretrained_path, model_name)) as f:
                start = 0 if nb_in_channels == f['arr_%d' % 0].shape[1] \
                    else 2
                param_values = [f['arr_%d' % i] for i in range(start,
                                                               len(f.files))]
            all_layers = lasagne.layers.get_all_layers(net['probs'])
            all_layers = [l for l in all_layers
                          if not isinstance(l, (InputLayer,
                                                GaussianNoiseLayerSoftmax,
                                                GaussianNoiseLayer))]
            all_layers = all_layers[1:] if start > 0 else all_layers
            # Freeze parameters after the last concatenation layer
            last_concat = [idx for idx, l in enumerate(all_layers)
                           if isinstance(l, ConcatLayer)][-1]
            count = 0
            for idx, layer in enumerate(all_layers):
                layer_params = layer.get_params()
                for p in layer_params:
                    if hasattr(layer, 'input_layer') and not isinstance(layer.input_layer, ConcatLayer):
                        p.set_value(param_values[count])
                        if freeze:
                            model_helpers.freezeParameters(layer, single=True)
                    if isinstance(layer.input_layer, ConcatLayer) and idx == last_concat:
                        print('freezing')
                        freeze = True
                    count += 1

    # Do not train
    if not trainable:
        model_helpers.freezeParameters(net['probs'])

    # Go back to 4D
    net['probs_reshape'] = ReshapeLayer(net['probs'], (laySize[0], laySize[1],
                                                       laySize[2], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'],
                                              (0, 3, 1, 2))

    return net[return_layer]
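A minimal usage sketch for the DAE builder above; the variable names, class count and keyword choices below are placeholders for illustration, not values from the original project:

import theano
import theano.tensor as T
import lasagne

input_mask_var = T.tensor4('noisy_mask')   # corrupted segmentation to denoise
input_h_var = T.tensor4('h_input')         # representation concatenated at the input
dae_top = buildFCN8_DAE([input_h_var], input_mask_var, n_classes=11,
                        concat_h=['input'], load_weights=False,
                        trainable=True, return_layer='probs_dimshuffle')
probs = lasagne.layers.get_output(dae_top, deterministic=True)
dae_fn = theano.function([input_h_var, input_mask_var], probs)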
Example #19
    def _build_disc(self):
        inputs = OrderedDict()
        inputs['x'] = InputLayer((None, 4, 64, 64))
        inputs['c'] = InputLayer((None, 843))
        inputs['v'] = InputLayer((None, 4))
        inputs['t'] = InputLayer((None, 8))

        layer_c = inputs['c']
        layer_c = DenseLayer(layer_c, 512, nonlinearity=leaky_rectify)
        layer_c.params[layer_c.W].add('dense')
        layer_c = (DenseLayer(layer_c, 512, nonlinearity=leaky_rectify))
        layer_c.params[layer_c.W].add('dense')

        layer_v = inputs['v']
        layer_v = DenseLayer(layer_v, 512, nonlinearity=leaky_rectify)
        layer_v.params[layer_v.W].add('dense')
        layer_v = (DenseLayer(layer_v, 512, nonlinearity=leaky_rectify))
        layer_v.params[layer_v.W].add('dense')

        layer_t = inputs['t']
        layer_t = DenseLayer(layer_t, 512, nonlinearity=leaky_rectify)
        layer_t.params[layer_t.W].add('dense')
        layer_t = (DenseLayer(layer_t, 512, nonlinearity=leaky_rectify))
        layer_t.params[layer_t.W].add('dense')

        layer_i = ConcatLayer([layer_c, layer_v, layer_t])
        layer_i = DenseLayer(layer_i, 1024, nonlinearity=leaky_rectify)
        layer_i.params[layer_i.W].add('dense')
        layer_i = DenseLayer(layer_i, 1024, nonlinearity=None)
        layer_i.params[layer_i.W].add('dense')
        layer_i = NonlinearityLayer(
            layer_i,
            lambda x: x / T.sqrt(T.sum(T.square(x), axis=1, keepdims=True)))

        layer_x = inputs['x']
        layer_x_n = layer_x
        layer_x = weight_norm(
            Conv2DLayer(layer_x_n, 64, 5, 2, 'same', nonlinearity=None,
                        b=None))
        if self.reg: layer_x = dropout(layer_x)
        layer_x = NonlinearityLayer(layer_x, leaky_rectify)
        layer_x = weight_norm(
            Conv2DLayer(layer_x, 64, 5, 2, 'same', nonlinearity=None, b=None))
        if self.reg: layer_x = dropout(layer_x)
        layer_x = NonlinearityLayer(layer_x, leaky_rectify)
        layer_x = weight_norm(
            Conv2DLayer(layer_x, 128, 5, 2, 'same', nonlinearity=None, b=None))
        if self.reg: layer_x = dropout(layer_x)
        layer_x = NonlinearityLayer(layer_x, leaky_rectify)
        layer_x = weight_norm(
            Conv2DLayer(layer_x, 256, 5, 2, 'same', nonlinearity=None, b=None))
        layer_x = NonlinearityLayer(layer_x, leaky_rectify)

        layer_x = FlattenLayer(layer_x)
        layer_x = DenseLayer(layer_x, 1024, nonlinearity=leaky_rectify)
        layer_x.params[layer_x.W].add('dense')

        layer_x = DenseLayer(layer_x, 1024, nonlinearity=None)
        layer_x.params[layer_x.W].add('dense')
        layer_x = NonlinearityLayer(
            layer_x,
            lambda x: x / T.sqrt(T.sum(T.square(x), axis=1, keepdims=True)))

        layer = ElemwiseMergeLayer([layer_i, layer_x], T.mul)
        layer = ConcatLayer([layer, layer_x, layer_i])
        layer = DenseLayer(layer, 1024, nonlinearity=leaky_rectify)
        layer.params[layer.W].add('dense')

        layer_r = DenseLayer(layer, 1024, nonlinearity=leaky_rectify)
        layer_r.params[layer_r.W].add('dense')
        layer_r = DenseLayer(layer_r, 1, nonlinearity=None)
        layer_r.params[layer_r.W].add('dense')
        layer_r_0 = NonlinearityLayer(layer_r, nonlinearity=sigmoid)
        layer_r_1 = NonlinearityLayer(
            layer_r, nonlinearity=lambda x: x - T.log(1 + T.exp(x)))
        layer_r_2 = NonlinearityLayer(
            layer_r, nonlinearity=lambda x: -T.log(1 + T.exp(x)))

        layer_s = DenseLayer(layer, 1024, nonlinearity=leaky_rectify)
        layer_s.params[layer_s.W].add('dense')
        layer_s = DenseLayer(layer_s, 1, nonlinearity=None)
        layer_s.params[layer_s.W].add('dense')
        layer_s_0 = NonlinearityLayer(layer_s, nonlinearity=sigmoid)
        layer_s_1 = NonlinearityLayer(
            layer_s, nonlinearity=lambda x: x - T.log(1 + T.exp(x)))
        layer_s_2 = NonlinearityLayer(
            layer_s, nonlinearity=lambda x: -T.log(1 + T.exp(x)))

        outputs = OrderedDict()
        outputs['s'] = layer_s_0
        outputs['log(s)'] = layer_s_1
        outputs['log(1-s)'] = layer_s_2
        outputs['r'] = layer_r_0
        outputs['log(r)'] = layer_r_1
        outputs['log(1-r)'] = layer_r_2

        self.disc_inputs = inputs
        self.disc_outputs = outputs
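A hedged sketch of how the discriminator built above might be compiled, assuming `disc` is an instance whose `_build_disc()` has already run; the symbolic variable names are assumptions:

import theano
import theano.tensor as T
import lasagne

x = T.tensor4('x')                      # (batch, 4, 64, 64)
c = T.matrix('c')                       # (batch, 843)
v = T.matrix('v')                       # (batch, 4)
t = T.matrix('t')                       # (batch, 8)
r = lasagne.layers.get_output(disc.disc_outputs['r'],
                              {disc.disc_inputs['x']: x,
                               disc.disc_inputs['c']: c,
                               disc.disc_inputs['v']: v,
                               disc.disc_inputs['t']: t},
                              deterministic=True)  # disable dropout
disc_fn = theano.function([x, c, v, t], r)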
Example #20
def network_convpool_cnn_max(input_vars,
                             nb_classes,
                             imsize=[32, 32],
                             n_colors=3,
                             n_timewin=5,
                             n_layers=(4, 2),
                             n_filters_first=32,
                             dense_num_unit=[512, 512],
                             shared_weights=True,
                             batch_norm_dense=False,
                             batch_norm_conv=False):
    """
    Builds the complete network with maxpooling layer in time.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image, e.g. [32, 32]
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs (weights shared across windows unless shared_weights=False)
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i],
                                        imsize=imsize,
                                        n_colors=n_colors,
                                        n_filters_first=n_filters_first,
                                        n_layers=n_layers,
                                        batch_norm_conv=batch_norm_conv)
            if not shared_weights:
                w_init = None
        else:
            convnet, _ = build_cnn(input_vars[i],
                                   w_init=w_init,
                                   imsize=imsize,
                                   n_colors=n_colors,
                                   n_filters_first=n_filters_first,
                                   n_layers=n_layers,
                                   batch_norm_conv=batch_norm_conv)
        convnets.append(FlattenLayer(convnet))
    # convpooling using Max pooling over frames
    convpool = ElemwiseMergeLayer(convnets, theano.tensor.maximum)
    # Fully-connected layers (dense_num_unit) with 50% dropout on their inputs:
    for i in range(len(dense_num_unit)):
        convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                              num_units=dense_num_unit[i],
                              nonlinearity=lasagne.nonlinearities.rectify)
        if batch_norm_dense:
            convpool = batch_norm(convpool)
    # And, finally, the nb_classes-unit output layer (sigmoid if binary,
    # softmax otherwise) with 50% dropout on its inputs:
    if nb_classes == 1:
        nonlinearity = lasagne.nonlinearities.sigmoid
    else:
        nonlinearity = lasagne.nonlinearities.softmax

    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes,
                          nonlinearity=nonlinearity)

    return convpool
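A hypothetical usage sketch for the builder above; it assumes `build_cnn` and its dependencies are importable and uses made-up input names:

import theano
import theano.tensor as T
import lasagne

n_timewin = 5
input_vars = [T.tensor4('eeg_win_%d' % i) for i in range(n_timewin)]
network = network_convpool_cnn_max(input_vars, nb_classes=2,
                                   imsize=[32, 32], n_colors=3,
                                   n_timewin=n_timewin)
probs = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function(input_vars, probs)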
Example #21
    def nn_fn(self):
        l_in_x = InputLayer((None, self.embedding_dim, self.max_length))
        l_in_z = InputLayer((None, self.z_dim))
        l_causal_conv = DilatedConv1DLayer(
            l_in_x,
            num_filters=self.nn_residual_channels,
            dilation=1,
            nonlinearity=None)
        l_prev = l_causal_conv

        skip_layers = []

        for h in range(len(self.nn_dilations)):
            l_x_filter = DilatedConv1DLayer(
                l_prev,
                num_filters=self.nn_dilation_channels,
                dilation=self.nn_dilations[h],
                nonlinearity=None,
                b=None)

            l_z_filter = DenseLayer(l_in_z,
                                    num_units=self.nn_dilation_channels,
                                    nonlinearity=None,
                                    b=None)
            l_z_filter_reshape = ReshapeLayer(l_z_filter, ([0], [1], 1))
            l_z_filter_rep = RepeatLayer(l_z_filter_reshape,
                                         self.max_length,
                                         axis=-1,
                                         ndim=3)

            l_filter = NonlinearityLayer(ElemwiseSumLayer(
                [l_x_filter, l_z_filter_rep]),
                                         nonlinearity=tanh)

            l_x_gate = DilatedConv1DLayer(
                l_prev,
                num_filters=self.nn_dilation_channels,
                dilation=self.nn_dilations[h],
                nonlinearity=None,
                b=None)

            l_z_gate = DenseLayer(l_in_z,
                                  num_units=self.nn_dilation_channels,
                                  nonlinearity=None,
                                  b=None)
            l_z_gate_reshape = ReshapeLayer(l_z_gate, ([0], [1], 1))
            l_z_gate_rep = RepeatLayer(l_z_gate_reshape,
                                       self.max_length,
                                       axis=-1,
                                       ndim=3)

            l_gate = NonlinearityLayer(ElemwiseSumLayer(
                [l_x_gate, l_z_gate_rep]),
                                       nonlinearity=sigmoid)

            l_merge = ElemwiseMergeLayer([l_filter, l_gate],
                                         merge_function=T.mul)

            l_dense = Conv1DLayer(l_merge,
                                  num_filters=self.nn_residual_channels,
                                  filter_size=1,
                                  nonlinearity=None,
                                  b=None)

            l_residual = ElemwiseSumLayer([l_prev, l_dense])

            l_skip = Conv1DLayer(l_merge,
                                 num_filters=self.embedding_dim,
                                 filter_size=1,
                                 nonlinearity=None,
                                 b=None)

            skip_layers.append(l_skip)

            l_prev = l_residual

        l_skip_sum = NonlinearityLayer(ElemwiseSumLayer(skip_layers),
                                       nonlinearity=elu)
        l_prev = l_skip_sum

        for h in range(2):
            l_h = Conv1DLayer(l_prev,
                              num_filters=self.embedding_dim,
                              filter_size=1,
                              nonlinearity=None,
                              b=None)
            l_z = DenseLayer(l_in_z,
                             num_units=self.embedding_dim,
                             nonlinearity=None,
                             b=None)
            l_z_reshape = ReshapeLayer(l_z, ([0], [1], 1))
            l_z_reshape_rep = RepeatLayer(l_z_reshape,
                                          self.max_length,
                                          axis=-1,
                                          ndim=3)
            l_sum = NonlinearityLayer(ElemwiseSumLayer([l_h, l_z_reshape_rep]),
                                      nonlinearity=elu)
            l_prev = l_sum

        l_out = DimshuffleLayer(l_prev, (0, 2, 1))

        return (l_in_x, l_in_z), l_out
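A hedged sketch of wiring the decoder above into a compiled function, assuming `model` is an instance of the class with `embedding_dim`, `z_dim` and the `nn_*` attributes set:

import theano
import theano.tensor as T
import lasagne

x = T.tensor3('x')                          # (batch, embedding_dim, max_length)
z = T.matrix('z')                           # (batch, z_dim)
(l_in_x, l_in_z), l_out = model.nn_fn()
out = lasagne.layers.get_output(l_out, {l_in_x: x, l_in_z: z})
decode_fn = theano.function([x, z], out)    # (batch, max_length, embedding_dim)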
Example #22
def buildFCN8(nb_in_channels, input_var,
              path_weights='/Tmp/romerosa/itinf/models/' +
              'camvid/fcn8_model.npz',
              n_classes=21, load_weights=True,
              void_labels=[], trainable=False,
              layer=['probs_dimshuffle']):

    '''
    Build fcn8 model (generator)
    '''

    net = {}

    # Contracting path
    net['input'] = InputLayer((None, nb_in_channels, None, None),
                              input_var)

    # pool 1
    net['conv1_1'] = ConvLayer(
        net['input'], 64, 3, pad=100, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad='same', flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)

    # pool 2
    net['conv2_1'] = ConvLayer(
        net['pool1'], 128, 3, pad='same', flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad='same', flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)

    # pool 3
    net['conv3_1'] = ConvLayer(
        net['pool2'], 256, 3, pad='same', flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad='same', flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad='same', flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)

    # pool 4
    net['conv4_1'] = ConvLayer(
        net['pool3'], 512, 3, pad='same', flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad='same', flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad='same', flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)

    # pool 5
    net['conv5_1'] = ConvLayer(
        net['pool4'], 512, 3, pad='same', flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad='same', flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad='same', flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)

    # fc6
    net['fc6'] = ConvLayer(
        net['pool5'], 4096, 7, pad='valid', flip_filters=False)
    net['fc6_dropout'] = DropoutLayer(net['fc6'])

    # fc7
    net['fc7'] = ConvLayer(
        net['fc6_dropout'], 4096, 1, pad='valid', flip_filters=False)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)

    net['score_fr'] = ConvLayer(
        net['fc7_dropout'], n_classes, 1, pad='valid', flip_filters=False)

    # Upsampling path

    # Unpool
    net['score2'] = DeconvLayer(net['score_fr'], n_classes, 4, stride=2,
                                crop='valid', nonlinearity=linear)
    net['score_pool4'] = ConvLayer(net['pool4'], n_classes, 1,
                                   pad='same')
    net['score_fused'] = ElemwiseSumLayer((net['score2'],
                                           net['score_pool4']),
                                          cropping=[None, None, 'center',
                                                    'center'])

    # Unpool
    net['score4'] = DeconvLayer(net['score_fused'], n_classes, 4,
                                stride=2, crop='valid', nonlinearity=linear)
    net['score_pool3'] = ConvLayer(net['pool3'], n_classes, 1,
                                   pad='valid')
    net['score_final'] = ElemwiseSumLayer((net['score4'],
                                           net['score_pool3']),
                                          cropping=[None, None, 'center',
                                                    'center'])
    # Unpool
    net['upsample'] = DeconvLayer(net['score_final'], n_classes, 16,
                                  stride=8, crop='valid', nonlinearity=linear)
    upsample_shape = lasagne.layers.get_output_shape(net['upsample'])[1]
    net['input_tmp'] = InputLayer((None, upsample_shape, None,
                                   None), input_var)

    net['score'] = ElemwiseMergeLayer((net['input_tmp'], net['upsample']),
                                      merge_function=lambda input, deconv:
                                      deconv,
                                      cropping=[None, None, 'center',
                                                'center'])

    # Final dimshuffle, reshape and softmax
    net['final_dimshuffle'] = DimshuffleLayer(net['score'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = ReshapeLayer(net['final_dimshuffle'],
                                        (T.prod(laySize[0:3]),
                                         laySize[3]))
    net['probs'] = NonlinearityLayer(net['final_reshape'],
                                     nonlinearity=softmax)

    # Load weights
    if load_weights:
        with np.load(path_weights) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        lasagne.layers.set_all_param_values(net['probs'], param_values)

    if not trainable:
        freezeParameters(net['probs'], single=False)

    if any(void_labels):
        layVoid = lasagne.layers.get_output(net['probs']).shape
        input_discrim_var = T.zeros((layVoid[0], 1))
        net['input_void'] = InputLayer((None, 1), input_discrim_var)
        net['concat'] = ConcatLayer([net['probs'], net['input_void']],
                                    axis=1, cropping=None)
        n_classes = n_classes + 1
    else:
        net['concat'] = net['probs']

    # Go back to 4D
    net['probs_reshape'] = ReshapeLayer(net['concat'], (laySize[0], laySize[1],
                                                        laySize[2], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'],
                                              (0, 3, 1, 2))

    return [net[el] for el in layer]
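A minimal usage sketch for the generator builder above; paths, class count and variable names are placeholders:

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('input')
fcn8_top = buildFCN8(3, input_var, n_classes=11, load_weights=False,
                     trainable=True, layer=['probs_dimshuffle'])[0]
probs = lasagne.layers.get_output(fcn8_top, deterministic=True)
segment_fn = theano.function([input_var], probs)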
Example #23
def buildFCN8(nb_in_channels, input_var,
              path_weights='/Tmp/romerosa/itinf/models/' +
              'camvid/new_fcn8_model_best.npz',
              n_classes=21, load_weights=True,
              void_labels=[], trainable=False,
              layer=['probs_dimshuffle'], pascal=False,
              temperature=1.0, dropout=0.5):
    '''
    Build fcn8 model
    '''

    net = {}

    # Contracting path
    net['input'] = InputLayer((None, nb_in_channels, None, None),
                              input_var)

    # pool 1
    net['conv1_1'] = ConvLayer(
        net['input'], 64, 3, pad=100, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad='same', flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)

    # pool 2
    net['conv2_1'] = ConvLayer(
        net['pool1'], 128, 3, pad='same', flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad='same', flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)

    # pool 3
    net['conv3_1'] = ConvLayer(
        net['pool2'], 256, 3, pad='same', flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad='same', flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad='same', flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)

    # pool 4
    net['conv4_1'] = ConvLayer(
        net['pool3'], 512, 3, pad='same', flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad='same', flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad='same', flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)

    # pool 5
    net['conv5_1'] = ConvLayer(
        net['pool4'], 512, 3, pad='same', flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad='same', flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad='same', flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)

    # fc6
    net['fc6'] = ConvLayer(
        net['pool5'], 4096, 7, pad='valid', flip_filters=False)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=dropout)

    # fc7
    net['fc7'] = ConvLayer(
        net['fc6_dropout'], 4096, 1, pad='valid', flip_filters=False)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=dropout)

    net['score_fr'] = ConvLayer(
        net['fc7_dropout'], n_classes, 1, pad='valid', flip_filters=False)

    # Upsampling path

    # Unpool
    net['score2'] = DeconvLayer(net['score_fr'], n_classes, 4, stride=2,
                                crop='valid', nonlinearity=linear)
    net['score_pool4'] = ConvLayer(net['pool4'], n_classes, 1,
                                   pad='same')
    net['score_fused'] = ElemwiseSumLayer((net['score2'],
                                           net['score_pool4']),
                                          cropping=[None, None, 'center',
                                                    'center'])

    # Unpool
    net['score4'] = DeconvLayer(net['score_fused'], n_classes, 4,
                                stride=2, crop='valid', nonlinearity=linear)
    net['score_pool3'] = ConvLayer(net['pool3'], n_classes, 1,
                                   pad='valid')
    net['score_final'] = ElemwiseSumLayer((net['score4'],
                                           net['score_pool3']),
                                          cropping=[None, None, 'center',
                                                    'center'])
    # Unpool
    net['upsample'] = DeconvLayer(net['score_final'], n_classes, 16,
                                  stride=8, crop='valid', nonlinearity=linear)
    upsample_shape = lasagne.layers.get_output_shape(net['upsample'])[1]
    net['input_tmp'] = InputLayer((None, upsample_shape, None,
                                   None), input_var)

    net['score'] = ElemwiseMergeLayer((net['input_tmp'], net['upsample']),
                                      merge_function=lambda input, deconv:
                                      deconv,
                                      cropping=[None, None, 'center',
                                                'center'])

    # Final dimshuffle, reshape and softmax
    net['final_dimshuffle'] = \
        lasagne.layers.DimshuffleLayer(net['score'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = \
        lasagne.layers.ReshapeLayer(net['final_dimshuffle'],
                                    (T.prod(laySize[0:3]),
                                     laySize[3]))
    net['probs'] = lasagne.layers.NonlinearityLayer(net['final_reshape'],
                                                    nonlinearity=softmax)

    # Load weights
    if load_weights:
        if pascal:
            path_weights = '/data/lisatmp4/erraqabi/data/att-segm/' + \
                          'pre_trained_weights/pascal-fcn8s-tvg-dag.mat'
            if 'tvg' in path_weights:
                str_filter = 'f'
                str_bias = 'b'
            else:
                str_filter = '_filter'
                str_bias = '_bias'

            W = sio.loadmat(path_weights)

            # Load the parameter values into the net
            num_params = W.get('params').shape[1]
            for i in range(num_params):
                # Get layer name from the saved model
                name = str(W.get('params')[0][i][0])[3:-2]
                # Get parameter value
                param_value = W.get('params')[0][i][1]

                # Load weights
                if name.endswith(str_filter):
                    raw_name = name[:-len(str_filter)]
                    if 'score' not in raw_name and \
                       'upsample' not in raw_name and \
                       'final' not in raw_name and \
                       'probs' not in raw_name:

                        # print 'Initializing layer ' + raw_name
                        param_value = param_value.T
                        param_value = np.swapaxes(param_value, 2, 3)
                        net[raw_name].W.set_value(param_value)

                # Load bias terms
                if name.endswith(str_bias):
                    raw_name = name[:-len(str_bias)]
                    if 'score' not in raw_name and \
                       'upsample' not in raw_name and \
                       'final' not in raw_name and \
                       'probs' not in raw_name:

                        param_value = np.squeeze(param_value)
                        net[raw_name].b.set_value(param_value)
        else:
            with np.load(path_weights) as f:
                param_values = [f['arr_%d' % i] for i in range(len(f.files))]
            lasagne.layers.set_all_param_values(net['probs'], param_values)

    # Do not train
    if not trainable:
        model_helpers.freezeParameters(net['probs'], single=False)

    # Go back to 4D
    net['probs_reshape'] = ReshapeLayer(net['probs'], (laySize[0], laySize[1],
                                                       laySize[2], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'],
                                              (0, 3, 1, 2))

    # Apply temperature
    if load_weights:
        soft_value = net['upsample'].W.get_value() / temperature
        net['upsample'].W.set_value(soft_value)
        soft_value = net['upsample'].b.get_value() / temperature
        net['upsample'].b.set_value(soft_value)

    return [net[el] for el in layer]
Example #24
    def cnn_fn(self, max_length):
        """Build the theano tensor

        Using the attributes of the class, build the theano graph
        using lasagne to get the neural networks which can
        then be passed into theano.function to get the values
        given input.

        :param max_len: (int) max length of the language we are using

        :return l_final: (tensor) theano tensor specifying the output of the cnn"""

        l_in = InputLayer((None, max_length, self.vocab_size))
        l_in_T = DimshuffleLayer(l_in, (0, 2, 1))
        l_causal_conv = DilatedConv1DLayer(
            l_in_T,
            num_filters=self.nn_residual_channels,
            dilation=1,
            nonlinearity=None)
        l_prev = l_causal_conv

        skip_layers = []

        for h in range(len(self.nn_dilations)):

            l_filter = DilatedConv1DLayer(
                l_prev,
                num_filters=self.nn_dilation_channels,
                dilation=self.nn_dilations[h],
                nonlinearity=tanh)

            l_gate = DilatedConv1DLayer(l_prev,
                                        num_filters=self.nn_dilation_channels,
                                        dilation=self.nn_dilations[h],
                                        nonlinearity=sigmoid)

            l_merge = ElemwiseMergeLayer([l_filter, l_gate],
                                         merge_function=T.mul)

            l_dense = Conv1DLayer(l_merge,
                                  num_filters=self.nn_residual_channels,
                                  filter_size=1,
                                  nonlinearity=None)

            l_residual = ElemwiseSumLayer([l_prev, l_dense])

            l_skip = Conv1DLayer(l_merge,
                                 num_filters=self.nn_residual_channels,
                                 filter_size=1,
                                 nonlinearity=None)

            skip_layers.append(l_skip)

            l_prev = l_residual

        l_skip_sum = NonlinearityLayer(ElemwiseSumLayer(skip_layers),
                                       nonlinearity=elu)

        l_final = DimshuffleLayer(l_skip_sum, (0, 2, 1))

        return l_final
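A hypothetical sketch of compiling the encoder above, assuming `model` is an instance of the class and sequences are one-hot encoded:

import theano
import theano.tensor as T
import lasagne

x = T.tensor3('x')                         # one-hot (batch, max_length, vocab_size)
l_cnn = model.cnn_fn(max_length=40)
h = lasagne.layers.get_output(l_cnn, x)    # (batch, max_length, residual channels)
encode_fn = theano.function([x], h)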