Example #1
def test_merge_mask_2d():
    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5,
                                     dtype='int32')

    # inputs
    input_a = layers.Input(shape=(3,))
    input_b = layers.Input(shape=(3,))

    # masks
    masked_a = layers.Masking(mask_value=0)(input_a)
    masked_b = layers.Masking(mask_value=0)(input_b)

    # three different types of merging
    merged_sum = legacy_layers.merge([masked_a, masked_b], mode='sum')
    merged_concat = legacy_layers.merge([masked_a, masked_b],
                                        mode='concat',
                                        concat_axis=1)
    merged_concat_mixed = legacy_layers.merge([masked_a, input_b],
                                              mode='concat',
                                              concat_axis=1)

    # test sum
    model_sum = models.Model([input_a, input_b], [merged_sum])
    model_sum.compile(loss='mse', optimizer='sgd')
    model_sum.fit([rand(2, 3), rand(2, 3)], [rand(2, 3)], epochs=1)

    # test concatenation
    model_concat = models.Model([input_a, input_b], [merged_concat])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], epochs=1)

    # test concatenation with masked and non-masked inputs
    model_concat = models.Model([input_a, input_b], [merged_concat_mixed])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], epochs=1)
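
For comparison, a minimal sketch (an assumption, not taken from the original test) of the same masked wiring using the non-legacy merge layers; it assumes the same `layers` import as the test above:

# Minimal sketch, assuming the Keras 2.x functional API -- not from the original test.
input_a = layers.Input(shape=(3,))
input_b = layers.Input(shape=(3,))
masked_a = layers.Masking(mask_value=0)(input_a)
masked_b = layers.Masking(mask_value=0)(input_b)
summed = layers.add([masked_a, masked_b])                        # replaces merge(mode='sum')
concatenated = layers.concatenate([masked_a, masked_b], axis=1)  # replaces mode='concat'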
Example #2
def covariance_block_residual(input_tensor,
                              nb_class,
                              stage,
                              block,
                              epsilon=0,
                              parametric=[],
                              denses=[],
                              wv=True,
                              wv_param=None,
                              activation='relu',
                              o2tconstraints=None,
                              vectorization='wv',
                              **kwargs):
    if epsilon > 0:
        cov_name_base = 'cov' + str(stage) + block + '_branch_epsilon' + str(epsilon)
    else:
        cov_name_base = 'cov' + str(stage) + block
    o2t_name_base = 'o2t' + str(stage) + block
    dense_name_base = 'dense' + str(stage) + block

    second_layer = SecondaryStatistic(name=cov_name_base,
                                      eps=epsilon,
                                      **kwargs)
    x = second_layer(input_tensor)

    cov_dim = second_layer.out_dim
    input_cov = x

    for idx, param in enumerate(parametric):
        x = O2Transform(param, activation='relu',
                        name=o2t_name_base + str(idx))(x)
        if param != cov_dim:
            # project back to cov_dim so the residual sum shapes match
            x = O2Transform(cov_dim,
                            activation='relu',
                            name=o2t_name_base + str(idx) + 'r')(x)
        x = merge([x, input_cov],
                  mode='sum',
                  name='residualsum_{}_{}'.format(idx, block))

    if wv:
        if wv_param is None:
            wv_param = nb_class
        x = WeightedVectorization(wv_param,
                                  activation=activation,
                                  name='wv' + str(stage) + block)(x)
    else:
        x = Flatten()(x)
    for idx, param in enumerate(denses):
        x = Dense(param, activation=activation,
                  name=dense_name_base + str(idx))(x)

    x = Dense(nb_class, activation=activation, name=dense_name_base)(x)
    return x
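
A hypothetical usage sketch for the block above; the input shape and parameters are assumptions, and the custom layers (SecondaryStatistic, O2Transform, WeightedVectorization) must come from the surrounding project:

# Hypothetical usage sketch -- shapes and argument values are assumptions.
from keras.layers import Input
from keras.models import Model

feat = Input(shape=(14, 14, 256))   # e.g. a late conv feature map
out = covariance_block_residual(feat, nb_class=10, stage=5, block='a',
                                parametric=[128, 128])
model = Model(feat, out)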
Example #3
    def build(timeSteps, variables, classes, nlstms):
        # CONV => POOL
        inputNet = Input(shape=(1, timeSteps, variables))  # alternatively: batch_shape=(32, 1, 7, 5)
        conv1 = Conv2D(20, (2, 5), padding="same")(inputNet)
        conv1 = Activation("relu")(conv1)
        conv1 = AveragePooling2D(pool_size=(2, 1), strides=(1, 1))(conv1)
        conv2 = Conv2D(nlstms, (3, 3), padding="same")(conv1)
        conv2 = AveragePooling2D(pool_size=(2, 1), strides=(1, 1))(conv2)
        conv2 = Activation("relu")(conv2)
        channels = Dropout(0.40)(conv2)
        lstmsVec = []
        for x in range(0, nlstms):
            # bind the loop index as a default argument so each Lambda
            # slices its own channel rather than the final value of `x`
            filterImg = Lambda(lambda element, idx=x: element[:, idx, :, :])(channels)

            lstmsVec.append(filterImg)

        merged = merge(lstmsVec, mode='concat', concat_axis=2)

        # a softmax classifier
        #flat=Flatten()(merged)
        lstm = Bidirectional(LSTM(20, dropout=0.35),
                             merge_mode='concat')(merged)
        #lstm=LSTM(20,dropout=0.35,batch_input_shape=(4, 5, 50),stateful=True)(merged)
        classificationLayer = Dense(classes)(lstm)
        classificationLayer = Activation("softmax")(classificationLayer)

        model = Model(inputNet, classificationLayer)
        #model=Model(inputNet,lstmsVec)
        return model
Example #4
def get_f_strided(args, data_format, input_shape, x_shape):
    inpt = Input(input_shape)
    hidden = inpt
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    strides=(2, 2),
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2DTranspose(args.n_kernels, (5, 5),
                             padding='same',
                             strides=(2, 2),
                             data_format=data_format,
                             kernel_initializer=args.initializer,
                             activation=args.activation)(hidden)
    hidden = Lambda(
        # cut off the extra rows/cols introduced by the strided transpose conv
        # (the slicing assumes channels_last ordering)
        lambda x: x[:, :x_shape[0], :x_shape[1], :],
        output_shape=x_shape[:2] + (args.n_kernels,))(hidden)
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = merge([inpt, hidden], mode='concat', concat_axis=-1)
    hidden = Conv2D(args.n_kernels, (1, 1),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    output = Conv2D(3, (1, 1),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation='sigmoid')(hidden)
    f = Model(inpt, output, name='f')
    return f
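
A hypothetical call sketch; the real `args` object comes from the project's argument parser, so the field values below are assumptions:

# Hypothetical usage sketch -- args fields and shapes are assumptions.
from types import SimpleNamespace

args = SimpleNamespace(n_kernels=32,
                       initializer='glorot_uniform',
                       activation='relu')
f = get_f_strided(args, 'channels_last', (28, 28, 3), (28, 28, 3))
f.summary()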
Example #5
def get_f_simple(args, data_format, input_shape):
    # Same as D, but without the 1x1 convolution before the merge
    inpt = Input(input_shape)
    hidden = inpt
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (7, 7),
                    padding='same',
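                    # dilation_rate=(1, 1) means no dilation here; get_f_dilated uses (2, 2)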
                    dilation_rate=(1, 1),
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = merge([inpt, hidden], mode='concat', concat_axis=-1)
    hidden = Conv2D(args.n_kernels, (1, 1),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    output = Conv2D(3, (1, 1),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation='sigmoid')(hidden)
    f = Model(inpt, output, name='f')
    return f
Example #6
def get_f_dilated(args, data_format, input_shape):
    inpt = Input(input_shape)
    hidden = inpt
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (7, 7),
                    padding='same',
                    dilation_rate=(2, 2),
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = Conv2D(args.n_kernels, (5, 5),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    hidden = merge([inpt, hidden], mode='concat', concat_axis=-1)
    hidden = Conv2D(args.n_kernels, (1, 1),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation=args.activation)(hidden)
    output = Conv2D(3, (1, 1),
                    padding='same',
                    data_format=data_format,
                    kernel_initializer=args.initializer,
                    activation='sigmoid')(hidden)
    f = Model(inpt, output, name='f')
    return f
Example #7
def test_merge_mask_3d():
    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # embeddings
    input_a = layers.Input(shape=(3,), dtype='int32')
    input_b = layers.Input(shape=(3,), dtype='int32')
    embedding = layers.Embedding(3, 4, mask_zero=True)
    embedding_a = embedding(input_a)
    embedding_b = embedding(input_b)

    # rnn
    rnn = layers.SimpleRNN(3, return_sequences=True)
    rnn_a = rnn(embedding_a)
    rnn_b = rnn(embedding_b)

    # concatenation
    merged_concat = legacy_layers.merge([rnn_a, rnn_b], mode='concat', concat_axis=-1)
    model = models.Model([input_a, input_b], [merged_concat])
    model.compile(loss='mse', optimizer='sgd')
    model.fit([rand(2, 3), rand(2, 3)], [rand(2, 3, 6)])
Example #8
def test_merge():
    # test modes: 'sum', 'mul', 'concat', 'ave', 'max'.
    input_shapes = [(3, 2), (3, 2)]
    inputs = [np.random.random(shape) for shape in input_shapes]

    # test functional API
    for mode in ['sum', 'mul', 'concat', 'ave', 'max']:
        print(mode)
        input_a = layers.Input(shape=input_shapes[0][1:])
        input_b = layers.Input(shape=input_shapes[1][1:])
        merged = legacy_layers.merge([input_a, input_b], mode=mode)
        model = models.Model([input_a, input_b], merged)
        model.compile('rmsprop', 'mse')

        expected_output_shape = model.compute_output_shape(input_shapes)
        actual_output_shape = model.predict(inputs).shape
        assert expected_output_shape == actual_output_shape

        config = model.get_config()
        model = models.Model.from_config(config)
        model.compile('rmsprop', 'mse')

        # test Merge (#2460)
        merged = legacy_layers.Merge(mode=mode)([input_a, input_b])
        model = models.Model([input_a, input_b], merged)
        model.compile('rmsprop', 'mse')

        expected_output_shape = model.compute_output_shape(input_shapes)
        actual_output_shape = model.predict(inputs).shape
        assert expected_output_shape == actual_output_shape

    # test lambda with output_shape lambda
    input_a = layers.Input(shape=input_shapes[0][1:])
    input_b = layers.Input(shape=input_shapes[1][1:])
    merged = legacy_layers.merge(
        [input_a, input_b],
        mode=lambda tup: K.concatenate([tup[0], tup[1]]),
        output_shape=lambda tup: tup[0][:-1] + (tup[0][-1] + tup[1][-1], ))
    model = models.Model([input_a, input_b], merged)
    expected_output_shape = model.compute_output_shape(input_shapes)
    actual_output_shape = model.predict(inputs).shape
    assert expected_output_shape == actual_output_shape

    config = model.get_config()
    model = models.Model.from_config(config)
    model.compile('rmsprop', 'mse')

    # test function with output_shape function
    def fn_mode(tup):
        x, y = tup
        return K.concatenate([x, y], axis=1)

    def fn_output_shape(tup):
        s1, s2 = tup
        return (s1[0], s1[1] + s2[1]) + s1[2:]

    input_a = layers.Input(shape=input_shapes[0][1:])
    input_b = layers.Input(shape=input_shapes[1][1:])
    merged = legacy_layers.merge([input_a, input_b],
                                 mode=fn_mode,
                                 output_shape=fn_output_shape)
    model = models.Model([input_a, input_b], merged)
    expected_output_shape = model.compute_output_shape(input_shapes)
    actual_output_shape = model.predict(inputs).shape
    assert expected_output_shape == actual_output_shape

    config = model.get_config()
    model = models.Model.from_config(config)
    model.compile('rmsprop', 'mse')

    # test function with output_mask function
    # time dimension is required for masking
    input_shapes = [(4, 3, 2), (4, 3, 2)]
    inputs = [np.random.random(shape) for shape in input_shapes]

    def fn_output_mask(tup):
        x_mask, y_mask = tup
        return K.concatenate([x_mask, y_mask])

    input_a = layers.Input(shape=input_shapes[0][1:])
    input_b = layers.Input(shape=input_shapes[1][1:])
    a = layers.Masking()(input_a)
    b = layers.Masking()(input_b)
    merged = legacy_layers.merge([a, b],
                                 mode=fn_mode,
                                 output_shape=fn_output_shape,
                                 output_mask=fn_output_mask)
    model = models.Model([input_a, input_b], merged)
    expected_output_shape = model.compute_output_shape(input_shapes)
    actual_output_shape = model.predict(inputs).shape
    assert expected_output_shape == actual_output_shape

    config = model.get_config()
    model = models.Model.from_config(config)
    model.compile('rmsprop', 'mse')

    mask_inputs = (np.zeros(input_shapes[0][:-1]),
                   np.ones(input_shapes[1][:-1]))
    expected_mask_output = np.concatenate(mask_inputs, axis=-1)
    mask_input_placeholders = [
        K.placeholder(shape=input_shape[:-1]) for input_shape in input_shapes
    ]
    mask_output = model.layers[-1]._output_mask(mask_input_placeholders)
    assert np.all(
        K.function(mask_input_placeholders, [mask_output])(mask_inputs)[0] ==
        expected_mask_output)

    # test lambda with output_mask lambda
    input_a = layers.Input(shape=input_shapes[0][1:])
    input_b = layers.Input(shape=input_shapes[1][1:])
    a = layers.Masking()(input_a)
    b = layers.Masking()(input_b)
    merged = legacy_layers.merge(
        [a, b],
        mode=lambda tup: K.concatenate([tup[0], tup[1]], axis=1),
        output_shape=lambda tup:
        (tup[0][0], tup[0][1] + tup[1][1]) + tup[0][2:],
        output_mask=lambda tup: K.concatenate([tup[0], tup[1]]))
    model = models.Model([input_a, input_b], merged)
    expected_output_shape = model.compute_output_shape(input_shapes)
    actual_output_shape = model.predict(inputs).shape
    assert expected_output_shape == actual_output_shape

    config = model.get_config()
    model = models.Model.from_config(config)
    model.compile('rmsprop', 'mse')

    mask_output = model.layers[-1]._output_mask(mask_input_placeholders)
    assert np.all(
        K.function(mask_input_placeholders, [mask_output])(mask_inputs)[0] ==
        expected_mask_output)

    # test with arguments
    input_shapes = [(3, 2), (3, 2)]
    inputs = [np.random.random(shape) for shape in input_shapes]

    def fn_mode(tup, a, b):
        x, y = tup
        return x * a + y * b

    input_a = layers.Input(shape=input_shapes[0][1:])
    input_b = layers.Input(shape=input_shapes[1][1:])
    merged = legacy_layers.merge([input_a, input_b],
                                 mode=fn_mode,
                                 output_shape=lambda s: s[0],
                                 arguments={
                                     'a': 0.7,
                                     'b': 0.3
                                 })
    model = models.Model([input_a, input_b], merged)
    output = model.predict(inputs)

    config = model.get_config()
    model = models.Model.from_config(config)

    assert np.all(model.predict(inputs) == output)
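
For reference, a minimal sketch (an assumption, not part of the test suite) of how the callable-mode merge above maps onto the non-legacy API: a Lambda layer applied to a list of tensors plays the same role as merge(mode=<callable>):

# Minimal sketch, assuming Keras 2.x -- not from the original tests.
input_a = layers.Input(shape=(2,))
input_b = layers.Input(shape=(2,))
merged = layers.Lambda(
    lambda tup: K.concatenate([tup[0], tup[1]]),
    output_shape=lambda s: s[0][:-1] + (s[0][-1] + s[1][-1],))([input_a, input_b])
model = models.Model([input_a, input_b], merged)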
Example #9
def dcov_model_wrapper_v2(base_model,
                          parametrics=[],
                          mode=0,
                          nb_classes=1000,
                          basename='',
                          cov_mode='channel',
                          cov_branch='o2transform',
                          cov_branch_output=None,
                          freeze_conv=False,
                          cov_regularizer=None,
                          nb_branch=1,
                          concat='concat',
                          last_conv_feature_maps=[],
                          upsample_method='conv',
                          regroup=False,
                          **kwargs):
    """
    Wrapper for any base model, attach right after the last layer of given model

    Parameters
    ----------
    base_model
    parametrics
    mode
    nb_classes
    input_shape
    load_weights
    cov_mode
    cov_branch
    cov_branch_output
    cov_block_mode
    last_avg
    freeze_conv

    Returns
    -------

    """
    cov_branch_mode = cov_branch
    # Function name
    covariance_block = get_cov_block(cov_branch)

    if cov_branch_output is None:
        cov_branch_output = nb_classes

    x = base_model.output

    x = upsample_wrapper_v1(x,
                            last_conv_feature_maps,
                            upsample_method,
                            kernel=[1, 1])

    def split_keras_tensor_according_axis(x, nb_split, axis, axis_dim):
        # helper (currently unused): splits the last axis into nb_split chunks
        outputs = []
        split_dim = axis_dim // nb_split  # integer division for slice indices
        split_loc = [split_dim * i for i in range(nb_split)]
        split_loc.append(axis_dim)  # end at the full axis so no channel is lost
        for i in range(nb_split):
            outputs.append(x[:, :, :, split_loc[i]:split_loc[i + 1]])
        return outputs

    cov_input = SeparateConvolutionFeatures(nb_branch)(x)
    if regroup:
        with tf.device('/gpu:0'):
            cov_input = Regrouping(None)(cov_input)
    cov_outputs = []
    for ind, x in enumerate(cov_input):
        if mode == 0:
            x = Flatten()(x)
            for pind, param in enumerate(parametrics):
                # include the branch index so layer names stay unique across branches
                x = Dense(param, activation='relu',
                          name='fc{}_{}'.format(ind, pind))(x)
            # x = Dense(nb_classes, activation='softmax')(x)

        elif mode == 1:
            cov_branch = covariance_block(x,
                                          cov_branch_output,
                                          stage=5,
                                          block=str(ind),
                                          parametric=parametrics,
                                          cov_mode=cov_mode,
                                          cov_regularizer=cov_regularizer,
                                          **kwargs)
            x = cov_branch
            # x = Dense(nb_classes, activation='softmax', name='predictions')(cov_branch)

        elif mode == 2:
            cov_branch = covariance_block(x,
                                          cov_branch_output,
                                          stage=5,
                                          block=str(ind),
                                          parametric=parametrics,
                                          cov_regularizer=cov_regularizer,
                                          **kwargs)
            x = Flatten()(x)
            x = Dense(nb_classes, activation='relu',
                      name='fc_branch{}'.format(ind))(x)
            x = merge([x, cov_branch], mode='concat',
                      name='concat_branch{}'.format(ind))
            # x = Dense(nb_classes, activation='softmax', name='predictions')(x)
        elif mode == 3:
            cov_branch = covariance_block(x,
                                          cov_branch_output,
                                          stage=5,
                                          block=str(ind),
                                          parametric=parametrics,
                                          cov_mode=cov_mode,
                                          cov_regularizer=cov_regularizer,
                                          o2t_constraints='UnitNorm',
                                          **kwargs)
            x = cov_branch
        cov_outputs.append(x)

    if concat == 'concat':
        if cov_branch_mode in ('o2t_no_wv', 'corr_no_wv'):
            x = MatrixConcat(cov_outputs,
                             name='Matrix_diag_concat')(cov_outputs)
            x = WeightedVectorization(cov_branch_output * nb_branch,
                                      name='WV_big')(x)
        else:
            x = merge(cov_outputs, mode='concat', name='merge')
    elif concat == 'sum':
        x = merge(cov_outputs, mode='sum', name='sum')
        if cov_branch_mode == 'o2t_no_wv':
            x = WeightedVectorization(cov_branch_output, name='wv_sum')(x)
    elif concat == 'ave':
        x = merge(cov_outputs, mode='ave', name='ave')
        if cov_branch_mode == 'o2t_no_wv':
            x = WeightedVectorization(cov_branch_output, name='wv_sum')(x)
    else:
        raise RuntimeError("concat mode not support : " + concat)

    if freeze_conv:
        toggle_trainable_layers(base_model, not freeze_conv)

    # x = Dense(cov_branch_output * nb_branch, activation='relu', name='Dense_b')(x)
    x = Dense(nb_classes, activation='softmax')(x)

    model = Model(base_model.input, x, name=basename)
    return model
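
A hypothetical invocation sketch; the base model and keyword values are assumptions, and the covariance layers come from the surrounding project:

# Hypothetical usage sketch -- base model and argument values are assumptions.
from keras.applications.vgg16 import VGG16

base = VGG16(include_top=False, input_shape=(224, 224, 3))
model = dcov_model_wrapper_v2(base,
                              parametrics=[256],
                              mode=1,
                              nb_classes=67,
                              basename='vgg16_dcov',
                              nb_branch=4,
                              freeze_conv=True)
model.compile(optimizer='sgd', loss='categorical_crossentropy')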
Example #10
def dcov_model_wrapper_v1(base_model,
                          parametrics=[],
                          mode=0,
                          nb_classes=1000,
                          basename='',
                          cov_mode='channel',
                          cov_branch='o2transform',
                          cov_branch_output=None,
                          freeze_conv=False,
                          cov_regularizer=None,
                          nb_branch=1,
                          concat='concat',
                          last_conv_feature_maps=[],
                          upsample_method='conv',
                          regroup=False,
                          **kwargs):
    """
    Wrapper for any base model, attach right after the last layer of given model

    Parameters
    ----------
    base_model
    parametrics
    mode
    nb_classes
    input_shape
    load_weights
    cov_mode
    cov_branch
    cov_branch_output
    cov_block_mode
    last_avg
    freeze_conv

    Returns
    -------

    """

    # Function name
    covariance_block = get_cov_block(cov_branch)

    if cov_branch_output is None:
        cov_branch_output = nb_classes

    x = base_model.output

    x = upsample_wrapper_v1(x,
                            last_conv_feature_maps,
                            upsample_method,
                            kernel=[1, 1])

    cov_input = x
    if mode == 0:
        x = Flatten()(x)
        for ind, param in enumerate(parametrics):
            x = Dense(param, activation='relu', name='fc{}'.format(ind))(x)
        x = Dense(nb_classes, activation='softmax')(x)

    elif mode == 1:
        if nb_branch == 1:
            cov_branch = covariance_block(cov_input,
                                          cov_branch_output,
                                          stage=5,
                                          block='a',
                                          parametric=parametrics,
                                          cov_mode=cov_mode,
                                          cov_regularizer=cov_regularizer,
                                          **kwargs)
            x = Dense(nb_classes, activation='softmax',
                      name='predictions')(cov_branch)
        elif nb_branch > 1:
            # multi-branch variant not implemented in v1; see dcov_model_wrapper_v2
            pass

    elif mode == 2:
        cov_branch = covariance_block(cov_input,
                                      cov_branch_output,
                                      stage=5,
                                      block='a',
                                      parametric=parametrics,
                                      cov_regularizer=cov_regularizer,
                                      **kwargs)
        x = Flatten()(x)
        x = Dense(nb_classes, activation='relu', name='fc')(x)
        x = merge([x, cov_branch], mode='concat', name='concat')
        x = Dense(nb_classes, activation='softmax', name='predictions')(x)
    elif mode == 3:
        if nb_branch == 1:

            cov_branch = covariance_block(cov_input,
                                          cov_branch_output,
                                          stage=5,
                                          block='a',
                                          parametric=parametrics,
                                          cov_mode=cov_mode,
                                          cov_regularizer=cov_regularizer,
                                          o2t_constraints='UnitNorm',
                                          **kwargs)
            x = Dense(nb_classes, activation='softmax',
                      name='predictions')(cov_branch)
        elif nb_branch > 1:
            # multi-branch variant not implemented in v1; see dcov_model_wrapper_v2
            pass

    if freeze_conv:
        toggle_trainable_layers(base_model, not freeze_conv)

    model = Model(base_model.input, x, name=basename)
    return model
Example #11
def covariance_block_sobn_multi_o2t(input_tensor,
                                    nb_class,
                                    stage,
                                    block,
                                    epsilon=0,
                                    parametric=[],
                                    activation='relu',
                                    cov_mode='channel',
                                    cov_regularizer=None,
                                    vectorization=None,
                                    o2t_constraints=None,
                                    nb_o2t=1,
                                    o2t_concat='concat',
                                    so_mode=2,
                                    **kwargs):
    if epsilon > 0:
        cov_name_base = 'cov' + str(stage) + block + '_branch_epsilon' + str(
            epsilon)
    else:
        cov_name_base = 'cov' + str(stage) + block + '_branch'
    o2t_name_base = 'o2t' + str(stage) + block + '_branch'
    dense_name_base = 'fc' + str(stage) + block + '_branch'
    wp_name_base = 'wp' + str(stage) + block + '_branch'

    x = SecondaryStatistic(name=cov_name_base,
                           eps=epsilon,
                           cov_mode=cov_mode,
                           cov_regularizer=cov_regularizer,
                           **kwargs)(input_tensor)

    # Try to implement multiple o2t layers out of the same x.
    cov_input = x
    cov_br = []
    for i in range(nb_o2t):
        x = cov_input
        for idx, param in enumerate(parametric):
            x = SecondOrderBatchNormalization(so_mode=so_mode,
                                              momentum=0.8,
                                              axis=-1)(x)
            x = O2Transform(param,
                            activation='relu',
                            name=o2t_name_base + str(idx) + '_' + str(i))(x)
        if vectorization == 'wv':
            # name by the o2t-branch index only, so the name is defined even
            # when `parametric` is empty
            x = WeightedVectorization(nb_class,
                                      activation=activation,
                                      name=wp_name_base + str(i))(x)
        elif vectorization == 'dense':
            x = Flatten()(x)
            x = Dense(nb_class, activation=activation,
                      name=dense_name_base + '_' + str(i))(x)
        elif vectorization == 'flatten':
            x = Flatten()(x)
        elif vectorization == 'mat_flatten':
            x = FlattenSymmetric()(x)
        elif vectorization is None:
            pass
        else:
            raise ValueError(
                "vectorization parameter not recognized: {}".format(vectorization))

        cov_br.append(x)

    if o2t_concat == 'concat' and vectorization is None:
        # use matrix concat
        x = MatrixConcat(cov_br)(cov_br)
        x = WeightedVectorization(nb_class * nb_o2t // 2)(x)
    elif o2t_concat == 'concat':
        # use vector concat
        x = merge(cov_br, mode='concat')
    else:
        raise NotImplementedError

    return x
Example #12
def dcov_multi_out_model_wrapper(base_model,
                                 parametrics=[],
                                 mode=0,
                                 nb_classes=1000,
                                 basename='',
                                 cov_mode='channel',
                                 cov_branch='o2t_no_wv',
                                 cov_branch_output=None,
                                 freeze_conv=False,
                                 cov_regularizer=None,
                                 nb_branch=1,
                                 concat='concat',
                                 last_conv_feature_maps=[],
                                 upsample_method='conv',
                                 regroup=False,
                                 **kwargs):
    """
    Wrapper for any multi output base model, attach right after the last layer of given model

    Parameters
    ----------
    base_model
    parametrics
    mode
    nb_classes
    input_shape
    load_weights
    cov_mode
    cov_branch
    cov_branch_output
    cov_block_mode
    last_avg
    freeze_conv

    mode 1: 1x1 reduce dim

    Returns
    -------

    """
    cov_branch_mode = cov_branch
    # Function name
    covariance_block = get_cov_block(cov_branch)

    if cov_branch_output is None:
        cov_branch_output = nb_classes
    # 256, 512, 512
    block1, block2, block3 = outputs = base_model.outputs
    print("===================")
    cov_outputs = []
    if mode == 1:
        print("Model design : ResNet_o2_multi_branch 1x1 conv to reduce dim ")
        """ 1x1 conv to reduce dim """
        # Starting from block3
        block3 = upsample_wrapper_v1(block3, [1024, 512])
        block2 = upsample_wrapper_v1(block2, [512])
        block2 = MaxPooling2D()(block2)
        block1 = MaxPooling2D(pool_size=(4, 4))(block1)
        outputs = [block1, block2, block3]
        for ind, x in enumerate(outputs):
            cov_branch = covariance_block(x,
                                          cov_branch_output,
                                          stage=5,
                                          block=str(ind),
                                          parametric=parametrics,
                                          cov_mode=cov_mode,
                                          cov_regularizer=cov_regularizer,
                                          **kwargs)
            x = cov_branch
            cov_outputs.append(x)
    elif mode == 2 or mode == 3:
        """ Use branchs to reduce dim """
        block3 = SeparateConvolutionFeatures(4)(block3)
        block2 = SeparateConvolutionFeatures(2)(block2)
        block1 = MaxPooling2D()(block1)
        block1 = [block1]
        outputs = [block1, block2, block3]
        for ind, outs in enumerate(outputs):
            block_outs = []
            for ind2, x in enumerate(outs):
                cov_branch = covariance_block(x,
                                              cov_branch_output,
                                              stage=5,
                                              block=str(ind) + '_' + str(ind2),
                                              parametric=parametrics,
                                              cov_mode=cov_mode,
                                              cov_regularizer=cov_regularizer,
                                              **kwargs)
                x = cov_branch
                block_outs.append(x)
            if mode == 3:
                """ Sum block covariance output """
                if len(block_outs) > 1:
                    o = merge(block_outs,
                              mode='sum',
                              name='multibranch_sum_{}'.format(ind))
                    o = WeightedVectorization(cov_branch_output)(o)
                    cov_outputs.append(o)
                else:
                    a = block_outs[0]
                    if 'o2t' in a.name:
                        a = WeightedVectorization(cov_branch_output)(a)
                    cov_outputs.append(a)
            else:
                cov_outputs.extend(block_outs)
    elif mode == 4:
        """ Use the similar structure to Feature Pyramid Network """
        # supplimentary stream
        block1 = upsample_wrapper_v1(block1, [256], stage='block1')
        block2 = upsample_wrapper_v1(block2, [256], stage='block2')
        # main stream
        block3 = upsample_wrapper_v1(block3, [512], stage='block3')

        cov_input = SeparateConvolutionFeatures(nb_branch)(block3)
        cov_outputs = []
        for ind, x in enumerate(cov_input):

            cov_branch = covariance_block(x,
                                          cov_branch_output,
                                          stage=5,
                                          block=str(ind),
                                          parametric=parametrics,
                                          cov_mode=cov_mode,
                                          cov_regularizer=cov_regularizer,
                                          normalization=False,
                                          **kwargs)
            x = cov_branch
            cov_outputs.append(x)

        x = MatrixConcat(cov_outputs, name='Matrix_diag_concat')(cov_outputs)
        x = O2Transform(64, activation='relu', name='o2t_mainst_1')(x)

        block2 = SecondaryStatistic(name='cov_block2',
                                    cov_mode='pmean',
                                    robust=False,
                                    eps=1e-5)(block2)
        block2 = O2Transform(64, activation='relu', name='o2t_block2')(block2)

        # fuse = merge([block2, x], mode='sum')
        # fuse = O2Transform(64, activation='relu', name='o2t_mainst_2')(fuse)

        block1 = SecondaryStatistic(name='cov_block1',
                                    cov_mode='pmean',
                                    robust=False,
                                    eps=1e-5)(block1)
        block1 = O2Transform(64, activation='relu', name='o2t_block1')(block1)

        # fuse = merge([fuse, block1], mode='sum')

        x = MatrixConcat([x, block1, block2],
                         name='Matrix_diag_concat_all')([x, block1, block2])
        x = WeightedVectorization(128, activation='relu', name='wv_fuse')(x)

        # Merge the last matrix for matrix concat

    if freeze_conv:
        toggle_trainable_layers(base_model, not freeze_conv)

    x = Dense(nb_classes, activation='softmax')(x)

    model = Model(base_model.input, x, name=basename)
    return model
Example #13
input_img = Input(shape=(1, 28, 28))  # (channels, rows, cols); adapt this if using `channels_last` image data format

x = Conv2D(5, (3, 3), activation='relu', padding='same')(input_img)
#x = MaxPooling2D((2, 2), padding='same')(x)
#x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
#x = MaxPooling2D((2, 2), padding='same')(x)
#x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = AveragePooling2D(pool_size=(2, 1), strides=(2, 1),
                           padding='valid')(x)
lstmsVec = []
for x in range(0, 5):
    # bind the loop index as a default argument so each Lambda slices its own row
    filterImg = Lambda(lambda element, idx=x: element[:, idx, :, :])(encoded)
    lstmsVec.append(filterImg)
merged = merge(lstmsVec, mode='concat', concat_axis=2)

# `encoded` is the compressed representation at this point

x = Conv2D(5, (3, 3), activation='relu', padding='same')(encoded)
#x = UpSampling2D((2, 2))(x)
#x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
#x = UpSampling2D((2, 2))(x)
#x = Conv2D(16, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 1))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

encoder = Model(input_img, encoded)

encoderHstack = Model(input_img, merged)
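
The snippet computes `decoded` but never builds the end-to-end model; a minimal completion sketch, assuming the usual autoencoder training setup:

# Minimal completion sketch -- the training call is an assumption.
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# autoencoder.fit(x_train, x_train, epochs=10, batch_size=128)  # x_train is hypothetical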