Example #1
0
def simple_second_model(input_tensor):
    """Build a small conv net topped with a second-order statistics head.

    Two conv/pool/dropout stages feed a SecondaryStatistic layer, followed
    by two O2Transform + MatrixReLU pairs, a LogTransform, and a 10-way
    fully connected prediction layer.
    """
    net = slim.conv2d(
        input_tensor,
        32, [3, 3],
        scope='conv1',
        biases_initializer=tf.constant_initializer(0),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.05))
    net = slim.conv2d(net, 32, [32, 32], scope='conv2')
    net = slim.max_pool2d(net, [2, 2], scope='pool1')
    net = slim.dropout(net, keep_prob=0.75, scope='dropout')

    net = slim.conv2d(net, 64, [3, 3], scope='conv3')
    net = slim.conv2d(net, 64, [3, 3], scope='conv4')
    net = slim.max_pool2d(net, [2, 2], scope='pool2')
    net = slim.dropout(net, keep_prob=0.75, scope='dropout2')

    # Second-order head: covariance statistics, parametric transforms,
    # matrix ReLU rectifications, then a log-Euclidean mapping.
    with tf.name_scope('SecondStatistics'):
        net = SecondaryStatistic(activation='relu', dim_ordering='tf')(net)
    with tf.name_scope('O2T_1'):
        net = O2Transform(50, activation='relu')(net)
    with tf.name_scope("Matrix_RELU_1"):
        net = MatrixReLU(1e-4)(net)
    with tf.name_scope('O2T_2'):
        net = O2Transform(50, activation='relu')(net)
    with tf.name_scope("Matrix_RELU_2"):
        net = MatrixReLU(1e-4)(net)
    with tf.name_scope("LogTransform"):
        net = LogTransform(0.001)(net)

    net = slim.flatten(net, scope='flatten')
    return slim.fully_connected(net, 10, scope='predictions')
Example #2
0
def covariance_block_residual(input_tensor,
                              nb_class,
                              stage,
                              block,
                              epsilon=0,
                              parametric=None,
                              denses=None,
                              wv=True,
                              wv_param=None,
                              activation='relu',
                              o2tconstraints=None,
                              vectorization='wv',
                              **kwargs):
    """Covariance branch with residual (sum) connections around O2Transform.

    Computes second-order statistics of ``input_tensor``, then applies a
    stack of O2Transform layers whose outputs are projected back to the
    covariance dimension and summed with the raw covariance matrix
    (residual connection). The result is vectorized (WeightedVectorization
    or Flatten), passed through optional Dense layers, and finished with a
    ``nb_class``-way Dense layer.

    Parameters
    ----------
    input_tensor : feature tensor fed to SecondaryStatistic.
    nb_class : int, output dimension of the final Dense layer.
    stage, block : identifiers used to build unique layer names.
    epsilon : eps passed to SecondaryStatistic (embedded in the cov layer
        name when > 0).
    parametric : list of O2Transform output dims (default: none).
    denses : list of hidden Dense sizes applied after vectorization.
    wv : bool, use WeightedVectorization (True) or Flatten (False).
    wv_param : int or None, WV output dim; defaults to ``nb_class``.
    activation : activation for the WV and Dense layers.
    o2tconstraints, vectorization : accepted for API compatibility; unused.

    Returns
    -------
    Output tensor of the final Dense layer.
    """
    # Fix: avoid mutable default arguments shared across calls.
    parametric = [] if parametric is None else parametric
    denses = [] if denses is None else denses

    if epsilon > 0:
        cov_name_base = 'cov' + str(stage) + block + '_branch_epsilon' + str(
            epsilon)
    else:
        cov_name_base = 'cov' + str(stage) + block
    o2t_name_base = 'o2t' + str(stage) + block
    dense_name_base = 'dense' + str(stage) + block

    second_layer = SecondaryStatistic(name=cov_name_base,
                                      eps=epsilon,
                                      **kwargs)
    x = second_layer(input_tensor)

    cov_dim = second_layer.out_dim
    input_cov = x

    for idx, param in enumerate(parametric):
        x = O2Transform(param, activation='relu',
                        name=o2t_name_base + str(idx))(x)
        # Project back to the covariance dimension so the residual sum
        # below is shape-compatible.
        if param != cov_dim:
            x = O2Transform(cov_dim,
                            activation='relu',
                            name=o2t_name_base + str(idx) + 'r')(x)
        x = merge([x, input_cov],
                  mode='sum',
                  name='residualsum_{}_{}'.format(str(idx), str(block)))

    if wv:
        if wv_param is None:
            wv_param = nb_class
        x = WeightedVectorization(wv_param,
                                  activation=activation,
                                  name='wv' + str(stage) + block)(x)
    else:
        x = Flatten()(x)
    for idx, param in enumerate(denses):
        x = Dense(param, activation=activation,
                  name=dense_name_base + str(idx))(x)

    x = Dense(nb_class, activation=activation, name=dense_name_base)(x)
    return x
Example #3
0
def simple_log_model(input_tensor):
    """Small conv net with a second-order head and log-Euclidean mapping.

    Two conv/pool stages feed a SecondaryStatistic layer, followed by two
    O2Transform layers, a LogTransform, and a 10-way fully connected
    prediction layer.
    """
    net = slim.conv2d(
        input_tensor,
        8, [3, 3],
        scope='conv1',
        biases_initializer=tf.constant_initializer(0),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.05))
    net = slim.conv2d(net, 8, [32, 32], scope='conv2')
    net = slim.max_pool2d(net, [2, 2], scope='pool1')

    net = slim.conv2d(net, 16, [3, 3], scope='conv3')
    net = slim.conv2d(net, 16, [3, 3], scope='conv4')
    net = slim.max_pool2d(net, [2, 2], scope='pool2')

    # Second-order statistics followed by two parametric O2 transforms.
    with tf.name_scope('SecondStatistics'):
        net = SecondaryStatistic(activation='relu', dim_ordering='tf')(net)
    net = O2Transform(16, activation='relu')(net)
    net = O2Transform(16, activation='relu')(net)

    # Log-Euclidean mapping of the SPD matrix (LogTransform encapsulates
    # the eigen-decomposition + log of eigenvalues).
    with tf.name_scope("LogTransform"):
        net = LogTransform(0.001)(net)

    net = slim.flatten(net, scope='flatten')
    return slim.fully_connected(net, 10, scope='predictions')
Example #4
0
def covariance_block_batch(input_tensor,
                           nb_class,
                           stage,
                           block,
                           epsilon=0,
                           parametric=None,
                           activation='relu',
                           cov_mode='pmean',
                           cov_regularizer=None,
                           vectorization='wv',
                           o2tconstraints=None,
                           **kwargs):
    """Covariance branch with batch normalization after each O2Transform.

    SecondaryStatistic -> [O2Transform -> BatchNormalization_v2 (via
    ExpandDims/Squeeze)] per entry in ``parametric`` -> vectorization.

    Parameters
    ----------
    input_tensor : feature tensor fed to SecondaryStatistic.
    nb_class : int, output dim of the vectorization stage.
    stage, block : identifiers used to build unique layer names.
    epsilon : eps passed to SecondaryStatistic.
    parametric : list of O2Transform output dims (default: none).
    activation : activation for the vectorization layers.
    cov_mode, cov_regularizer : forwarded to SecondaryStatistic.
    vectorization : one of 'wv', 'dense', 'flatten'.
    o2tconstraints : accepted for API compatibility; unused.

    Returns
    -------
    Output tensor of the selected vectorization.

    Raises
    ------
    ValueError if ``vectorization`` is not recognized.
    """
    # Fix: avoid mutable default argument shared across calls.
    parametric = [] if parametric is None else parametric

    if epsilon > 0:
        cov_name_base = 'cov' + str(stage) + block + '_branch_epsilon' + str(
            epsilon)
    else:
        cov_name_base = 'cov' + str(stage) + block + '_branch'
    o2t_name_base = 'o2t' + str(stage) + block + '_branch'
    dense_name_base = 'fc' + str(stage) + block + '_branch'
    wp_name_base = 'wp' + str(stage) + block + '_branch'

    x = SecondaryStatistic(name=cov_name_base,
                           eps=epsilon,
                           cov_mode=cov_mode,
                           cov_regularizer=cov_regularizer,
                           **kwargs)(input_tensor)

    for idx, param in enumerate(parametric):
        x = O2Transform(param, activation='relu',
                        name=o2t_name_base + str(idx))(x)
        # BN expects a channel axis; add one, normalize, remove it again.
        x = ExpandDims()(x)
        x = BatchNormalization_v2(axis=-1)(x)
        x = Squeeze()(x)

    if vectorization == 'wv':
        x = WeightedVectorization(nb_class,
                                  activation=activation,
                                  name=wp_name_base)(x)
    elif vectorization == 'dense':
        x = Flatten()(x)
        x = Dense(nb_class, activation=activation, name=dense_name_base)(x)
    elif vectorization == 'flatten':
        x = Flatten()(x)
    else:
        # Fix: the ValueError was previously constructed but never raised,
        # silently returning the un-vectorized tensor.
        raise ValueError("vectorization parameter not recognized : {}".format(
            vectorization))
    return x
Example #5
0
def covariance_block_pow(input_tensor,
                         nb_class,
                         stage,
                         block,
                         epsilon=0,
                         parametric=None,
                         activation='relu',
                         cov_mode='channel',
                         cov_regularizer=None,
                         vectorization='mat_flatten',
                         o2tconstraints=None,
                         cov_beta=0.3,
                         **kwargs):
    """Covariance branch with a matrix power (alpha=0.5) normalization.

    SecondaryStatistic -> PowTransform -> O2Transform stack ->
    vectorization.

    Parameters
    ----------
    input_tensor : feature tensor fed to SecondaryStatistic.
    nb_class : int, output dim of the vectorization stage.
    stage, block : identifiers used to build unique layer names.
    epsilon : eps passed to SecondaryStatistic.
    parametric : list of O2Transform output dims (default: none).
    activation : activation for the vectorization layers.
    cov_mode, cov_regularizer, cov_beta : forwarded to SecondaryStatistic.
    vectorization : one of 'wv', 'dense', 'flatten', 'mat_flatten'.
    o2tconstraints : accepted for API compatibility; unused.

    Returns
    -------
    Output tensor of the selected vectorization.

    Raises
    ------
    ValueError if ``vectorization`` is not recognized.
    """
    # Fix: avoid mutable default argument shared across calls.
    parametric = [] if parametric is None else parametric

    cov_name_base = get_cov_name_base(stage, block)
    o2t_name_base = 'o2t' + str(stage) + block + '_branch'
    pow_name_base = 'pow' + str(stage) + block + '_branch'
    dense_name_base = 'fc' + str(stage) + block + '_branch'
    wp_name_base = 'wp' + str(stage) + block + '_branch'

    x = SecondaryStatistic(name=cov_name_base,
                           eps=epsilon,
                           cov_beta=cov_beta,
                           cov_mode=cov_mode,
                           cov_regularizer=cov_regularizer)(input_tensor)

    # Matrix square root (alpha=0.5) normalization before the O2 stack.
    x = PowTransform(alpha=0.5, name=pow_name_base, normalization=None)(x)
    for idx, param in enumerate(parametric):
        x = O2Transform(param, activation='relu',
                        name=o2t_name_base + str(idx))(x)

    if vectorization == 'wv':
        x = WeightedVectorization(nb_class,
                                  activation=activation,
                                  name=wp_name_base)(x)
    elif vectorization == 'dense':
        x = Flatten()(x)
        x = Dense(nb_class, activation=activation, name=dense_name_base)(x)
    elif vectorization == 'flatten':
        x = Flatten()(x)
    elif vectorization == 'mat_flatten':
        x = FlattenSymmetric()(x)
    else:
        # Fix: the ValueError was previously constructed but never raised,
        # silently returning the un-vectorized tensor.
        raise ValueError("vectorization parameter not recognized : {}".format(
            vectorization))
    return x
Example #6
0
def covariance_block_aaai(input_tensor,
                          nb_class,
                          stage,
                          block,
                          epsilon=0,
                          parametric=None,
                          activation='relu',
                          cov_mode='channel',
                          cov_regularizer=None,
                          vectorization='wv',
                          **kwargs):
    """Covariance branch in the AAAI-paper style.

    SecondaryStatistic -> [O2Transform -> MatrixReLU] per entry in
    ``parametric`` -> LogTransform -> vectorization.

    Parameters
    ----------
    input_tensor : feature tensor fed to SecondaryStatistic.
    nb_class : int, output dim of the vectorization stage.
    stage, block : identifiers used to build unique layer names.
    epsilon : eps for SecondaryStatistic, MatrixReLU, and LogTransform.
    parametric : list of O2Transform output dims (default: none).
    activation : activation for the vectorization layers.
    cov_mode, cov_regularizer : forwarded to SecondaryStatistic.
    vectorization : one of 'wv', 'dense', 'flatten'.

    Returns
    -------
    Output tensor of the selected vectorization.

    Raises
    ------
    ValueError if ``vectorization`` is not recognized.
    """
    # Fix: avoid mutable default argument shared across calls.
    parametric = [] if parametric is None else parametric

    if epsilon > 0:
        cov_name_base = 'cov' + str(stage) + block + '_branch_epsilon' + str(
            epsilon)
    else:
        cov_name_base = 'cov' + str(stage) + block + '_branch'
    o2t_name_base = 'o2t' + str(stage) + block + '_branch'
    relu_name_base = 'matrelu' + str(stage) + block + '_branch'
    log_name_base = 'log' + str(stage) + block + '_branch'
    dense_name_base = 'fc' + str(stage) + block + '_branch'
    wp_name_base = 'wp' + str(stage) + block + '_branch'

    x = SecondaryStatistic(name=cov_name_base,
                           eps=epsilon,
                           cov_mode=cov_mode,
                           cov_regularizer=cov_regularizer,
                           **kwargs)(input_tensor)
    for idx, param in enumerate(parametric):
        x = O2Transform(param, activation='relu',
                        name=o2t_name_base + str(idx))(x)
        x = MatrixReLU(epsilon=epsilon, name=relu_name_base + str(idx))(x)
    # Log-Euclidean mapping before vectorization.
    x = LogTransform(epsilon, name=log_name_base)(x)
    if vectorization == 'wv':
        x = WeightedVectorization(nb_class,
                                  activation=activation,
                                  name=wp_name_base)(x)
    elif vectorization == 'dense':
        x = Flatten()(x)
        x = Dense(nb_class, activation=activation, name=dense_name_base)(x)
    elif vectorization == 'flatten':
        x = Flatten()(x)
    else:
        # Fix: the ValueError was previously constructed but never raised,
        # silently returning the un-vectorized tensor.
        raise ValueError("vectorization parameter not recognized : {}".format(
            vectorization))
    return x
Example #7
0
def covariance_block_corr(input_tensor,
                          nb_class,
                          stage,
                          block,
                          epsilon=0,
                          parametric=None,
                          activation='relu',
                          cov_mode='channel',
                          cov_regularizer=None,
                          vectorization='wv',
                          o2t_constraints=None,
                          normalization=False,
                          so_mode=1,
                          **kwargs):
    """Correlation-based covariance branch.

    SecondaryStatistic -> Correlation (normalize covariance to a
    correlation matrix) -> optional [SecondOrderBatchNormalization ->
    O2Transform] stack -> WeightedVectorization.

    Parameters
    ----------
    input_tensor : feature tensor fed to SecondaryStatistic.
    nb_class : int, output dim of the WeightedVectorization layer.
    stage, block : identifiers used to build unique layer names.
    epsilon : eps passed to SecondaryStatistic.
    parametric : list of O2Transform output dims (default: none).
    activation : activation for WeightedVectorization.
    cov_mode, cov_regularizer : forwarded to SecondaryStatistic.
    o2t_constraints : kernel constraint forwarded to O2Transform.
    normalization : bool, insert a SecondOrderBatchNormalization before
        each O2Transform when True.
    so_mode : forwarded to SecondOrderBatchNormalization.
    vectorization : accepted for API compatibility; WV is always used.

    Returns
    -------
    Output tensor of the WeightedVectorization layer.
    """
    # Fix: avoid mutable default argument shared across calls.
    parametric = [] if parametric is None else parametric

    if epsilon > 0:
        cov_name_base = 'cov' + str(stage) + block + '_branch_epsilon' + str(
            epsilon)
    else:
        cov_name_base = 'cov' + str(stage) + block + '_branch'
    o2t_name_base = 'o2t' + str(stage) + block + '_branch'
    wp_name_base = 'pv' + str(stage) + block + '_branch'
    with tf.name_scope(cov_name_base):
        x = SecondaryStatistic(name=cov_name_base,
                               eps=epsilon,
                               cov_mode=cov_mode,
                               cov_regularizer=cov_regularizer,
                               **kwargs)(input_tensor)
        x = Correlation()(x)
    for idx, param in enumerate(parametric):
        with tf.name_scope(o2t_name_base + str(idx)):
            if normalization:
                x = SecondOrderBatchNormalization(so_mode=so_mode,
                                                  momentum=0.8,
                                                  axis=-1)(x)
            x = O2Transform(param,
                            activation='relu',
                            name=o2t_name_base + str(idx),
                            kernel_constraint=o2t_constraints)(x)
    with tf.name_scope(wp_name_base):
        x = WeightedVectorization(nb_class,
                                  activation=activation,
                                  name=wp_name_base)(x)
    return x
Example #8
0
def covariance_block_original(input_tensor,
                              nb_class,
                              stage,
                              block,
                              epsilon=0,
                              parametric=None,
                              activation='relu',
                              cov_mode='channel',
                              cov_regularizer=None,
                              o2t_constraints=None,
                              o2t_regularizer=None,
                              o2t_activation='relu',
                              use_bias=False,
                              robust=False,
                              cov_alpha=0.1,
                              cov_beta=0.3,
                              **kwargs):
    """Original covariance branch: SecondaryStatistic -> O2Transform stack
    -> WeightedVectorization.

    Parameters
    ----------
    input_tensor : feature tensor fed to SecondaryStatistic.
    nb_class : int, output dim of the WeightedVectorization layer.
    stage, block : identifiers used to build unique layer names.
    epsilon : eps passed to SecondaryStatistic.
    parametric : list of O2Transform output dims (default: none).
    activation : activation for WeightedVectorization.
    cov_mode, cov_regularizer, robust, cov_alpha, cov_beta : forwarded to
        SecondaryStatistic.
    o2t_constraints, o2t_regularizer, o2t_activation : forwarded to each
        O2Transform layer.
    use_bias : bool, forwarded to WeightedVectorization.

    Returns
    -------
    Output tensor of the WeightedVectorization layer.
    """
    # Fix: avoid mutable default argument shared across calls.
    parametric = [] if parametric is None else parametric

    cov_name_base = get_cov_name_base(stage, block)
    o2t_name_base = 'o2t' + str(stage) + '_branch' + block
    wp_name_base = 'wp' + str(stage) + '_branch' + block
    with tf.name_scope(cov_name_base):
        x = SecondaryStatistic(name=cov_name_base,
                               eps=epsilon,
                               cov_mode=cov_mode,
                               cov_regularizer=cov_regularizer,
                               robust=robust,
                               cov_alpha=cov_alpha,
                               cov_beta=cov_beta)(input_tensor)
    for idx, param in enumerate(parametric):
        with tf.name_scope(o2t_name_base + str(idx)):
            x = O2Transform(
                param,
                activation=o2t_activation,
                name=o2t_name_base + str(idx),
                kernel_constraint=o2t_constraints,
                kernel_regularizer=o2t_regularizer,
            )(x)
    with tf.name_scope(wp_name_base):
        x = WeightedVectorization(nb_class,
                                  use_bias=use_bias,
                                  activation=activation,
                                  name=wp_name_base)(x)
    return x
Example #9
0
def covariance_block_matbp(input_tensor,
                           nb_class,
                           stage,
                           block,
                           epsilon=0,
                           parametric=None,
                           activation='relu',
                           cov_mode='channel',
                           cov_regularizer=None,
                           vectorization='dense',
                           o2tconstraints=None,
                           **kwargs):
    """Matrix-backprop style branch: mean-normalized covariance, optional
    O2Transform stack, LogTransform, then Flatten.

    Parameters
    ----------
    input_tensor : feature tensor fed to SecondaryStatistic.
    nb_class : accepted for API compatibility; unused (no Dense head).
    stage, block : identifiers used to build unique layer names.
    epsilon : eps for SecondaryStatistic and LogTransform.
    parametric : list of O2Transform output dims (default: none).
    cov_mode, cov_regularizer : forwarded to SecondaryStatistic.
    activation, vectorization, o2tconstraints : accepted for API
        compatibility; the output is always flattened.

    Returns
    -------
    Flattened log-covariance tensor.
    """
    # Fix: avoid mutable default argument shared across calls.
    parametric = [] if parametric is None else parametric

    if epsilon > 0:
        cov_name_base = 'cov' + str(stage) + block + '_branch_epsilon' + str(
            epsilon)
    else:
        cov_name_base = 'cov' + str(stage) + block + '_branch'
    o2t_name_base = 'o2t' + str(stage) + block + '_branch'
    log_name_base = 'log' + str(stage) + block + '_branch'

    x = SecondaryStatistic(name=cov_name_base,
                           eps=epsilon,
                           normalization='mean',
                           cov_mode=cov_mode,
                           cov_regularizer=cov_regularizer,
                           **kwargs)(input_tensor)
    for idx, param in enumerate(parametric):
        x = O2Transform(param, activation='relu',
                        name=o2t_name_base + str(idx))(x)

    # Log-Euclidean mapping, then plain flatten (no Dense/WV head here).
    x = LogTransform(epsilon, name=log_name_base)(x)
    x = Flatten()(x)
    return x
Example #10
0
def covariance_block_mix(input_tensor,
                         nb_class,
                         stage,
                         block,
                         epsilon=0,
                         parametric=None,
                         denses=None,
                         wv=True,
                         wv_param=None,
                         activation='relu',
                         **kwargs):
    """Mixed covariance branch: O2Transform stack, then WV or Flatten,
    optional hidden Dense layers, and a final ``nb_class``-way Dense.

    Parameters
    ----------
    input_tensor : feature tensor fed to SecondaryStatistic.
    nb_class : int, output dim of the final Dense layer.
    stage, block : identifiers used to build unique layer names.
    epsilon : eps passed to SecondaryStatistic.
    parametric : list of O2Transform output dims (default: none).
    denses : list of hidden Dense sizes applied after vectorization.
    wv : bool, use WeightedVectorization (True) or Flatten (False).
    wv_param : int or None, WV output dim; defaults to ``nb_class``.
    activation : activation for the WV and Dense layers.

    Returns
    -------
    Output tensor of the final Dense layer.
    """
    # Fix: avoid mutable default arguments shared across calls.
    parametric = [] if parametric is None else parametric
    denses = [] if denses is None else denses

    if epsilon > 0:
        cov_name_base = 'cov' + str(stage) + block + '_branch_epsilon' + str(
            epsilon)
    else:
        cov_name_base = 'cov' + str(stage) + block
    o2t_name_base = 'o2t' + str(stage) + block
    dense_name_base = 'dense' + str(stage) + block

    x = SecondaryStatistic(name=cov_name_base, eps=epsilon,
                           **kwargs)(input_tensor)
    for idx, param in enumerate(parametric):
        x = O2Transform(param, activation='relu',
                        name=o2t_name_base + str(idx))(x)
    if wv:
        if wv_param is None:
            wv_param = nb_class
        x = WeightedVectorization(wv_param,
                                  activation=activation,
                                  name='wv' + str(stage) + block)(x)
    else:
        x = Flatten()(x)
    for idx, param in enumerate(denses):
        x = Dense(param, activation=activation,
                  name=dense_name_base + str(idx))(x)
    x = Dense(nb_class, activation=activation, name=dense_name_base)(x)
    return x
Example #11
0
def covariance_block_sobn_multi_o2t(input_tensor,
                                    nb_class,
                                    stage,
                                    block,
                                    epsilon=0,
                                    parametric=None,
                                    activation='relu',
                                    cov_mode='channel',
                                    cov_regularizer=None,
                                    vectorization=None,
                                    o2t_constraints=None,
                                    nb_o2t=1,
                                    o2t_concat='concat',
                                    so_mode=2,
                                    **kwargs):
    """Multiple parallel SO-BN + O2Transform branches over one covariance.

    A single SecondaryStatistic output is fed to ``nb_o2t`` parallel
    [SecondOrderBatchNormalization -> O2Transform] stacks; each branch is
    optionally vectorized, then the branches are concatenated.

    Parameters
    ----------
    input_tensor : feature tensor fed to SecondaryStatistic.
    nb_class : int, per-branch vectorization dim.
    stage, block : identifiers used to build unique layer names.
    epsilon : eps passed to SecondaryStatistic.
    parametric : list of O2Transform output dims per branch (default: none).
    activation : activation for the per-branch vectorization layers.
    cov_mode, cov_regularizer : forwarded to SecondaryStatistic.
    vectorization : one of 'wv', 'dense', 'flatten', 'mat_flatten', None.
        With None the branches stay matrices and are matrix-concatenated.
    o2t_constraints : accepted for API compatibility; unused.
    nb_o2t : int, number of parallel branches.
    o2t_concat : only 'concat' is implemented.
    so_mode : forwarded to SecondOrderBatchNormalization.

    Returns
    -------
    Concatenated (and possibly vectorized) branch output tensor.

    Raises
    ------
    ValueError if ``vectorization`` is not recognized;
    NotImplementedError if ``o2t_concat`` is not 'concat'.
    """
    # Fix: avoid mutable default argument shared across calls.
    parametric = [] if parametric is None else parametric

    if epsilon > 0:
        cov_name_base = 'cov' + str(stage) + block + '_branch_epsilon' + str(
            epsilon)
    else:
        cov_name_base = 'cov' + str(stage) + block + '_branch'
    o2t_name_base = 'o2t' + str(stage) + block + '_branch'
    dense_name_base = 'fc' + str(stage) + block + '_branch'
    wp_name_base = 'wp' + str(stage) + block + '_branch'

    x = SecondaryStatistic(name=cov_name_base,
                           eps=epsilon,
                           cov_mode=cov_mode,
                           cov_regularizer=cov_regularizer,
                           **kwargs)(input_tensor)

    # Build nb_o2t parallel branches from the same covariance matrix.
    cov_input = x
    cov_br = []
    for i in range(nb_o2t):
        x = cov_input
        # idx is also used in the 'wv' layer name below; initialize it so
        # an empty ``parametric`` no longer raises NameError there
        # (previously the loop variable leaked out of the loop).
        idx = -1
        for idx, param in enumerate(parametric):
            x = SecondOrderBatchNormalization(so_mode=so_mode,
                                              momentum=0.8,
                                              axis=-1)(x)
            x = O2Transform(param,
                            activation='relu',
                            name=o2t_name_base + str(idx) + '_' + str(i))(x)
        if vectorization == 'wv':
            x = WeightedVectorization(nb_class,
                                      activation=activation,
                                      name=wp_name_base + str(idx) + '_' +
                                      str(i))(x)
        elif vectorization == 'dense':
            x = Flatten()(x)
            x = Dense(nb_class, activation=activation, name=dense_name_base)(x)
        elif vectorization == 'flatten':
            x = Flatten()(x)
        elif vectorization == 'mat_flatten':
            x = FlattenSymmetric()(x)
        elif vectorization is None:
            pass
        else:
            # Fix: the ValueError was previously constructed but never
            # raised, silently continuing with the un-vectorized tensor.
            raise ValueError(
                "vectorization parameter not recognized : {}".format(
                    vectorization))

        cov_br.append(x)

    if o2t_concat == 'concat' and vectorization is None:
        # Matrix (block-diagonal) concat followed by vectorization.
        x = MatrixConcat(cov_br)(cov_br)
        # Fix: use floor division — '/' yields a float layer dim in Py3.
        x = WeightedVectorization(nb_class * nb_o2t // 2)(x)
    elif o2t_concat == 'concat':
        # Plain vector concatenation of already-vectorized branches.
        x = merge(cov_br, mode='concat')
    else:
        raise NotImplementedError

    return x
Example #12
0
def dcov_multi_out_model_wrapper(base_model,
                                 parametrics=[],
                                 mode=0,
                                 nb_classes=1000,
                                 basename='',
                                 cov_mode='channel',
                                 cov_branch='o2t_no_wv',
                                 cov_branch_output=None,
                                 freeze_conv=False,
                                 cov_regularizer=None,
                                 nb_branch=1,
                                 concat='concat',
                                 last_conv_feature_maps=[],
                                 upsample_method='conv',
                                 regroup=False,
                                 **kwargs):
    """
    Wrapper for any multi-output base model; attaches covariance branches
    right after the last layers of the given model.

    The base model must expose exactly three outputs
    (``base_model.outputs``, e.g. 256/512/512-channel feature maps),
    which are combined according to ``mode``:

    mode 1: 1x1 convs reduce dims, pooling aligns spatial sizes, then one
        covariance branch per block.
    mode 2/3: SeparateConvolutionFeatures splits block2/block3 into
        sub-features, one covariance branch per sub-feature; mode 3
        additionally sums each block's branch outputs and re-vectorizes.
    mode 4: Feature-Pyramid-Network-like design; covariance branches on
        split block3 features are matrix-concatenated with second-order
        descriptors of block1/block2, then weighted-vectorized.

    Parameters
    ----------
    base_model : model with three outputs to wrap.
    parametrics : list of O2Transform dims forwarded to the cov branch.
        NOTE(review): mutable default argument — shared across calls.
    mode : int, selects the topology described above.
        NOTE(review): for a mode outside {1, 2, 3, 4} no branch assigns
        ``x``, so the final Dense call below raises NameError.
    nb_classes : int, size of the softmax output layer.
    basename : str, name of the returned Model.
    cov_mode, cov_branch, cov_branch_output, cov_regularizer : forwarded
        to the covariance block builder resolved by get_cov_block.
    freeze_conv : bool, if True the base model's layers are frozen.
    nb_branch : int, number of separated branches in mode 4.
    concat, last_conv_feature_maps, upsample_method, regroup : accepted
        but not referenced in this function body.

    Returns
    -------
    Model mapping ``base_model.input`` to the softmax predictions.
    """
    # Kept for parity with sibling wrappers; not referenced below.
    cov_branch_mode = cov_branch
    # Resolve the covariance-branch builder function by name.
    covariance_block = get_cov_block(cov_branch)

    if cov_branch_output is None:
        cov_branch_output = nb_classes
    # 256, 512, 512
    # Assumes exactly three outputs — TODO confirm for every base model.
    block1, block2, block3 = outputs = base_model.outputs
    print("===================")
    cov_outputs = []
    if mode == 1:
        print("Model design : ResNet_o2_multi_branch 1x1 conv to reduce dim ")
        """ 1x1 conv to reduce dim """
        # Starting from block3
        block3 = upsample_wrapper_v1(block3, [1024, 512])
        block2 = upsample_wrapper_v1(block2, [512])
        # Pool the shallower blocks so spatial sizes line up with block3.
        block2 = MaxPooling2D()(block2)
        block1 = MaxPooling2D(pool_size=(4, 4))(block1)
        outputs = [block1, block2, block3]
        # One covariance branch per reduced block.
        for ind, x in enumerate(outputs):
            cov_branch = covariance_block(x,
                                          cov_branch_output,
                                          stage=5,
                                          block=str(ind),
                                          parametric=parametrics,
                                          cov_mode=cov_mode,
                                          cov_regularizer=cov_regularizer,
                                          **kwargs)
            x = cov_branch
            cov_outputs.append(x)
    elif mode == 2 or mode == 3:
        """ Use branchs to reduce dim """
        # SeparateConvolutionFeatures returns a LIST of sub-feature maps;
        # block1 is wrapped in a list so all three iterate uniformly.
        block3 = SeparateConvolutionFeatures(4)(block3)
        block2 = SeparateConvolutionFeatures(2)(block2)
        block1 = MaxPooling2D()(block1)
        block1 = [block1]
        outputs = [block1, block2, block3]
        for ind, outs in enumerate(outputs):
            block_outs = []
            for ind2, x in enumerate(outs):
                cov_branch = covariance_block(x,
                                              cov_branch_output,
                                              stage=5,
                                              block=str(ind) + '_' + str(ind2),
                                              parametric=parametrics,
                                              cov_mode=cov_mode,
                                              cov_regularizer=cov_regularizer,
                                              **kwargs)
                x = cov_branch
                block_outs.append(x)
            if mode == 3:
                """ Sum block covariance output """
                if len(block_outs) > 1:
                    # Element-wise sum of this block's branch outputs,
                    # then vectorize the summed matrix.
                    o = merge(block_outs,
                              mode='sum',
                              name='multibranch_sum_{}'.format(ind))
                    o = WeightedVectorization(cov_branch_output)(o)
                    cov_outputs.append(o)
                else:
                    a = block_outs[0]
                    # Heuristic: an 'o2t' tensor name means the branch
                    # returned a matrix that still needs vectorization.
                    if 'o2t' in a.name:
                        a = WeightedVectorization(cov_branch_output)(a)
                    cov_outputs.append(a)
            else:
                cov_outputs.extend(block_outs)
    elif mode == 4:
        """ Use the similar structure to Feature Pyramid Network """
        # supplimentary stream
        block1 = upsample_wrapper_v1(block1, [256], stage='block1')
        block2 = upsample_wrapper_v1(block2, [256], stage='block2')
        # main stream
        block3 = upsample_wrapper_v1(block3, [512], stage='block3')

        cov_input = SeparateConvolutionFeatures(nb_branch)(block3)
        cov_outputs = []
        for ind, x in enumerate(cov_input):

            cov_branch = covariance_block(x,
                                          cov_branch_output,
                                          stage=5,
                                          block=str(ind),
                                          parametric=parametrics,
                                          cov_mode=cov_mode,
                                          cov_regularizer=cov_regularizer,
                                          normalization=False,
                                          **kwargs)
            x = cov_branch
            cov_outputs.append(x)

        # Block-diagonal concat of all branch matrices, then one shared
        # O2Transform on the main stream.
        x = MatrixConcat(cov_outputs, name='Matrix_diag_concat')(cov_outputs)
        x = O2Transform(64, activation='relu', name='o2t_mainst_1')(x)

        # Second-order descriptors of the supplementary streams.
        block2 = SecondaryStatistic(name='cov_block2',
                                    cov_mode='pmean',
                                    robust=False,
                                    eps=1e-5)(block2)
        block2 = O2Transform(64, activation='relu', name='o2t_block2')(block2)

        # fuse = merge([block2, x], mode='sum')
        # fuse = O2Transform(64, activation='relu', name='o2t_mainst_2')(fuse)

        block1 = SecondaryStatistic(name='cov_block1',
                                    cov_mode='pmean',
                                    robust=False,
                                    eps=1e-5)(block1)
        block1 = O2Transform(64, activation='relu', name='o2t_block1')(block1)

        # fuse = merge([fuse, block1], mode='sum')

        # Fuse main and supplementary streams by matrix concat, then
        # vectorize to a 128-dim descriptor.
        x = MatrixConcat([x, block1, block2],
                         name='Matrix_diag_concat_all')([x, block1, block2])
        x = WeightedVectorization(128, activation='relu', name='wv_fuse')(x)

        # Merge the last matrix for matrix concat

    if freeze_conv:
        toggle_trainable_layers(base_model, not freeze_conv)

    # NOTE(review): modes 1 and 2 leave ``x`` bound to the LAST branch
    # only — ``cov_outputs`` is collected but never merged here; verify
    # against the caller whether that is intended.
    x = Dense(nb_classes, activation='softmax')(x)

    model = Model(base_model.input, x, name=basename)
    return model