Example #1
def deep_cnn_v1(x,
                training,
                nbof_labels,
                regularizer_rate=0,
                fmaps=[32, 64, 128]):
    def conv_layer(name, x, fmaps):
        with tf.compat.v1.variable_scope('Conv_{}'.format(name)):
            x = tf.compat.v1.layers.batch_normalization(
                tf.nn.elu(
                    layers.conv2d(x,
                                  fmaps=fmaps,
                                  kernel=3,
                                  strides=1,
                                  regularizer_rate=regularizer_rate)),
                training=training,
                momentum=0.99,
                gamma_regularizer=tf.keras.regularizers.l2(regularizer_rate),
                beta_regularizer=tf.keras.regularizers.l2(regularizer_rate))
            return x

    # Stem: one conv block followed by downsampling.
    x = conv_layer('Input', x, fmaps[0])
    x = tf.nn.max_pool2d(x, 2, 2, 'SAME')
    # Stages: two conv blocks per stage, then pooling and training-only dropout
    # whose rate grows with depth.
    for i in range(len(fmaps)):
        x = conv_layer('{}_{}'.format(i, 0), x, fmaps[i])
        x = conv_layer('{}_{}'.format(i, 1), x, fmaps[i])
        x = tf.nn.max_pool2d(x, 2, 2, 'SAME')
        x = tf.cond(training, lambda: tf.nn.dropout(x, rate=0.2 + i * 0.1),
                    lambda: x)
    # Head: global average pooling feeding the dense classifier.
    x = layers.global_avg_pool(x)
    with tf.compat.v1.variable_scope('Output'):
        logit = layers.dense(x, fmaps=nbof_labels)
    return logit
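
A minimal usage sketch for the model above, assuming TF1-style graph mode via tf.compat.v1 and the project-local `layers` module being importable; the placeholder shapes and label count are illustrative only:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

# Hypothetical inputs: 32x32 RGB images and a scalar boolean `training` switch.
images = tf.compat.v1.placeholder(tf.float32, [None, 32, 32, 3], name='images')
training = tf.compat.v1.placeholder(tf.bool, shape=[], name='training')

logits = deep_cnn_v1(images, training, nbof_labels=10, regularizer_rate=1e-4)
probs = tf.nn.softmax(logits)
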
Example #2
def dense_layer(x, fmaps, number):
    with tf.compat.v1.variable_scope('Dense_{}'.format(number)):
        x = layers.dense(x, fmaps=fmaps)
        x = layers.bias(x)
        # Skip the activation on a single-unit output layer.
        if fmaps > 1:
            x = layers.leaky_relu(x)
    return x
Example #3
def dense_layer(name, x, fmaps):
    # `regularizer_rate` and `BAN` are defined in the enclosing scope of the
    # original project.
    with tf.compat.v1.variable_scope('Dense_{}'.format(name)):
        x = layers.dense(x, fmaps=fmaps, regularizer_rate=regularizer_rate)
        x = BAN(x, use_bias=False, use_act=False, use_norm=False)
    return x
Example #4
def dense_layer(x, number):
    with tf.compat.v1.variable_scope('Dense_0'):
        x = layers.dense(x, fmaps=latent_size*16)
        # Reshape the dense output into a 4x4 spatial map with `latent_size` channels.
        x = tf.compat.v1.reshape(x, [-1, 4, 4, latent_size])
        x = BAN(x)
    return x
Example #5
def wideresnet_se(inputs, training, nbof_labels, regularizer_rate=0):
    def batch_norm(x):
        with tf.compat.v1.variable_scope('BN'):
            # x = layers.batch_norm(x, training=training, regularizer_rate=regularizer_rate)
            x = tf.compat.v1.layers.batch_normalization(
                x,
                training=training,
                momentum=0.99,
                gamma_regularizer=tf.keras.regularizers.l2(regularizer_rate),
                beta_regularizer=tf.keras.regularizers.l2(regularizer_rate))
            # if len(x.shape)>2: x = layers.pixel_norm(x)
            return x

    # Squeeze-and-Excitation gate: global-average-pool, bottleneck 1x1 conv,
    # sigmoid 1x1 conv, then channel-wise rescaling of the input.
    def block_se(name, x, fmaps, se_ratio):
        squeezed_fmaps = max(1, int(se_ratio * fmaps))
        with tf.compat.v1.variable_scope('SEBlock_{}'.format(name)):
            with tf.compat.v1.variable_scope('Squeeze'):
                s = tf.compat.v1.reduce_mean(x, axis=[1, 2], keepdims=True)
                s = tf.nn.swish(layers.conv2d(s, squeezed_fmaps, kernel=1))
            with tf.compat.v1.variable_scope('Excite'):
                s = tf.nn.sigmoid(layers.conv2d(s, fmaps, kernel=1))
            return s * x

    act = layers.leaky_relu

    # act = tf.nn.selu
    # Pre-activation residual block: BN -> activation before each conv, with an
    # identity (or strided max-pool) shortcut when the channel count is unchanged
    # and a 1x1 conv shortcut otherwise; an SE gate is applied before the addition.
    def block_basic(name, x, fmaps, strides, dropout_rate=0.0):
        with tf.compat.v1.variable_scope('Block_{}'.format(name)):
            if x.shape[-1] == fmaps:
                r = act(batch_norm(x))
                s = x if strides == 1 else tf.nn.max_pool2d(
                    x, ksize=1, strides=2, padding='SAME')
            else:
                x = act(batch_norm(x))
                with tf.compat.v1.variable_scope('Shortcut'):
                    s = layers.conv2d(x, fmaps, kernel=1, strides=strides)
            # r = block_se(name, r, fmaps, 0.25)
            with tf.compat.v1.variable_scope('Conv2D_0'):
                r = act(
                    batch_norm(
                        layers.conv2d(r if x.shape[-1] == fmaps else x,
                                      fmaps=fmaps,
                                      kernel=3,
                                      strides=strides)))
            if dropout_rate > 0:
                r = tf.cond(training,
                            lambda: tf.nn.dropout(r, rate=dropout_rate),
                            lambda: r,
                            name='use_dropout')
            with tf.compat.v1.variable_scope('Conv2D_1'):
                r = layers.conv2d(r, fmaps=fmaps, kernel=3)
            r = block_se(name, r, fmaps, 0.25)
            return r + s

    # Inputs
    with tf.compat.v1.variable_scope('Conv2D_1'):
        x = layers.conv2d(inputs, fmaps=16, kernel=3)
    # Middle layers
    fmaps = [160, 320, 640]
    nbof_unit = [4, 4, 4]
    # nbof_unit = [1,1,1]
    strides = [1, 2, 2]
    dropouts = [0., 0., 0.]
    # dropouts  = [0.025,0.05,0.1]
    # dropouts  = [0.1,0.2,0.3]
    # strides   = [2,2,2]
    # nbof_unit = [2,3,4,6,3]
    for i in range(len(fmaps)):
        x = block_basic('{}_{}'.format(i, 0),
                        x,
                        fmaps[i],
                        strides=strides[i],
                        dropout_rate=dropouts[i])
        for j in range(nbof_unit[i] - 1):
            x = block_basic('{}_{}'.format(i, j + 1),
                            x,
                            fmaps[i],
                            strides=1,
                            dropout_rate=dropouts[i])
    # Output
    with tf.compat.v1.variable_scope('Output'):
        x = act(batch_norm(x))
        x = layers.global_avg_pool(x)
        logit = layers.dense(x, fmaps=nbof_labels)
    return logit
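
For reference, the squeeze-and-excitation gating used in block_se can be written with core TensorFlow ops only; this is a sketch for graph mode that substitutes tf.compat.v1.layers.conv2d for the project-local layers.conv2d:

import tensorflow as tf

def se_gate(x, se_ratio=0.25):
    # Squeeze: average over the spatial dimensions to one value per channel.
    c = int(x.shape[-1])
    squeezed = max(1, int(se_ratio * c))
    s = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    # Bottleneck and expansion with 1x1 convolutions.
    s = tf.nn.swish(tf.compat.v1.layers.conv2d(s, squeezed, 1))
    s = tf.nn.sigmoid(tf.compat.v1.layers.conv2d(s, c, 1))
    # Excite: rescale each channel of the input by its learned gate.
    return x * s
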
Example #6
def wideresnet(inputs,
               training,
               nbof_labels,
               regularizer_rate=0,
               fmaps=[160, 320, 640],
               nbof_unit=[4, 4, 4],
               strides=[1, 2, 2],
               dropouts=[0., 0., 0.]):
    def batch_norm(x):
        with tf.compat.v1.variable_scope('BN'):
            # x = layers.batch_norm(x, training=training, regularizer_rate=regularizer_rate)
            x = tf.compat.v1.layers.batch_normalization(
                x,
                training=training,
                momentum=0.99,
                gamma_regularizer=tf.keras.regularizers.l2(regularizer_rate),
                beta_regularizer=tf.keras.regularizers.l2(regularizer_rate))
            # x = tf.nn.l2_normalize(x, axis=-1)
            # if len(x.shape)>2: x = layers.pixel_norm(x)
            return x

    # Same pre-activation residual block as in Example #5, but without the SE gate
    # and with L2 regularization passed to every convolution.
    def block_basic(name, x, fmaps, strides, dropout_rate=0.0):
        with tf.compat.v1.variable_scope('Block_{}'.format(name)):
            if x.shape[-1] == fmaps:
                r = layers.leaky_relu(batch_norm(x))
                s = x if strides == 1 else tf.nn.max_pool2d(
                    x, ksize=1, strides=2, padding='SAME')
            else:
                x = layers.leaky_relu(batch_norm(x))
                with tf.compat.v1.variable_scope('Shortcut'):
                    s = layers.conv2d(x,
                                      fmaps,
                                      kernel=1,
                                      strides=strides,
                                      regularizer_rate=regularizer_rate)
            with tf.compat.v1.variable_scope('Conv2D_0'):
                r = layers.leaky_relu(
                    batch_norm(
                        layers.conv2d(r if x.shape[-1] == fmaps else x,
                                      fmaps=fmaps,
                                      kernel=3,
                                      strides=strides,
                                      regularizer_rate=regularizer_rate)))
            if dropout_rate > 0:
                r = tf.cond(training,
                            lambda: tf.nn.dropout(r, rate=dropout_rate),
                            lambda: r,
                            name='use_dropout')
            with tf.compat.v1.variable_scope('Conv2D_1'):
                r = layers.conv2d(r,
                                  fmaps=fmaps,
                                  kernel=3,
                                  regularizer_rate=regularizer_rate)
            return r + s

    # Inputs
    with tf.compat.v1.variable_scope('Conv2D_1'):
        x = layers.conv2d(inputs,
                          fmaps=fmaps[0],
                          kernel=3,
                          regularizer_rate=regularizer_rate)
    # Middle layers
    for i in range(len(fmaps)):
        x = block_basic('{}_{}'.format(i, 0),
                        x,
                        fmaps[i],
                        strides=strides[i],
                        dropout_rate=dropouts[i])
        for j in range(nbof_unit[i] - 1):
            x = block_basic('{}_{}'.format(i, j + 1),
                            x,
                            fmaps[i],
                            strides=1,
                            dropout_rate=dropouts[i])
    # Output
    with tf.compat.v1.variable_scope('Output'):
        x = layers.leaky_relu(batch_norm(x))
        x = layers.global_avg_pool(x)
        logit = layers.dense(x,
                             fmaps=nbof_labels,
                             regularizer_rate=regularizer_rate)
    return logit
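
A call sketch for the configurable variant, reusing the placeholders from the sketch after Example #1; the narrower configuration below is illustrative only:

logits = wideresnet(images,
                    training,
                    nbof_labels=10,
                    regularizer_rate=1e-4,
                    fmaps=[32, 64, 128],
                    nbof_unit=[2, 2, 2],
                    strides=[1, 2, 2],
                    dropouts=[0.1, 0.2, 0.3])
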
Example #7
def EfficientNet(
        inputs,
        training,
        nbof_labels,
        width_coefficient=1.0,
        depth_coefficient=1.0,
        #dropout_rate=0.,
        depth_divisor=8,
        min_depth=None):

    block_args_list = get_default_block_list()

    with tf.variable_scope('Input'):
        x = layers.conv2d(inputs, 32, kernel=1, strides=1)
        x = layers.batch_norm(x, training)
        x = tf.nn.swish(x)

    # Blocks part
    for i, block_args in enumerate(block_args_list):
        assert block_args.num_repeat > 0

        # Update block input and output filters based on depth multiplier.
        block_args.input_filters = round_filters(block_args.input_filters,
                                                 width_coefficient,
                                                 depth_divisor, min_depth)
        block_args.output_filters = round_filters(block_args.output_filters,
                                                  width_coefficient,
                                                  depth_divisor, min_depth)
        block_args.num_repeat = round_repeats(block_args.num_repeat,
                                              depth_coefficient)

        # The first block needs to take care of stride and filter size increase.
        x = MBConvBlock('{}_0'.format(i), block_args.input_filters,
                        block_args.output_filters, block_args.kernel_size,
                        block_args.strides, block_args.expand_ratio,
                        block_args.se_ratio, block_args.identity_skip,
                        training)(x)

        if block_args.num_repeat > 1:
            block_args.input_filters = block_args.output_filters
            block_args.strides = 1

        for j in range(block_args.num_repeat - 1):
            x = MBConvBlock('{}_{}'.format(i, j + 1), block_args.input_filters,
                            block_args.output_filters, block_args.kernel_size,
                            block_args.strides, block_args.expand_ratio,
                            block_args.se_ratio, block_args.identity_skip,
                            training)(x)

    # Head part
    with tf.variable_scope('HeadConv'):
        x = layers.conv2d(x,
                          fmaps=round_filters(1280, width_coefficient,
                                              depth_coefficient, min_depth),
                          kernel=1)
        x = layers.batch_norm(x, training)
        x = tf.nn.swish(x)
    with tf.variable_scope('HeadDense'):
        x = layers.global_avg_pool(x)
        logit = layers.dense(x=x, fmaps=nbof_labels)
    return logit
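
The width and depth coefficients scale the base architecture (the B0 configuration uses 1.0 for both). A call sketch, assuming get_default_block_list, round_filters, round_repeats and MBConvBlock are importable from the surrounding project and reusing the placeholders from the earlier sketches:

logits_b0 = EfficientNet(images, training, nbof_labels=10,
                         width_coefficient=1.0, depth_coefficient=1.0)
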
Example #8
def dense_layer(x, fmaps, name=0, use_bias=True):
    with tf.compat.v1.variable_scope('Dense_{}'.format(name)):
        x = layers.dense(x, fmaps=fmaps, regularizer_rate=regularizer_rate)
        if use_bias:
            x = layers.bias(x, regularizer_rate=regularizer_rate)
    return x