Example 1
0
def exit_module(inputs,
                size=3,
                n_filters_1=[728, 1024],
                n_filters_2=[1536, 2048],
                pool_size=3,
                is_training=False,
                regularizer=None,
                kernel_init=He_normal(seed=42),
                bias_init=tf.zeros_initializer(),
                name="xception_exit_module"):
    """Xception exit-flow module.

    A strided residual stack of (relu -> separable conv -> BN) stages over
    ``n_filters_1``, downsampled by max-pooling and added to a 1x1 strided
    shortcut, followed by a non-residual (separable conv -> BN -> relu) tail
    over ``n_filters_2``.

    NOTE(review): the list defaults are mutable but only ever read, so the
    shared-default pitfall does not bite here.
    """
    with tf.variable_scope(name):
        # 1x1 stride-2 projection so the shortcut matches the pooled branch
        # both spatially and in channel count (n_filters_1[-1]).
        shortcut = conv2d_bn(inputs,
                             size=1,
                             n_filters=n_filters_1[-1],
                             stride=2,
                             is_training=is_training,
                             regularizer=regularizer,
                             kernel_init=kernel_init,
                             bias_init=bias_init,
                             name="shortcut")

        x = inputs
        # Pre-activation residual branch.
        for idx, filters in enumerate(n_filters_1):
            x = tf.nn.relu(x, name=f"relu_1_{idx}")
            x = separable_conv2d(x,
                                 size=size,
                                 n_filters=filters,
                                 stride=1,
                                 depth_multiplier=1,
                                 regularizer=regularizer,
                                 depth_init=kernel_init,
                                 pointwise_init=kernel_init,
                                 bias_init=bias_init,
                                 name=f"separable_conv_1_{idx}")
            x = tf.layers.batch_normalization(x,
                                              training=is_training,
                                              name=f"bn_1_{idx}")
        x = max_pool2d(x, size=pool_size, stride=2, name="max_pool")
        x = tf.add(x, shortcut, name="add_1")

        # Post-activation tail: no residual connection here.
        for idx, filters in enumerate(n_filters_2):
            x = separable_conv2d(x,
                                 size=size,
                                 n_filters=filters,
                                 stride=1,
                                 depth_multiplier=1,
                                 regularizer=regularizer,
                                 depth_init=kernel_init,
                                 pointwise_init=kernel_init,
                                 bias_init=bias_init,
                                 name=f"separable_conv_2_{idx}")
            x = tf.layers.batch_normalization(x,
                                              training=is_training,
                                              name=f"bn_2_{idx}")
            x = tf.nn.relu(x, name=f"relu_2_{idx}")
    return x
Example 2
0
def middle_module(inputs,
                  size=3,
                  n_filters=728,
                  n_repeat=8,
                  block_size=3,
                  is_training=False,
                  regularizer=None,
                  kernel_init=He_normal(seed=42),
                  bias_init=tf.zeros_initializer(),
                  name="xception_middle_module"):
    """Xception middle flow.

    Stacks ``n_repeat`` identity-shortcut residual blocks; each block runs
    ``block_size`` (relu -> separable conv -> BN) stages at a constant
    channel width of ``n_filters`` before adding the shortcut back in.
    """
    with tf.variable_scope(name):
        x = inputs
        for rep in range(n_repeat):
            # Identity shortcut: shapes never change inside a block.
            skip = tf.identity(x, name=f"shortcut_{rep}")
            for stage in range(block_size):
                x = tf.nn.relu(x, name=f"relu_{rep}_{stage}")
                x = separable_conv2d(x,
                                     size=size,
                                     n_filters=n_filters,
                                     stride=1,
                                     depth_multiplier=1,
                                     regularizer=regularizer,
                                     depth_init=kernel_init,
                                     pointwise_init=kernel_init,
                                     bias_init=bias_init,
                                     name=f"separable_conv_{rep}_{stage}")
                x = tf.layers.batch_normalization(x,
                                                  training=is_training,
                                                  name=f"bn_{rep}_{stage}")
            x = tf.add(x, skip, name=f"add_{rep}")
    return x
Example 3
0
def entry_block(inputs,
                n_filters,
                n_repeat=2,
                conv_size=3,
                pool_size=3,
                init_activation=tf.nn.relu,
                regularizer=None,
                kernel_init=He_normal(seed=42),
                bias_init=tf.zeros_initializer(),
                is_training=False,
                name="xception_entry_block"):
    """Xception entry-flow block.

    Runs ``n_repeat`` (activation -> separable conv -> BN) stages, then
    downsamples with max-pooling and adds a 1x1 stride-2 convolutional
    shortcut. ``init_activation`` is applied before the first stage only
    (pass None to skip it); subsequent stages always use relu.
    """
    with tf.variable_scope(name):
        # Strided 1x1 projection so the shortcut matches the pooled branch.
        shortcut = conv2d_bn(inputs,
                             size=1,
                             n_filters=n_filters,
                             stride=2,
                             is_training=is_training,
                             regularizer=regularizer,
                             kernel_init=kernel_init,
                             bias_init=bias_init,
                             name="shortcut")
        x = inputs
        for step in range(n_repeat):
            # First stage may use a custom (or no) activation.
            act = init_activation if step == 0 else tf.nn.relu
            if act is not None:
                x = act(x)
            x = separable_conv2d(x,
                                 size=conv_size,
                                 n_filters=n_filters,
                                 stride=1,
                                 depth_multiplier=1,
                                 regularizer=regularizer,
                                 depth_init=kernel_init,
                                 pointwise_init=kernel_init,
                                 bias_init=bias_init,
                                 name=f"separable_conv_{step}")
            x = tf.layers.batch_normalization(x,
                                              training=is_training,
                                              name=f"bn_{step}")
        x = max_pool2d(x, size=pool_size, stride=2, name="max_pool")
        outputs = tf.add(x, shortcut, name="add")
    return outputs
Example 4
0
def mobilenet_block(inputs,
                    n_filters,
                    stride=1,
                    conv_size=3,
                    alpha=1.0,
                    activation=tf.nn.relu,
                    regularizer=None,
                    kernel_init=He_normal(seed=42),
                    bias_init=tf.zeros_initializer(),
                    is_training=False,
                    name="mobilenet_block"):
    """MobileNet depthwise-separable block.

    separable conv -> BN -> activation, then 1x1 conv -> BN -> activation.
    ``alpha`` is the width multiplier applied to both the separable stage
    (scaled from the input channel count) and the pointwise stage (scaled
    from ``n_filters``).
    """
    depth_channels = int(inputs.shape[3].value * alpha)
    point_channels = int(n_filters * alpha)
    with tf.variable_scope(name):
        x = separable_conv2d(inputs,
                             size=conv_size,
                             n_filters=depth_channels,
                             stride=stride,
                             regularizer=regularizer,
                             depth_init=kernel_init,
                             pointwise_init=kernel_init,
                             bias_init=bias_init,
                             name="separable_conv")
        x = tf.layers.batch_normalization(x,
                                          training=is_training,
                                          name="batch_norm_1")
        if activation is not None:
            x = activation(x, name="activation_1")
        # 1x1 pointwise convolution to reach the target channel count.
        x = conv2d(x,
                   size=1,
                   n_filters=point_channels,
                   stride=1,
                   regularizer=regularizer,
                   kernel_init=kernel_init,
                   bias_init=bias_init,
                   name="conv")
        x = tf.layers.batch_normalization(x,
                                          training=is_training,
                                          name="batch_norm_2")
        if activation is not None:
            x = activation(x, name="activation_2")
    return x
Example 5
0
def shuffle_unit(inputs,
                 n_filters,
                 size=3,
                 stride=1,
                 activation=tf.nn.relu,
                 reduction_ratio=0.25,
                 n_groups=8,
                 is_training=False,
                 regularizer=None,
                 kernel_init=He_normal(seed=42),
                 bias_init=tf.zeros_initializer(),
                 name="shuffle_unit"):
    """ShuffleNet unit: grouped 1x1 reduce -> channel shuffle -> depthwise
    conv -> grouped 1x1 expand, joined with the shortcut by addition
    (stride 1) or by concatenation with an average-pooled shortcut
    (stride > 1).
    """
    with tf.variable_scope(name):
        bottleneck_channels = int(n_filters * reduction_ratio)

        # Shortcut: identity when shapes match, avg-pool when downsampling.
        if stride == 1:
            shortcut = tf.identity(inputs, name="shortcut")
        else:
            shortcut = avg_pool2d(inputs,
                                  size=3,
                                  stride=stride,
                                  name="shortcut_pool")

        # When concatenating, the conv branch only contributes the channels
        # the pooled shortcut does not already supply.
        if stride == 1:
            out_channels = n_filters
        else:
            out_channels = n_filters - inputs.shape[3].value

        net = group_conv2d(inputs,
                           size=1,
                           cardinality=n_groups,
                           n_filters=bottleneck_channels,
                           stride=1,
                           regularizer=regularizer,
                           kernel_init=kernel_init,
                           bias_init=bias_init,
                           name="group_conv2d_1")
        net = tf.layers.batch_normalization(net,
                                            training=is_training,
                                            name="batch_norm_1")
        net = activation(net, name="activation_1")

        # Mix information across the groups before the depthwise conv.
        net = shuffle_channels(net, n_groups=n_groups, name="shuffle")

        net = separable_conv2d(net,
                               size=size,
                               n_filters=n_filters,
                               stride=stride,
                               regularizer=regularizer,
                               depth_init=kernel_init,
                               pointwise_init=kernel_init,
                               bias_init=bias_init,
                               name="separable_conv")
        net = tf.layers.batch_normalization(net,
                                            training=is_training,
                                            name="batch_norm_2")

        net = group_conv2d(net,
                           size=1,
                           cardinality=n_groups,
                           n_filters=out_channels,
                           stride=1,
                           regularizer=regularizer,
                           kernel_init=kernel_init,
                           bias_init=bias_init,
                           name="group_conv2d_2")
        net = tf.layers.batch_normalization(net,
                                            training=is_training,
                                            name="batch_norm_3")

        if stride == 1:
            net = tf.add(net, shortcut, name="add")
        else:
            net = tf.concat([net, shortcut], axis=3, name="concat")
        net = activation(net, name="activation_2")

    return net
Example 6
0
def inverted_residual(inputs,
                      n_filters,
                      expand_ratio=1.0,
                      size=3,
                      stride=1,
                      activation=tf.nn.relu6,
                      regularizer=None,
                      kernel_init=He_normal(seed=42),
                      bias_init=tf.zeros_initializer(),
                      is_training=False,
                      name="inverted_residual"):
    """MobileNetV2-style inverted residual block.

    Expands to ``n_filters * expand_ratio`` channels with a 1x1 conv,
    applies a depthwise stage, then projects back down to ``n_filters``
    with a linear (no activation) 1x1 conv. A residual shortcut is added
    only when ``stride == 1``; if the input channel count differs from
    ``n_filters`` the shortcut is itself a 1x1 conv projection.

    Fix: the final projection previously took ``inputs`` instead of ``x``,
    which silently discarded the expansion and depthwise stages; it now
    consumes ``x`` as intended.
    """
    n_filters_expand = int(n_filters * expand_ratio)
    with tf.variable_scope(name):
        if stride == 1:
            if inputs.shape[3] != n_filters:
                # Channel counts differ: project the shortcut to match.
                shortcut = conv2d_bn(inputs,
                                     size=1,
                                     n_filters=n_filters,
                                     stride=1,
                                     is_training=is_training,
                                     regularizer=regularizer,
                                     kernel_init=kernel_init,
                                     bias_init=bias_init,
                                     name="shortcut")
            else:
                shortcut = tf.identity(inputs, name="shortcut")
        else:
            # Downsampling blocks have no residual connection.
            shortcut = None

        # pointwise expansion
        x = conv2d_bn_act(inputs,
                          size=1,
                          n_filters=n_filters_expand,
                          stride=1,
                          activation=activation,
                          is_training=is_training,
                          regularizer=regularizer,
                          kernel_init=kernel_init,
                          bias_init=bias_init,
                          name="conv_1")

        # depthwise
        x = separable_conv2d(x,
                             size=size,
                             n_filters=n_filters_expand,
                             stride=stride,
                             regularizer=regularizer,
                             depth_init=kernel_init,
                             pointwise_init=kernel_init,
                             bias_init=bias_init,
                             name="separable_conv")
        x = tf.layers.batch_normalization(x,
                                          training=is_training,
                                          name="batch_norm_1")
        if activation is not None:
            x = activation(x, name="activation")

        # pointwise linear projection (BUGFIX: was fed `inputs`, which
        # bypassed everything computed above)
        x = conv2d_bn(x,
                      size=1,
                      n_filters=n_filters,
                      stride=1,
                      is_training=is_training,
                      regularizer=regularizer,
                      kernel_init=kernel_init,
                      bias_init=bias_init,
                      name="conv_2")

        if shortcut is not None:
            x = tf.add(x, shortcut, name="add")
    return x
Example 7
0
def Reduction_A(inputs,
                p,
                n_filters,
                is_training=False,
                regularizer=None,
                activation=tf.nn.relu,
                kernel_init=He_normal(seed=42),
                bias_init=tf.zeros_initializer(),
                name="nasnet_normal_a"):
    # NASNet-style reduction cell built from five branches over the current
    # input (`inputs`, squeezed to `h`) and the previous cell's output
    # (`p`, adjusted to match). Returns (cell output, inputs) — presumably
    # so the caller can thread `inputs` in as the next cell's `p`; confirm
    # against the call sites.
    #
    # NOTE(review): the default scope name is "nasnet_normal_a" although the
    # function is a reduction cell — looks like a copy/paste leftover, but
    # changing it would rename every variable under this scope (breaking
    # checkpoint compatibility), so it is left as-is.
    #
    # Parameters:
    #   inputs: current feature map feeding the cell.
    #   p: previous cell's feature map (may need spatial/channel adjustment).
    #   n_filters: channel count used by every branch.
    #   is_training: forwarded to batch-norm layers inside the helpers.
    #   regularizer / kernel_init / bias_init: forwarded to all conv helpers.
    #   activation: accepted but unused in this body — TODO confirm whether
    #     the helpers were meant to receive it.

    with tf.variable_scope(name):
        # adjust to the reduction
        p = adjust(p, ref=inputs, n_filters=n_filters, name="adjust")

        # squeeze inputs to match dimensions
        h = squeeze(inputs,
                    n_filters=n_filters,
                    is_training=is_training,
                    regularizer=regularizer,
                    kernel_init=kernel_init,
                    bias_init=bias_init,
                    name="squeeze")

        # Branch 1: 5x5 sep-conv on h + 7x7 sep-conv on p, both stride 2.
        # x1 is NOT part of the final concat; it only feeds blocks 4 and 5.
        with tf.variable_scope("block_1"):
            x1_1 = separable_conv2d(h,
                                    size=5,
                                    n_filters=n_filters,
                                    stride=2,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_1_5x5")
            x1_2 = separable_conv2d(p,
                                    size=7,
                                    n_filters=n_filters,
                                    stride=2,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_1_7x7")
            x1 = tf.add(x1_1, x1_2, name="add_1")

        # Branch 2: max-pooled h + 7x7 sep-conv on p.
        with tf.variable_scope("block_2"):
            x2_1 = max_pool2d(h, size=3, stride=2, name="max_pool_2")
            x2_2 = separable_conv2d(p,
                                    size=7,
                                    n_filters=n_filters,
                                    stride=2,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_2")
            x2 = tf.add(x2_1, x2_2, name="add_2")

        # Branch 3: avg-pooled h + 5x5 sep-conv on p.
        with tf.variable_scope("block_3"):
            x3_1 = avg_pool2d(h, size=3, stride=2, name="avg_pool_3")
            x3_2 = separable_conv2d(p,
                                    size=5,
                                    n_filters=n_filters,
                                    stride=2,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_3")
            x3 = tf.add(x3_1, x3_2, name="add_3")

        # Branch 4: max-pooled h + 3x3 stride-1 sep-conv on branch-1 output.
        with tf.variable_scope("block_4"):
            x4_1 = max_pool2d(h, size=3, stride=2, name="max_pool_4")
            x4_2 = separable_conv2d(x1,
                                    size=3,
                                    n_filters=n_filters,
                                    stride=1,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_4")
            x4 = tf.add(x4_1, x4_2, name="add_4")

        # Branch 5: stride-1 avg-pool of branch-1 output added to branch 2.
        with tf.variable_scope("block_5"):
            x5 = avg_pool2d(x1, size=3, stride=1, name="avg_pool_5")
            x5 = tf.add(x2, x5, name="add_5")

        # Cell output: channel-wise concat of branches 2-5 (x1 excluded).
        outputs = tf.concat([x2, x3, x4, x5], axis=3, name="concat")
    return outputs, inputs