Example #1
def resample_feature_map(x,
                         level,
                         target_level,
                         convolution="conv2d",
                         target_level_filters=256,
                         normalization=dict(normalization="batch_norm",
                                            momentum=0.9,
                                            epsilon=1e-3,
                                            axis=-1,
                                            trainable=True),
                         kernel_regularizer=None,
                         name="resample"):
    input_filters = tf.keras.backend.int_shape(x)[-1]
    if input_filters != target_level_filters:
        x = conv_block(convolution,
                       filters=target_level_filters,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       kernel_regularizer=kernel_regularizer,
                       normalization=normalization,
                       activation=None,
                       name=name + "/" + convolution)(x)
    if level < target_level:
        strides = int(2**(target_level - level))
        x = tf.keras.layers.MaxPool2D(pool_size=strides,
                                      strides=strides,
                                      padding="same",
                                      name=name + "/pool")(x)
    elif level > target_level:
        # Upsample: a lower target level means a higher spatial resolution.
        size = int(2**(level - target_level))
        x = tf.keras.layers.UpSampling2D(size=size, name=name + "/upsample")(x)

    return x
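
A minimal usage sketch (hypothetical shapes; the conv_block helper used above must be in scope). Resampling a level-3 map to level 5 pools it by 2**(5 - 3) = 4 and projects it to target_level_filters channels:

import tensorflow as tf

inputs = tf.keras.Input(shape=(64, 64, 128))  # a level-3 feature map
p5 = resample_feature_map(inputs, level=3, target_level=5)
print(p5.shape)  # (None, 16, 16, 256)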
Example #2
def relu_conv2d_bn(x,
                   convolution,
                   filters=256,
                   kernel_size=(3, 3),
                   normalization=dict(normalization="batch_norm",
                                      momentum=0.9,
                                      epsilon=1e-3,
                                      axis=-1,
                                      trainable=True),
                   kernel_regularizer=None,
                   activation="relu",
                   name="relu_conv2d_bn"):
    # Pre-activation: apply the nonlinearity before the convolution.
    x = tf.keras.layers.Activation(activation, name=name + "/" + activation)(x)
    x = conv_block(convolution=convolution,
                   filters=filters,
                   kernel_size=kernel_size,
                   strides=(1, 1),
                   kernel_regularizer=kernel_regularizer,
                   normalization=normalization,
                   activation=None,
                   name=name)(x)
    return x
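
A usage sketch, again assuming conv_block is importable. Note the pre-activation ordering: the nonlinearity runs first, then the convolution and batch normalization inside conv_block:

x = tf.keras.Input(shape=(32, 32, 256))
y = relu_conv2d_bn(x, convolution="separable_conv2d")  # shape preserved: (None, 32, 32, 256)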
Example #3
def path_aggregation_neck(inputs,
                          convolution="conv2d",
                          normalization=dict(normalization="batch_norm",
                                             momentum=0.9,
                                             epsilon=1e-3,
                                             axis=-1,
                                             trainable=True),
                          activation=dict(activation="relu"),
                          feat_dims=64,
                          min_level=3,
                          max_level=7,
                          add_extra_conv=False,
                          dropblock=None,
                          weight_decay=0.,
                          use_multiplication=False,
                          name="path_aggregation_neck"):
    kernel_regularizer = (tf.keras.regularizers.l2(weight_decay)
                          if weight_decay is not None and weight_decay > 0 else
                          None)
    num_outputs = max_level - min_level + 1
    output_filters = ([feat_dims] * num_outputs
                      if isinstance(feat_dims, int) else feat_dims)
    features = []
    num_inputs = len(inputs)
    for i, feat in enumerate(inputs):
        x = conv_block(convolution="conv2d",
                       filters=feat_dims,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       kernel_regularizer=kernel_regularizer,
                       normalization=normalization,
                       activation=activation,
                       dropblock=dropblock,
                       name="top_down_conv2d_%d" % (i + 1))(features)
        features.append(x)

    # Top-down path: upsample level i and merge it into level i - 1.
    for i in range(num_inputs - 1, 0, -1):
        top = tf.keras.layers.UpSampling2D(
            (2, 2), interpolation="nearest")(features[i])
        if use_multiplication:
            features[i - 1] = tf.keras.layers.Multiply()([features[i - 1], top])
        else:
            features[i - 1] = tf.keras.layers.Add()([features[i - 1], top])

    # Bottom-up path: downsample level i - 1 and merge it into level i.
    for i in range(1, num_inputs):
        x = conv_block(convolution="conv2d",
                       filters=feat_dims,
                       kernel_size=(3, 3),
                       strides=(2, 2),
                       kernel_regularizer=kernel_regularizer,
                       normalization=normalization,
                       activation=activation,
                       dropblock=dropblock,
                       name="bottom_up_conv2d_%d" % (i + 1))(features[i - 1])
        if use_multiplication:
            features[i] = tf.keras.layers.Multiply()([x, features[i]])
        else:
            features[i] = tf.keras.layers.Add()([x, features[i]])

    # Append extra levels until num_outputs feature maps exist.
    for i in range(num_inputs, num_outputs):
        if add_extra_conv:
            features.append(
                (conv_block(convolution,
                            filters=output_filters[i],
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            kernel_regularizer=kernel_regularizer,
                            normalization=normalization,
                            activation=activation,
                            name="extra_conv2d_%d" % (i + 1))(features[-1])))
        else:
            features.append(
                tf.keras.layers.MaxPool2D(pool_size=(2, 2),
                                          strides=(2, 2))(features[-1]))

    return features
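
A usage sketch with hypothetical C3-C5 backbone shapes (conv_block in scope). Three inputs grow to max_level - min_level + 1 = 5 outputs through the extra-level loop at the end:

c3 = tf.keras.Input(shape=(80, 80, 512))
c4 = tf.keras.Input(shape=(40, 40, 1024))
c5 = tf.keras.Input(shape=(20, 20, 2048))
p3, p4, p5, p6, p7 = path_aggregation_neck([c3, c4, c5],
                                           min_level=3,
                                           max_level=7,
                                           feat_dims=64)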
Example #4
def nas_fpn(inputs,
            convolution="conv2d",
            normalization=dict(normalization="batch_norm",
                               momentum=0.9,
                               epsilon=1e-3,
                               axis=-1,
                               trainable=True),
            activation=dict(activation="relu"),
            feat_dims=256,
            min_level=3,
            max_level=7,
            weight_decay=0.,
            dropblock=None,
            name="nas_fpn_neck"):
    kernel_regularizer = (tf.keras.regularizers.l2(weight_decay)
                          if weight_decay is not None and weight_decay > 0 else
                          None)
    num_outputs = max_level - min_level + 1
    assert num_outputs == 5, "Only 5 stages are supported, i.e. P3, P4, P5, P6, P7."

    if min_level == 3 and max_level == 7:
        # Each row encodes one merging cell:
        # (level index, combine op, input offset 0, input offset 1).
        model_config = [
            3, 1, 1, 3,
            3, 0, 1, 5,
            4, 0, 0, 6,  # Output to level 3.
            3, 0, 6, 7,  # Output to level 4.
            2, 1, 7, 8,  # Output to level 5.
            0, 1, 6, 9,  # Output to level 7.
            1, 1, 9, 10,  # Output to level 6.
        ]
    else:
        raise ValueError("The NAS-FPN with min level {} and max level {} "
                         "is not supported.".format(min_level, max_level))
    config = Config(model_config, min_level, max_level)

    num_inputs = len(inputs)
    if num_inputs < num_outputs:
        features = []
        for i in range(num_inputs):
            feat = resample_feature_map(x=inputs[i],
                                        level=min_level + i,
                                        target_level=min_level + i,
                                        convolution=convolution,
                                        target_level_filters=feat_dims,
                                        normalization=normalization,
                                        kernel_regularizer=kernel_regularizer,
                                        name=name + "/resample_" + str(i))
            features.append(feat)
        for i in range(num_inputs, num_outputs):
            # Extra levels come from downsampling the previous level by 2.
            feat = resample_feature_map(x=features[-1],
                                        level=min_level + i - 1,
                                        target_level=min_level + i,
                                        convolution=convolution,
                                        target_level_filters=feat_dims,
                                        normalization=normalization,
                                        kernel_regularizer=kernel_regularizer,
                                        name=name + "/resample_" + str(i))
            features.append(feat)
    else:
        # Copy so the appends below do not mutate the caller's list.
        features = list(inputs)

    # These bookkeeping lists must persist across merging cells, since each
    # cell appends a new node below.
    feature_levels = list(range(min_level, max_level + 1))
    num_output_connections = [0] * len(features)
    for i, sub_policy in enumerate(config.nodes):
        new_level = sub_policy["level"]

        # Checks the range of input_offsets.
        for input_offset in sub_policy["input_offsets"]:
            if input_offset >= len(features):
                raise ValueError("input_offset ({}) is larger than number of "
                                 "features ({}).".format(input_offset,
                                                         len(features)))
        input0 = sub_policy["input_offsets"][0]
        input1 = sub_policy["input_offsets"][1]

        # Update graph with inputs.
        node0 = features[input0]
        node0_level = feature_levels[input0]
        num_output_connections[input0] += 1
        node0 = resample_feature_map(x=node0,
                                     level=node0_level,
                                     target_level=new_level,
                                     convolution=convolution,
                                     target_level_filters=feat_dims,
                                     normalization=normalization,
                                     kernel_regularizer=kernel_regularizer,
                                     name=name + "/resample_node" +
                                     str(input0))

        node1 = features[input1]
        node1_level = feature_levels[input1]
        num_output_connections[input1] += 1
        node1 = resample_feature_map(x=node1,
                                     level=node1_level,
                                     target_level=new_level,
                                     convolution=convolution,
                                     target_level_filters=feat_dims,
                                     normalization=normalization,
                                     kernel_regularizer=kernel_regularizer,
                                     name=name + "/resample_node" +
                                     str(input1))

        # Combine node0 and node1 to create a new feature.
        if sub_policy["combine_method"] == COMBINATION_OPS.SUM:
            new_node = tf.keras.layers.Add(
                name=name + "/sum_node%d_node%d" % (input0, input1))(
                    [node0, node1])
        elif sub_policy["combine_method"] == COMBINATION_OPS.GLOBAL_ATTENTION:
            if node0_level >= node1_level:
                new_node = global_attention(
                    node0,
                    node1,
                    name=name + "/global_attention_node%d_node%d" %
                    (node0_level, node1_level))
            else:
                new_node = global_attention(
                    node1,
                    node0,
                    name=name + "/global_attention_node%d_node%d" %
                    (node1_level, node0_level))
        else:
            raise ValueError("Unknown combine_method {}.".format(
                sub_policy["combine_method"]))

        # Fold in intermediate nodes at this level that do not yet have any
        # connection to an output.
        if sub_policy["node_type"] == NODE_TYPES.OUTPUT:
            for j, (feat, feat_level, num_output) in enumerate(
                    zip(features, feature_levels, num_output_connections)):
                if num_output == 0 and feat_level == new_level:
                    num_output_connections[j] += 1
                    new_node = tf.keras.layers.Add(name=name +
                                                   "/sum_%d_%d" % (i, j))(
                                                       [new_node, feat])

        # activation is given as a dict, e.g. dict(activation="relu").
        act_name = activation["activation"]
        new_node = tf.keras.layers.Activation(
            act_name, name=name + "/" + act_name + "_" + str(i))(new_node)
        new_node = conv_block(convolution,
                              filters=feat_dims,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              kernel_regularizer=kernel_regularizer,
                              normalization=normalization,
                              activation=None,
                              dropblock=dropblock,
                              name=name + "/conv_" + str(i))(new_node)

        features.append(new_node)
        feature_levels.append(new_level)
        num_output_connections.append(0)

    # The last num_outputs nodes are the output pyramid levels.
    outputs = features[-num_outputs:]

    return outputs
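
A usage sketch, assuming Config, COMBINATION_OPS, NODE_TYPES, global_attention, and conv_block are all importable from the surrounding module (they are not defined in this snippet); shapes are hypothetical. Three backbone levels are first resampled and extended to five, then merged by the searched cell topology in model_config:

c3 = tf.keras.Input(shape=(80, 80, 512))
c4 = tf.keras.Input(shape=(40, 40, 1024))
c5 = tf.keras.Input(shape=(20, 20, 2048))
p3, p4, p5, p6, p7 = nas_fpn([c3, c4, c5], min_level=3, max_level=7)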
Example #5
def fpn(inputs,
        convolution="separable_conv2d",
        normalization=dict(normalization="batch_norm",
                           momentum=0.9,
                           epsilon=1e-3,
                           axis=-1,
                           trainable=True),
        activation=dict(activation="relu"),
        feat_dims=64,
        min_level=3,
        max_level=7,
        use_multiplication=False,
        dropblock=None,
        weight_decay=0.,
        add_extra_conv=False,
        name="fpn_neck",
        **kwargs):
    laterals = []
    num_outputs = max_level - min_level + 1
    num_inputs = len(inputs)
    kernel_regularizer = (tf.keras.regularizers.l2(weight_decay)
                          if weight_decay is not None and weight_decay > 0 else
                          None)

    for i, feat in enumerate(inputs):
        laterals.append(
            conv_block(convolution="conv2d",
                       filters=feat_dims,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       kernel_regularizer=kernel_regularizer,
                       normalization=normalization,
                       activation=activation,
                       dropblock=dropblock,
                       name=name + "/lateral_%s_%d" % (convolution, i))(feat))

    for i in range(num_inputs - 1, 0, -1):
        top = tf.keras.layers.UpSampling2D(size=(2, 2),
                                           name=name + "/upsample_%d" % i)(
                                               laterals[i])
        if use_multiplication:
            laterals[i - 1] = tf.keras.layers.Multiply(
                name=name + "/multiply_" + str(i))([laterals[i - 1], top])
        else:
            laterals[i - 1] = tf.keras.layers.Add(
                name=name + "/sum_" + str(i))([laterals[i - 1], top])

    # Adds post-hoc 3x3 convolution kernel.
    for i in range(num_inputs):
        laterals[i] = conv_block(convolution=convolution,
                                 filters=feat_dims,
                                 kernel_size=(3, 3),
                                 strides=(1, 1),
                                 kernel_regularizer=kernel_regularizer,
                                 normalization=normalization,
                                 activation=activation,
                                 dropblock=dropblock,
                                 name=name + "/post_hoc_%s_%d" %
                                 (convolution, i))(laterals[i])

    for i in range(num_inputs, num_outputs):
        if add_extra_conv:
            laterals.append(
                conv_block(convolution=convolution,
                           filters=feat_dims,
                           kernel_size=(3, 3),
                           strides=(2, 2),
                           kernel_regularizer=kernel_regularizer,
                           normalization=normalization,
                           activation=activation,
                           dropblock=dropblock,
                           name=name + "/post_hoc_conv2d_" + str(i))(
                               laterals[-1]))
        else:
            laterals.append(
                tf.keras.layers.MaxPool2D(
                    (2, 2), (2, 2),
                    "same",
                    name=name + "/max_pool2d_" + str(i))(laterals[-1]))

    return laterals
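
A usage sketch mirroring the examples above (hypothetical shapes, conv_block in scope). With three inputs and min_level=3, max_level=7, two extra levels are appended by strided max pooling because add_extra_conv defaults to False:

c3 = tf.keras.Input(shape=(80, 80, 512))
c4 = tf.keras.Input(shape=(40, 40, 1024))
c5 = tf.keras.Input(shape=(20, 20, 2048))
outputs = fpn([c3, c4, c5], min_level=3, max_level=7)
assert len(outputs) == 5  # P3 ... P7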