Example no. 1
0
    def __init__(
        self,
        inputs,
        blocks,
        block,
        include_top=True,
        classes=1000,
        freeze_bn=True,
        numerical_names=None,
        *args,
        **kwargs
    ):
        """
        Build a 1-D ResNet from a list of per-stage block counts.

        :param inputs: input tensor (e.g. an instance of `tf.keras.layers.Input`)
        :param blocks: number of residual blocks in each stage
        :param block: residual block factory, called as
            ``block(features, stage_id, block_id, numerical_name=..., freeze_bn=...)``
        :param include_top: if true, append global pooling + softmax classifier
        :param classes: number of classes (only used when ``include_top`` is true)
        :param freeze_bn: if true, freeze the BatchNormalization layers
        :param numerical_names: per-stage flags forwarded to ``block``;
            defaults to ``True`` for every stage
        """
        # 1-D feature maps are (batch, steps, channels) under channels_last,
        # so the channel axis is 2.  The previous value of 3 was copied from
        # the 2-D variant and is out of range for a rank-3 tensor, which
        # would make BatchNormalization raise.
        if tf.keras.backend.image_data_format() == "channels_last":
            axis = 2
        else:
            axis = 1

        if numerical_names is None:
            numerical_names = [True] * len(blocks)

        x = tf.keras.layers.ZeroPadding1D(padding=3, name="padding_conv1")(inputs)
        # Conv1D and MaxPooling1D take scalar window/stride sizes; the 2-D
        # tuples (7, 7) / (2, 2) / (3, 3) copied from the 2-D variant are
        # rejected by the 1-D layer constructors.
        x = tf.keras.layers.Conv1D(64, 7, strides=2, use_bias=False, name="conv1")(x)
        x = layers.BatchNormalization(axis=axis, epsilon=1e-5, freeze=freeze_bn, name="bn_conv1")(x)
        x = tf.keras.layers.Activation("relu", name="conv1_relu")(x)
        x = tf.keras.layers.MaxPooling1D(3, strides=2, padding="same", name="pool1")(x)

        features = 64

        outputs = []

        for stage_id, iterations in enumerate(blocks):
            for block_id in range(iterations):
                x = block(
                    features,
                    stage_id,
                    block_id,
                    numerical_name=(block_id > 0 and numerical_names[stage_id]),
                    freeze_bn=freeze_bn
                )(x)

            # Double the width at each new stage, mirroring the 2-D ResNets.
            features *= 2

            outputs.append(x)

        if include_top:
            assert classes > 0

            x = tf.keras.layers.GlobalAveragePooling1D(name="pool5")(x)
            x = tf.keras.layers.Dense(classes, activation="softmax", name="fc1000")(x)

            super(ResNet1D, self).__init__(inputs=inputs, outputs=x, *args, **kwargs)
        else:
            # Without the classifier head, expose each stage's features.
            super(ResNet1D, self).__init__(inputs=inputs, outputs=outputs, *args, **kwargs)
Example no. 2
0
    def f(x):
        """Apply a time-distributed basic 2-D residual block to ``x``.

        Two padded 3x3-style conv/BN steps form the residual branch; the
        first block of a stage (``block == 0``) also projects the input
        through a strided 1x1 conv + BN so the shapes match for the add.
        """
        td = tf.keras.layers.TimeDistributed
        # All layer names share the same "<stage><block>" suffix.
        suffix = "{}{}".format(stage_char, block_char)

        out = td(tf.keras.layers.ZeroPadding2D(padding=1),
                 name="padding{}_branch2a".format(suffix))(x)

        out = td(tf.keras.layers.Conv2D(filters,
                                        kernel_size,
                                        strides=stride,
                                        use_bias=False,
                                        **parameters),
                 name="res{}_branch2a".format(suffix))(out)

        out = td(layers.BatchNormalization(axis=axis,
                                           epsilon=1e-5,
                                           freeze=freeze_bn),
                 name="bn{}_branch2a".format(suffix))(out)

        out = td(tf.keras.layers.Activation("relu"),
                 name="res{}_branch2a_relu".format(suffix))(out)

        out = td(tf.keras.layers.ZeroPadding2D(padding=1),
                 name="padding{}_branch2b".format(suffix))(out)

        out = td(tf.keras.layers.Conv2D(filters,
                                        kernel_size,
                                        use_bias=False,
                                        **parameters),
                 name="res{}_branch2b".format(suffix))(out)

        out = td(layers.BatchNormalization(axis=axis,
                                           epsilon=1e-5,
                                           freeze=freeze_bn),
                 name="bn{}_branch2b".format(suffix))(out)

        if block == 0:
            # Projection shortcut: match channels/stride of the main branch.
            identity = td(tf.keras.layers.Conv2D(filters, (1, 1),
                                                 strides=stride,
                                                 use_bias=False,
                                                 **parameters),
                          name="res{}_branch1".format(suffix))(x)

            identity = td(layers.BatchNormalization(axis=axis,
                                                    epsilon=1e-5,
                                                    freeze=freeze_bn),
                          name="bn{}_branch1".format(suffix))(identity)
        else:
            # Identity shortcut: shapes already agree.
            identity = x

        out = tf.keras.layers.Add(name="res{}".format(suffix))([out, identity])

        out = td(tf.keras.layers.Activation("relu"),
                 name="res{}_relu".format(suffix))(out)

        return out
Example no. 3
0
    def __init__(self,
                 inputs,
                 blocks,
                 block,
                 freeze_bn=True,
                 numerical_names=None,
                 *args,
                 **kwargs):
        """Build a 2-D ResNet backbone topped with an FPN (outputs P2-P6).

        :param inputs: input tensor (e.g. an instance of `tf.keras.layers.Input`)
        :param blocks: number of residual blocks in each of the four stages
        :param block: residual block factory
        :param freeze_bn: if true, freeze the BatchNormalization layers
        :param numerical_names: per-stage flags forwarded to ``block``;
            defaults to ``True`` for every stage
        """
        axis = 3 if tf.keras.backend.image_data_format() == "channels_last" else 1

        if numerical_names is None:
            numerical_names = [True] * len(blocks)

        # Stem: strided 7x7 conv -> BN -> ReLU -> strided 3x3 max-pool.
        x = tf.keras.layers.Conv2D(64, (7, 7),
                                   strides=(2, 2),
                                   use_bias=False,
                                   name="conv1",
                                   padding="same")(inputs)
        x = layers.BatchNormalization(axis=axis,
                                      epsilon=1e-5,
                                      freeze=freeze_bn,
                                      name="bn_conv1")(x)
        x = tf.keras.layers.Activation("relu", name="conv1_relu")(x)
        x = tf.keras.layers.MaxPooling2D((3, 3),
                                         strides=(2, 2),
                                         padding="same",
                                         name="pool1")(x)

        features = 64
        stage_outputs = []

        for stage_id, iterations in enumerate(blocks):
            for block_id in range(iterations):
                x = block(features,
                          stage_id,
                          block_id,
                          numerical_name=(block_id > 0
                                          and numerical_names[stage_id]),
                          freeze_bn=freeze_bn)(x)

            features *= 2
            stage_outputs.append(x)

        c2, c3, c4, c5 = stage_outputs

        # Top-down pathway.  C5 is reduced to 256 channels; each lower level
        # is the sum of the upsampled coarser map and its own 1x1 lateral
        # reduction, smoothed by a 3x3 conv.
        pyramid_5 = tf.keras.layers.Conv2D(filters=256,
                                           kernel_size=1,
                                           strides=1,
                                           padding="same",
                                           name="c5_reduced")(c5)

        upsampled = tf.keras.layers.UpSampling2D(interpolation="bilinear",
                                                 name="p5_upsampled",
                                                 size=(2, 2))(pyramid_5)

        pyramids = {5: pyramid_5}

        for level, stage_features in ((4, c4), (3, c3), (2, c2)):
            lateral = tf.keras.layers.Conv2D(
                filters=256,
                kernel_size=1,
                strides=1,
                padding="same",
                name="c{}_reduced".format(level))(stage_features)

            merged = tf.keras.layers.Add(name="p{}_merged".format(level))(
                [upsampled, lateral])

            if level > 2:
                # P2 is the finest level; nothing to upsample into below it.
                upsampled = tf.keras.layers.UpSampling2D(
                    interpolation="bilinear",
                    name="p{}_upsampled".format(level),
                    size=(2, 2))(merged)

            pyramids[level] = tf.keras.layers.Conv2D(
                filters=256,
                kernel_size=3,
                strides=1,
                padding="same",
                name="p{}".format(level))(merged)

        # P6: stride-2 max-pool of the reduced C5.
        pyramids[6] = tf.keras.layers.MaxPooling2D(strides=2,
                                                   name="p6")(pyramid_5)

        outputs = [pyramids[level] for level in (2, 3, 4, 5, 6)]

        super(FPN2D, self).__init__(inputs=inputs,
                                    outputs=outputs,
                                    *args,
                                    **kwargs)
Example no. 4
0
def TimeDistributedResNet(inputs,
                          blocks,
                          block,
                          include_top=True,
                          classes=1000,
                          freeze_bn=True,
                          *args,
                          **kwargs):
    """
    Constructs a time distributed `keras.models.Model` object using the given block count.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)

    :param blocks: the network's residual architecture (blocks per stage)

    :param block: a time distributed residual block (e.g. an instance of `keras_resnet.blocks.time_distributed_basic_2d`)

    :param include_top: if true, includes classification layers

    :param classes: number of classes to classify (include_top must be true)

    :param freeze_bn: if true, freezes BatchNormalization layers (ie. no updates are done in these layers)

    :return model: Time distributed ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.blocks
        >>> import keras_resnet.models

        >>> shape, classes = (224, 224, 3), 1000

        >>> x = keras.layers.Input(shape)

        >>> blocks = [2, 2, 2, 2]

        >>> block = keras_resnet.blocks.time_distributed_basic_2d

        >>> y = keras_resnet.models.TimeDistributedResNet(x, blocks, block, classes=classes)

        >>> y = keras.layers.TimeDistributed(keras.layers.Flatten())(y.output)

        >>> y = keras.layers.TimeDistributed(keras.layers.Dense(classes, activation="softmax"))(y)

        >>> model = keras.models.Model(x, y)

        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    # Channel axis of the per-timestep 4-D slices that the inner
    # (TimeDistributed-wrapped) layers see.
    if tf.keras.backend.image_data_format() == "channels_last":
        axis = 3
    else:
        axis = 1

    # Stem: pad, strided 7x7 conv, BN, ReLU, strided 3x3 max-pool — each
    # wrapped in TimeDistributed so it is applied to every timestep.
    x = tf.keras.layers.TimeDistributed(
        tf.keras.layers.ZeroPadding2D(padding=3), name="padding_conv1")(inputs)
    x = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(64, (7, 7),
                                                               strides=(2, 2),
                                                               use_bias=False),
                                        name="conv1")(x)
    x = tf.keras.layers.TimeDistributed(layers.BatchNormalization(
        axis=axis, epsilon=1e-5, freeze=freeze_bn),
                                        name="bn_conv1")(x)
    x = tf.keras.layers.TimeDistributed(tf.keras.layers.Activation("relu"),
                                        name="conv1_relu")(x)
    x = tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(
        (3, 3), strides=(2, 2), padding="same"),
                                        name="pool1")(x)

    features = 64

    outputs = []

    for stage_id, iterations in enumerate(blocks):
        for block_id in range(iterations):
            # NOTE(review): the sibling implementations derive numerical_name
            # from (block_id > 0 and numerical_names[stage_id]); here it keys
            # off stage depth (> 6 blocks) instead — confirm this is intended.
            x = block(features,
                      stage_id,
                      block_id,
                      numerical_name=(blocks[stage_id] > 6),
                      freeze_bn=freeze_bn)(x)

        # Double the width at each new stage.
        features *= 2
        outputs.append(x)

    if include_top:
        assert classes > 0

        x = tf.keras.layers.TimeDistributed(
            tf.keras.layers.GlobalAveragePooling2D(), name="pool5")(x)
        x = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(
            classes, activation="softmax"),
                                            name="fc1000")(x)

        return tf.keras.models.Model(inputs=inputs, outputs=x, *args, **kwargs)
    else:
        # Else output each stages features
        return tf.keras.models.Model(inputs=inputs,
                                     outputs=outputs,
                                     *args,
                                     **kwargs)