Пример #1
0
    def _build_feature_network(self, load_pretrained=True):
        """Build the ResNet50-based feature extractor and, optionally, the
        per-level upscale stacks for a U-Net-style decoder.

        Args:
            load_pretrained: if True, initialize ResNet50 with ImageNet
                weights; otherwise use random initialization.

        Returns:
            Tuple ``(feature_extractor, output_shape)`` where
            ``feature_extractor`` is a Keras model mapping an input image to
            the outputs of the layers named in the ``emb_set_output_layer``
            config list, and ``output_shape`` is that model's
            ``output_shape``.

        Side effects:
            When the ``unet_style`` config flag is set, populates
            ``self.upscale_layers`` with one small ResNet stack per decoder
            level and prints each stack's summary.
        """
        # Build resnet50 without its classification head; the input is a
        # square RGB image of side self.data.image_size.
        feature_extractor = tf.keras.applications.ResNet50(
            include_top=False,
            weights='imagenet' if load_pretrained else None,
            pooling=None,
            input_shape=(self.data.image_size, self.data.image_size, 3))

        # Collect the layers whose outputs will be used; outputs are appended
        # in the backbone's own layer order, not the config-list order.
        emb_set_output_layers = self.config.get_list("emb_set_output_layer")
        output_layers = []
        for layer in feature_extractor.layers:
            if layer.name in emb_set_output_layers:
                output_layers.append(layer.output)

        # Re-wrap the backbone as a multi-output model over the selected
        # layer outputs.
        feature_extractor = tf.keras.models.Model(feature_extractor.input,
                                                  output_layers)
        output_shape = feature_extractor.output_shape
        feature_extractor.summary()

        # Do the upscaling: one small stack per decoder level.
        if self.config.get_bool("unet_style"):
            # Go over all upscale layers, from the deepest selected output
            # upward (output_layers reversed, skipping the deepest itself).
            self.upscale_layers = []
            last_channels = output_layers[-1].shape[-1]
            for filters, output_layer in zip([128, 64, 16, 4],
                                             output_layers[::-1][1:]):
                # Determine input shape: the skip connection's channels plus
                # the channels produced by the previous (deeper) level.
                in_shape = list(output_layer.shape)
                in_shape[-1] += last_channels
                # Build one resnet stack for that level.
                # NOTE(review): every level reuses name='conv1'; each stack
                # lives in its own Model so there is no collision, but the
                # identical names make summaries hard to tell apart —
                # confirm this is intended.
                inp = tf.keras.layers.Input(shape=in_shape[1:])
                x = stack1(inp, filters, 1, stride1=1, name='conv1')
                self.upscale_layers.append(tf.keras.models.Model(inp, x))
                self.upscale_layers[-1].summary()
                last_channels = x.shape[-1]

        return feature_extractor, output_shape
Пример #2
0
def stacks(x):
    """Apply ResNet-50's four residual stages (conv2–conv5) to ``x``.

    Stage layout is the standard ResNet-50 configuration: 3/4/6/3 blocks
    with 64/128/256/512 filters; only the first stage keeps stride 1.
    """
    x = resnet.stack1(x, 64, 3, stride1=1, name='conv2')
    for num_filters, num_blocks, stage_name in ((128, 4, 'conv3'),
                                                (256, 6, 'conv4'),
                                                (512, 3, 'conv5')):
        x = resnet.stack1(x, num_filters, num_blocks, name=stage_name)
    return x
Пример #3
0
def build_model(image_shape=(256, 256), renset_blocks=2, resnet_filters=64):
    """Build a U-Net generator conditioned on a picture and a landmark image.

    The U-Net consumes the channel-wise concatenation of the two inputs;
    a parallel shallow ResNet stack over the raw picture supplies
    high-frequency detail that is merged back in before the final 1x1
    projection to a 3-channel sigmoid output.

    Args:
        image_shape: unused — both inputs are declared with dynamic spatial
            size ``(None, None, 3)``. Kept for backward compatibility.
        renset_blocks: number of blocks in the detail ResNet stack.
            NOTE: the parameter name is a typo of ``resnet_blocks`` but is
            kept as-is so keyword callers keep working.
        resnet_filters: number of filters in the detail ResNet stack.

    Returns:
        A ``tf.keras.Model`` mapping ``[picture, landmarks]`` to a single
        3-channel image with values in (0, 1).
    """
    picture = layers.Input((None, None, 3))
    landmarks = layers.Input((None, None, 3))

    def conv_block(x, filters, dropout_rate):
        # The repeated unit of both encoder and decoder: two 3x3 he_normal
        # ReLU convs, each batch-normalized, with dropout after the first.
        x = layers.Conv2D(filters, (3, 3), activation='relu',
                          kernel_initializer='he_normal', padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(dropout_rate)(x)
        x = layers.Conv2D(filters, (3, 3), activation='relu',
                          kernel_initializer='he_normal', padding='same')(x)
        x = layers.BatchNormalization()(x)
        return x

    # Encoder: four downsampling stages over the concatenated inputs.
    inputs = layers.concatenate([picture, landmarks])
    c1 = conv_block(inputs, 32, 0.1)
    p1 = layers.MaxPooling2D((2, 2))(c1)
    c2 = conv_block(p1, 64, 0.1)
    p2 = layers.MaxPooling2D((2, 2))(c2)
    c3 = conv_block(p2, 128, 0.2)
    p3 = layers.MaxPooling2D((2, 2))(c3)
    c4 = conv_block(p3, 256, 0.2)
    p4 = layers.MaxPooling2D(pool_size=(2, 2))(c4)

    # Bottleneck.
    c5 = conv_block(p4, 512, 0.3)

    # Decoder: transpose convs upsample 2x per stage; skip connections
    # re-inject the encoder features at the matching resolution.
    u6 = layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = layers.concatenate([u6, c4])
    c6 = conv_block(u6, 256, 0.2)

    u7 = layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = layers.concatenate([u7, c3])
    c7 = conv_block(u7, 128, 0.2)

    u8 = layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = layers.concatenate([u8, c2])
    c8 = conv_block(u8, 64, 0.1)

    u9 = layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = layers.concatenate([u9, c1])
    c9 = conv_block(u9, 32, 0.1)

    # Detail branch: a shallow ResNet stack over the raw picture, merged
    # with the U-Net output before the final projection.
    details = resnet.stack1(picture, filters=resnet_filters,
                            blocks=renset_blocks, stride1=1, name='resnet')
    c10 = layers.concatenate([details, c9])

    c10 = layers.Conv2D(32, (1, 1), strides=(1, 1))(c10)
    outputs = layers.Conv2D(3, (1, 1), strides=(1, 1), activation='sigmoid')(c10)

    model = tf.keras.Model(inputs=[picture, landmarks], outputs=[outputs])
    return model
Пример #4
0
 def stack_fn(x):
     """Run ``x`` through four single-block residual stages (conv2–conv5)."""
     x = resnet.stack1(x, 64, 1, stride1=1, name="conv2")
     # Remaining stages all use the default stride; one block each.
     for stage_filters, stage_name in ((128, "conv3"),
                                       (256, "conv4"),
                                       (512, "conv5")):
         x = resnet.stack1(x, stage_filters, 1, name=stage_name)
     return x
Пример #5
0
 def stack_fn(x):
     """Apply the standard ResNet-50 stage layout (3/4/6/3 blocks)."""
     x = stack1(x, 64, 3, stride1=1, name='conv2')
     # Later stages keep stack1's default stride.
     for stage_filters, stage_blocks, stage_name in ((128, 4, 'conv3'),
                                                     (256, 6, 'conv4'),
                                                     (512, 3, 'conv5')):
         x = stack1(x, stage_filters, stage_blocks, name=stage_name)
     return x
Пример #6
0
 def stack_fn(x):
     """Return the outputs of all four ResNet-50 stages as ``[c2, c3, c4, c5]``.

     Keeping every stage's feature map (rather than only the last) allows
     the caller to tap intermediate resolutions, e.g. for skip connections.
     """
     features = [resnet.stack1(x, 64, 3, stride1=1, name='conv2')]
     for stage_filters, stage_blocks, stage_name in ((128, 4, 'conv3'),
                                                     (256, 6, 'conv4'),
                                                     (512, 3, 'conv5')):
         features.append(resnet.stack1(features[-1], stage_filters,
                                       stage_blocks, name=stage_name))
     return features