Example #1
    def __init__(self, output_channels, size_height, size_width):
        self.output_channels = output_channels
        self.size_height = size_height
        self.size_width = size_width

        base_model = tf.keras.applications.MobileNetV2(
            input_shape=[self.size_height, self.size_width, 3],
            include_top=False)

        # Use the activations of these layers
        layer_names = [
            'block_1_expand_relu',  # 64x64
            'block_3_expand_relu',  # 32x32
            'block_6_expand_relu',  # 16x16
            'block_13_expand_relu',  # 8x8
            'block_16_project',  # 4x4
        ]
        layers = [base_model.get_layer(name).output for name in layer_names]

        # Create the feature extraction model
        self.down_stack = tf.keras.Model(inputs=base_model.input,
                                         outputs=layers)

        self.down_stack.trainable = True

        self.up_stack = [
            pix2pix.upsample(512, 3),  # 4x4 -> 8x8
            pix2pix.upsample(256, 3),  # 8x8 -> 16x16
            pix2pix.upsample(128, 3),  # 16x16 -> 32x32
            pix2pix.upsample(64, 3),  # 32x32 -> 64x64
        ]
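Example #1 stops before the forward pass. A minimal sketch of how such a down/up stack is usually wired together, assuming the class subclasses tf.keras.Model and defines a final self.last transposed convolution (neither appears in the snippet above):

    def call(self, inputs, training=False):
        # Downsampling: collect the encoder activations as skip connections
        skips = self.down_stack(inputs, training=training)
        x = skips[-1]
        skips = reversed(skips[:-1])

        # Upsampling: double the resolution and concatenate the matching skip
        for up, skip in zip(self.up_stack, skips):
            x = up(x)
            x = tf.concat([x, skip], axis=-1)

        # assumed: self.last = tf.keras.layers.Conv2DTranspose(
        #     self.output_channels, 3, strides=2, padding='same')
        return self.last(x)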
Example #2
    def __init__(self):
        super(UNet, self).__init__()
        # self.input_shape = (None, None, 3)
        backbone = tf.keras.applications.MobileNetV2(include_top=False,
                                                     input_shape=(None, None, 3),
                                                     weights="imagenet")
        layer_names = [
            "block_1_expand_relu",
            "block_3_expand_relu",
            "block_6_expand_relu",
            "block_13_expand_relu",
            "block_16_project",
        ]
        layers = [backbone.get_layer(name).output for name in layer_names]

        self.down_stack = tf.keras.Model(inputs=backbone.input, outputs=layers)
        self.down_stack.trainable = True

        self.up_stack = [
            pix2pix.upsample(512, 3),  # 4x4 -> 8x8
            pix2pix.upsample(256, 3),  # 8x8 -> 16x16
            pix2pix.upsample(128, 3),  # 16x16 -> 32x32
            pix2pix.upsample(64, 3),  # 32x32 -> 64x64
        ]
        self.last = tf.keras.layers.Conv2DTranspose(
            1, 3, strides=2, padding="same")  # 64x64 -> 128x128
Example #3
    def build_model(self):
        model_weights_loc = self.download_weights()

        base_model = tf.keras.applications.MobileNetV2(
            input_shape=[128, 128, 3],
            include_top=False,
            weights=model_weights_loc)

        # Use the activations of these layers
        layer_names = [
            'block_1_expand_relu',   # 64x64
            'block_3_expand_relu',   # 32x32
            'block_6_expand_relu',   # 16x16
            'block_13_expand_relu',  # 8x8
            'block_16_project',      # 4x4
        ]
        layers = [base_model.get_layer(name).output for name in layer_names]

        # Create the feature extraction model
        self.down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

        self.down_stack.trainable = False

        self.up_stack = [
            pix2pix.upsample(512, 3),  # 4x4 -> 8x8
            pix2pix.upsample(256, 3),  # 8x8 -> 16x16
            pix2pix.upsample(128, 3),  # 16x16 -> 32x32
            pix2pix.upsample(64, 3),   # 32x32 -> 64x64
        ]

        model = self.unet_model(self.context.get_hparam("OUTPUT_CHANNELS"))
        model = self.context.wrap_model(model)

        model.compile(
            optimizer='adam',
            loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=['accuracy'])
        return model
Example #4
def create_up_stack():
    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3)]   # 32x32 -> 64x64
    return up_stack
Example #5
    def create_model(self):
        logging.info("Creating model")
        base_model = tf.keras.applications.MobileNetV2(
            input_shape=[128, 128, 3], include_top=False)

        # Use the activations of these layers
        layer_names = [
            'block_1_expand_relu',  # 64x64
            'block_3_expand_relu',  # 32x32
            'block_6_expand_relu',  # 16x16
            'block_13_expand_relu',  # 8x8
            'block_16_project',  # 4x4
        ]
        layers = [base_model.get_layer(name).output for name in layer_names]

        # Create the feature extraction model
        self.down_stack = tf.keras.Model(inputs=base_model.input,
                                         outputs=layers)

        self.down_stack.trainable = False

        self.up_stack = [
            pix2pix.upsample(512, 3),  # 4x4 -> 8x8
            pix2pix.upsample(256, 3),  # 8x8 -> 16x16
            pix2pix.upsample(128, 3),  # 16x16 -> 32x32
            pix2pix.upsample(64, 3),  # 32x32 -> 64x64
        ]

        self.model = self.unet_model(self.output_channels)
        self.model.compile(optimizer='adam',
                           loss=tf.keras.losses.SparseCategoricalCrossentropy(
                               from_logits=True),
                           metrics=['accuracy'])

        logging.info("Model created and compiled.")
Example #6
    def build(self):
        """Builds the Keras model from the configured base model."""
        layer_names = [
            'block_1_expand_relu',  # 64x64
            'block_3_expand_relu',  # 32x32
            'block_6_expand_relu',  # 16x16
            'block_13_expand_relu',  # 8x8
            'block_16_project',  # 4x4
        ]
        layers = [
            self.base_model.get_layer(name).output for name in layer_names
        ]

        # Create the feature extraction model
        down_stack = tf.keras.Model(inputs=self.base_model.input,
                                    outputs=layers)

        down_stack.trainable = False

        up_stack = [
            pix2pix.upsample(self.config.model.up_stack.layer_1,
                             self.config.model.up_stack.kernels),  # 4x4 -> 8x8
            pix2pix.upsample(
                self.config.model.up_stack.layer_2,
                self.config.model.up_stack.kernels),  # 8x8 -> 16x16
            pix2pix.upsample(
                self.config.model.up_stack.layer_3,
                self.config.model.up_stack.kernels),  # 16x16 -> 32x32
            pix2pix.upsample(
                self.config.model.up_stack.layer_4,
                self.config.model.up_stack.kernels),  # 32x32 -> 64x64
        ]

        inputs = tf.keras.layers.Input(shape=self.config.model.input)
        x = inputs

        # Downsampling through the model
        skips = down_stack(x)
        x = skips[-1]
        skips = reversed(skips[:-1])

        # Upsampling and establishing the skip connections
        for up, skip in zip(up_stack, skips):
            x = up(x)
            concat = tf.keras.layers.Concatenate()
            x = concat([x, skip])

        # This is the last layer of the model
        last = tf.keras.layers.Conv2DTranspose(
            self.output_channels,
            self.config.model.up_stack.kernels,
            strides=2,
            padding='same')  # 64x64 -> 128x128

        x = last(x)

        self.model = tf.keras.Model(inputs=inputs, outputs=x)

        LOG.info('Keras Model was built successfully')
Example #7
def mnist_unet_generator(norm_type='batchnorm'):
    """Modified u-net generator model (https://arxiv.org/abs/1611.07004).

    Args:
        norm_type: Type of normalization. Either 'batchnorm' or 'instancenorm'.

    Returns:
        Generator model
    """

    down_stack = [
        downsample(32, 4, norm_type, apply_norm=False),  # (bs, 16, 16, 32)
        downsample(64, 4, norm_type),  # (bs, 8, 8, 64)
        downsample(128, 4, norm_type),  # (bs, 4, 4, 128)
        downsample(256, 4, norm_type),  # (bs, 2, 2, 256)
        downsample(512, 4, norm_type),  # (bs, 1, 1, 512)
    ]

    up_stack = [
        upsample(256, 4, norm_type),  # (bs, 2, 2, 512) after skip concat
        upsample(128, 4, norm_type),  # (bs, 4, 4, 256) after skip concat
        upsample(64, 4, norm_type),  # (bs, 8, 8, 128) after skip concat
        upsample(32, 4, norm_type),  # (bs, 16, 16, 64) after skip concat
        upsample(16, 4, norm_type),  # unused: only four skip connections exist
    ]

    initializer = tf.random_normal_initializer(0., 0.02)
    last = tf.keras.layers.Conv2DTranspose(
        1,
        4,
        strides=2,
        padding='same',
        kernel_initializer=initializer,
        activation='tanh')  # (bs, 32, 32, 1), cropped back to 28x28 below

    concat = tf.keras.layers.Concatenate()

    inputs = tf.keras.layers.Input(shape=[28, 28, 1])
    x = inputs
    x = tf.keras.layers.ZeroPadding2D(padding=2)(x)

    # Downsampling through the model
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)

    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = concat([x, skip])

    x = last(x)
    x = tf.keras.layers.Cropping2D(cropping=2)(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
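As a quick sanity check of the geometry (a usage sketch, not from the original source), the generator should map a padded 28x28 MNIST image back to 28x28:

generator = mnist_unet_generator(norm_type='instancenorm')
noise = tf.random.normal([1, 28, 28, 1])
fake = generator(noise, training=False)
print(fake.shape)  # expected: (1, 28, 28, 1)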
Example #8
def build_model():
    OUTPUT_CHANNELS = 3
    base_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3],
                                                   include_top=False)
    layer_names = [
        'block_1_expand_relu',  # 64x64
        'block_3_expand_relu',  # 32x32
        'block_6_expand_relu',  # 16x16
        'block_13_expand_relu',  # 8x8
        'block_16_project',  # 4x4
    ]

    base_model_outputs = [
        base_model.get_layer(name).output for name in layer_names
    ]

    # Create the feature extraction model
    down_stack = tf.keras.Model(inputs=base_model.input,
                                outputs=base_model_outputs)

    down_stack.trainable = False

    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),  # 32x32 -> 64x64
    ]

    def unet_model(output_channels):
        inputs = tf.keras.layers.Input(shape=[128, 128, 3])

        # Downsampling through the model
        skips = down_stack(inputs)
        x = skips[-1]
        skips = reversed(skips[:-1])

        # Upsampling and establishing the skip connections
        for up, skip in zip(up_stack, skips):
            x = up(x)
            concat = tf.keras.layers.Concatenate()
            x = concat([x, skip])

        # This is the last layer of the model
        last = tf.keras.layers.Conv2DTranspose(
            output_channels, 3, strides=2, padding='same')  # 64x64 -> 128x128

        x = last(x)

        return tf.keras.Model(inputs=inputs, outputs=x)

    model = unet_model(OUTPUT_CHANNELS)
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model
Example #9
def mobile_net_x_unet():
    input_shape = CONFIG.IMAGE_SIZE + [3]
    base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                                   include_top=False)

    # activation layers to use
    layer_names = [
        'block_1_expand_relu',  # 64x64
        'block_3_expand_relu',  # 32x32
        'block_6_expand_relu',  # 16x16
        'block_13_expand_relu',  # 8x8
        'block_16_project',  # 4x4
    ]

    base_model_outputs = [
        base_model.get_layer(name).output for name in layer_names
    ]

    # feature extraction model
    down_stack = tf.keras.Model(inputs=base_model.inputs,
                                outputs=base_model_outputs)

    down_stack.trainable = False

    # upsampling layers
    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),  # 32x32 -> 64x64
    ]

    # define the unet model
    inputs = layers.Input(shape=input_shape)

    # downsampling through the model
    skips = down_stack(inputs)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # upsampling and establishing skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = layers.Concatenate()
        x = concat([x, skip])

    last = layers.Conv2DTranspose(3, 3, strides=2, padding="same")

    x = last(x)
    model = tf.keras.Model(inputs=inputs, outputs=x)
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model
Example #10
def unet_model(output_channels, h, w):
    """
  Parameters:
    output_channels: Number of classes to predict
    h: Height of Images
    w: Width if Images
  Output:
    tf.keras Model instance
  """
    base_model = tf.keras.applications.MobileNetV2(input_shape=[h, w, 3],
                                                   include_top=False)
    # Use the activations of these layers
    layer_names = [
        'block_1_expand_relu',  # 64x64
        'block_3_expand_relu',  # 32x32
        'block_6_expand_relu',  # 16x16
        'block_13_expand_relu',  # 8x8
        'block_16_project',  # 4x4
    ]
    layers = [base_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction model
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

    #down_stack.trainable = False

    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),  # 32x32 -> 64x64
    ]
    inputs = tf.keras.layers.Input(shape=[h, w, 3])
    x = inputs

    # Downsampling through the model
    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    # This is the last layer of the model
    last = tf.keras.layers.Conv2DTranspose(output_channels,
                                           3,
                                           strides=2,
                                           padding='same')  #64x64 -> 128x128

    x = last(x)
    x = tf.keras.layers.Lambda(lambda x: tf.keras.activations.softmax(x))(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
Example #11
def get_unet_model_for_human_parsing(input_shape, output_channels):
    # This is the U-net model
    """
    Read this u-net article with the cute meow:
        https://towardsdatascience.com/u-net-b229b32b4a71
    """

    mobile_net_model = tf.keras.applications.MobileNetV2(
        input_shape=input_shape, include_top=False)
    mobile_net_model.trainable = False
    # Use the activations of these layers
    layer_names = [
        'block_1_expand_relu',  # 128x96
        'block_3_expand_relu',  # 64x48
        'block_6_expand_relu',  # 32x24
        'block_13_expand_relu',  # 16x12
        'block_16_project',  # 8x6
    ]
    layers = [mobile_net_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction model
    wrap_mobile_net_model = tf.keras.Model(inputs=mobile_net_model.input,
                                           outputs=layers)
    wrap_mobile_net_model.trainable = False

    inputs = tf.keras.Input(shape=input_shape)
    out4, out3, out2, out1, out0 = wrap_mobile_net_model(inputs,
                                                         training=False)

    up1_tensor = pix2pix.upsample(512, 3)(out0)

    cat1_tensor = tf.keras.layers.concatenate([up1_tensor, out1])
    up2_tensor = pix2pix.upsample(256, 3)(cat1_tensor)

    cat2_tensor = tf.keras.layers.concatenate([up2_tensor, out2])
    up3_tensor = pix2pix.upsample(128, 3)(cat2_tensor)

    cat3_tensor = tf.keras.layers.concatenate([up3_tensor, out3])
    up4_tensor = pix2pix.upsample(64, 3)(cat3_tensor)

    cat4_tensor = tf.keras.layers.concatenate([up4_tensor, out4])

    # There are 20 integer categories (not one-hot encoded), so output_channels is 20 here
    out = tf.keras.layers.Conv2DTranspose(output_channels,
                                          3,
                                          strides=2,
                                          padding='same')(cat4_tensor)

    model = tf.keras.Model(inputs, out)
    model.summary()
    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['acc'])

    return model
Example #12
def model(usersettings):

    patchSize = usersettings.hparams['patchSize']
    output_channels = usersettings.hparams['nbClasses']

    base_model = tf.keras.applications.MobileNetV2(
        input_shape=[patchSize, patchSize, 3], include_top=False)

    # Use the activations of these layers
    layer_names = [
        'block_1_expand_relu',  # 64x64
        'block_3_expand_relu',  # 32x32
        'block_6_expand_relu',  # 16x16
        'block_13_expand_relu',  # 8x8
        'block_16_project',  # 4x4
    ]

    layers = [base_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction model
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

    down_stack.trainable = False

    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),  # 32x32 -> 64x64
    ]

    # This is the last layer of the model
    last = tf.keras.layers.Conv2DTranspose(
        output_channels, 3, strides=2, padding='same',
        activation='softmax')  #64x64 -> 128x128

    inputs = tf.keras.layers.Input(shape=[patchSize, patchSize, 3])

    x = inputs

    # Downsampling through the model
    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    x = last(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
Example #13
def mobilenet(img_size, num_classes, model_path="mobilenet.h5"):
    """Define the model."""
    # Use mobile net
    # base_model = tf.keras.applications.MobileNetV2(input_shape=[256, 256, 3],
    #                                                include_top=False)
    base_model = load_model(model_path)

    # use the activations of these layers (resolutions shown for a 256x256 input)
    layer_names = [
        'block_1_expand_relu',   # 128x128
        'block_3_expand_relu',   # 64x64
        'block_6_expand_relu',   # 32x32
        'block_13_expand_relu',  # 16x16
        'block_16_project',      # 8x8
    ]

    layers = [base_model.get_layer(name).output for name in layer_names]

    # create the feature extraction model
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)
    down_stack.trainable = False

    # create the upstack
    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3)
    ]  # 32x32 -> 64x64

    inputs = tf.keras.layers.Input(shape=[256, 256, 3])
    x = inputs

    # Downsampling through the model
    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    # This is the last layer of the model
    last = tf.keras.layers.Conv2DTranspose(num_classes,
                                           3,
                                           strides=2,
                                           padding='same')  # 128x128 -> 256x256

    x = last(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
Example #14
    def __init__(
        self,
        opt=None,
        input_channels=1,
    ):
        base_model = tf.keras.applications.MobileNetV2(
            input_shape=(*opt.input_shape, 3), include_top=False, weights=opt.weights)

        # Use the activations of these layers
        layer_names = (
            'block_1_expand_relu',   # 64x64
            'block_3_expand_relu',   # 32x32
            'block_6_expand_relu',   # 16x16
            'block_13_expand_relu',  # 8x8
            'block_16_project',      # 4x4
        )
        layers = [base_model.get_layer(name).output for name in layer_names]

        # Create the feature extraction model
        down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

        up_stack = (
            pix2pix.upsample(512, 4, apply_dropout=True),  # 4x4 -> 8x8
            pix2pix.upsample(512, 4, apply_dropout=True),  # 8x8 -> 16x16
            pix2pix.upsample(256, 4, apply_dropout=True),  # 16x16 -> 32x32
            pix2pix.upsample(64, 4),   # 32x32 -> 64x64
        )

        inputs = tf.keras.layers.Input(shape=(*opt.input_shape, input_channels))
        # adapt input_channels to 3 for MobileNet
        x = tf.keras.layers.Conv2D(3, 3, padding='same')(inputs)

        # Downsampling through the model
        skips = down_stack(x)
        x = skips[-1]
        skips = reversed(skips[:-1])

        # Upsampling and establishing the skip connections
        for up, skip in zip(up_stack, skips):
            x = up(x)
            concat = tf.keras.layers.Concatenate()
            x = concat([x, skip])

        # This is the last layer of the model
        last = tf.keras.layers.Conv2DTranspose(
            opt.output_channels, 4, strides=2,
            activation=opt.final_activation,
            padding='same')  #64x64 -> 128x128

        x = last(x)

        super().__init__(inputs=inputs, outputs=x, name=type(self).__name__)
Example #15
def unet_model(recg_model, output_channels):
    # Create the base model from the pre-trained model MobileNet V2
    # mobilev2 = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, weights='imagenet', include_top=False)
    # x = mobilev2.layers[-2].output
    # predictions = Dense(7, activation='softmax')(x)
    # base_model = Model(inputs=mobilev2.input, outputs=predictions)
    # base_model.load_weights(filepath)

    # Use the activations of these layers
    layer_names = [
        'block_1_expand_relu',  # 64x64
        'block_3_expand_relu',  # 32x32
        'block_6_expand_relu',  # 16x16
        'block_13_expand_relu',  # 8x8
        'block_16_project',  # 4x4
    ]
    layers = [recg_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction model
    down_stack = tf.keras.Model(inputs=recg_model.input, outputs=layers)
    down_stack.trainable = False

    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),  # 32x32 -> 64x64
    ]

    inputs = tf.keras.layers.Input(shape=IMG_SHAPE)
    x = inputs

    # Downsampling through the model
    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    # This is the last layer of the model
    last = tf.keras.layers.Conv2DTranspose(filters=output_channels,
                                           kernel_size=3,
                                           strides=2,
                                           padding='same')  # 64x64 -> 128x128

    x = last(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
Example #16
def unet_model(output_channels, shape):
    # Use the pretrained MobileNetV2 model as the base
    base_model = tf.keras.applications.MobileNetV2(
        input_shape=[shape[0], shape[1], 3], include_top=False)

    # Use the activations of these layers
    layer_names = [
        'block_1_expand_relu',
        'block_3_expand_relu',
        'block_6_expand_relu',
        'block_13_expand_relu',
        'block_16_project',
    ]
    # Collect the layers for the down stack (encoder)
    layers = [base_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction (encoder) model
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

    down_stack.trainable = False

    # Create the upsampling layers, i.e. the decoder
    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),  # 32x32 -> 64x64
    ]
    inputs = tf.keras.layers.Input(shape=[shape[0], shape[1], 3])
    x = inputs

    # Downsampling through the model to get the encoder outputs
    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    # Final layer, producing an output of shape (w, h, output_channels)
    last = tf.keras.layers.Conv2DTranspose(output_channels,
                                           3,
                                           strides=2,
                                           padding='same')  #64x64 -> 128x128

    x = last(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
Example #17
def generate_unet_512(img_size=512):
    output_channels = 4  # number of classes on the dataset
    base_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3],
                                                   include_top=False)
    layer_names = [
        'block_1_expand_relu',  # 64x64
        'block_3_expand_relu',  # 32x32
        'block_6_expand_relu',  # 16x16
        'block_13_expand_relu',  # 8x8
        'block_16_project',  # 4x4
    ]
    layers = [base_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction model
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

    down_stack.trainable = False

    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),  # 32x32 -> 64x64
    ]
    inputs = tf.keras.layers.Input(shape=[img_size, img_size, 3])
    mid = tf.keras.layers.MaxPooling2D(3, (4, 4))(inputs)
    x = tf.keras.layers.Conv2D(3, (3, 3), padding='same')(mid)

    # Downsampling through the model
    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    # This is the last layer of the model
    last = tf.keras.layers.Conv2DTranspose(output_channels,
                                           3,
                                           strides=2,
                                           padding='same')(x)  # 64x64 -> 128x128
    mid = tf.keras.layers.UpSampling2D((4, 4))(last)
    mid_x = tf.keras.layers.Conv2D(4, (12, 12), padding='same')(mid)
    x = tf.keras.layers.Conv2D(4, (3, 3), padding='same')(mid_x)

    return tf.keras.Model(inputs=inputs, outputs=x)
Example #18
def build_unet_model(input_shape, classes):
    """
    Constructs a basic UNET model.
    Based on tensorflow.org/tutorials/images/segmentation
    """

    base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                                   include_top=False,
                                                   weights=None)

    layer_names = [
        'block_1_expand_relu', 'block_3_expand_relu', 'block_6_expand_relu',
        'block_13_expand_relu', 'block_16_project'
    ]

    layers = [base_model.get_layer(name).output for name in layer_names]

    down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

    up_stack = [
        pix2pix.upsample(512, 3),
        pix2pix.upsample(256, 3),
        pix2pix.upsample(128, 3),
        pix2pix.upsample(64, 3)
    ]

    inputs = tf.keras.layers.Input(shape=input_shape)
    x = inputs

    skips = down_stack(x)
    x = skips[-1]

    skips = reversed(skips[:-1])

    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    last = tf.keras.layers.Conv2DTranspose(
        classes,
        3,
        strides=2,
        padding='same',
    )

    x = last(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
Example #19
def upsampling_block3(inputs, skip_inputs, n_channels, kernel_size, pool_size,
                      activation, tag):
    deconv = pix2pix.upsample(n_channels, kernel_size)(inputs)
    skip_concat = tf.keras.layers.concatenate([deconv, skip_inputs])
    conv1 = tf.keras.layers.Conv2D(n_channels, kernel_size, activation=activation,
                                   kernel_initializer='he_normal',
                                   padding='same')(skip_concat)
    conv2 = tf.keras.layers.Conv2D(n_channels, kernel_size, activation=activation,
                                   kernel_initializer='he_normal',
                                   padding='same')(conv1)

    return conv2
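For context, a hypothetical call site for upsampling_block3 (the tensor names and shapes below are illustrative, not from the original code; note that pool_size and tag are accepted but unused by the block):

# Illustrative: `bottleneck` is an 8x8 feature map, `skip_16` its 16x16
# encoder counterpart; pix2pix.upsample doubles the spatial resolution.
x = upsampling_block3(bottleneck, skip_16, n_channels=64, kernel_size=3,
                      pool_size=2, activation='relu', tag='up1')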
Example #20
def unet():
    base_model = tf.keras.applications.MobileNetV2(
        input_shape=[IMG_HEIGHT, IMG_WIDTH, 3], include_top=False)

    # Use the activations of these layers
    layer_names = [
        'block_1_expand_relu',
        'block_3_expand_relu',
        'block_6_expand_relu',
        'block_13_expand_relu',
        'block_16_project',
    ]

    layers = [base_model.get_layer(name).output for name in layer_names]

    down_stack = Model(inputs=base_model.input, outputs=layers)

    down_stack.trainable = False

    up_stack = [
        pix2pix.upsample(512, 3),
        pix2pix.upsample(256, 3),
        pix2pix.upsample(128, 3),
        pix2pix.upsample(64, 3),
    ]

    inputs = Input(shape=[IMG_HEIGHT, IMG_WIDTH, 3])

    x = inputs

    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])

    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = Concatenate()
        x = concat([x, skip])

    last = Conv2DTranspose(1, 3, strides=2, activation='sigmoid',
                           padding='same')

    x = last(x)

    return Model(inputs=inputs, outputs=x)
Example #21
def unet_model(output_channels, down_stack):
    """generate a unet architecture, based on a input model

    Args:
        output_channels (int): number of object channels
        down_stack (model): the pretrained model

    Returns:
        model: a keras model including the pretrained model in a unet architecutre
    """

    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8,
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),  # 32x32 -> 64x64
    ]

    inputs = tf.keras.layers.Input(shape=[192, 192, 3])

    # Downsampling through the model
    skips = down_stack(inputs)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    # This is the last layer of the model
    last = tf.keras.layers.Conv2DTranspose(output_channels,
                                           3,
                                           strides=2,
                                           activation='sigmoid',
                                           padding='same')  # 96x96 -> 192x192
    x = last(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
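Example #21 leaves building the pretrained down_stack to the caller. A sketch of how it might be constructed, mirroring the other examples but at the 192x192 input size this function hardcodes:

base_model = tf.keras.applications.MobileNetV2(input_shape=[192, 192, 3],
                                               include_top=False)
layer_names = [
    'block_1_expand_relu',   # 96x96
    'block_3_expand_relu',   # 48x48
    'block_6_expand_relu',   # 24x24
    'block_13_expand_relu',  # 12x12
    'block_16_project',      # 6x6
]
down_stack = tf.keras.Model(
    inputs=base_model.input,
    outputs=[base_model.get_layer(name).output for name in layer_names])
down_stack.trainable = False

model = unet_model(output_channels=1, down_stack=down_stack)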
Example #22
    def BuildModel(self, learning_rate=10e-4):

        base_model = tf.keras.applications.MobileNetV2(
            input_shape=[128, 128, 3], include_top=False)

        # Use the activations of these layers
        layer_names = [
            'block_1_expand_relu',  # 64x64
            'block_3_expand_relu',  # 32x32
            'block_6_expand_relu',  # 16x16
            'block_13_expand_relu',  # 8x8
            'block_16_project',  # 4x4
        ]
        base_model_outputs = [
            base_model.get_layer(name).output for name in layer_names
        ]

        # Create the feature extraction model
        down_stack = tf.keras.Model(inputs=base_model.input,
                                    outputs=base_model_outputs)

        down_stack.trainable = False

        up_stack = [
            pix2pix.upsample(512, 3),  # 4x4 -> 8x8
            pix2pix.upsample(256, 3),  # 8x8 -> 16x16
            pix2pix.upsample(128, 3),  # 16x16 -> 32x32
            pix2pix.upsample(64, 3),  # 32x32 -> 64x64
        ]

        inputs = tf.keras.layers.Input(shape=[128, 128, 3])

        # Downsampling through the model
        skips = down_stack(inputs)
        x = skips[-1]
        skips = reversed(skips[:-1])

        # Upsampling and establishing the skip connections
        for up, skip in zip(up_stack, skips):
            x = up(x)
            concat = tf.keras.layers.Concatenate()
            x = concat([x, skip])

        # This is the last layer of the model
        last = tf.keras.layers.Conv2DTranspose(
            self._output_channels, 3, strides=2,
            padding='same')  #64x64 -> 128x128

        x = last(x)
        self._model = tf.keras.Model(inputs=inputs, outputs=x)

        self._model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate),
            loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=['accuracy'])

        self._model.summary()

        save_path = os.path.join(self._workdir, 'unet.png')
        tf.keras.utils.plot_model(self._model,
                                  to_file=save_path,
                                  show_shapes=True)

        return self._model
Example #23
base_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3],
                                               include_top=False)

layers_names = [
    'block_1_expand_relu',
    'block_3_expand_relu',
    'block_6_expand_relu',
    'block_13_expand_relu',
    'block_16_project',
]
layers = [base_model.get_layer(name).output for name in layers_names]

down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)
down_stack.trainable = False

up_stack = [
    pix2pix.upsample(512, 3),
    pix2pix.upsample(256, 3),
    pix2pix.upsample(128, 3),
    pix2pix.upsample(64, 3),
]


def unet_model(output_channels):
    inputs = tf.keras.layers.Input(shape=[128, 128, 3])
    x = inputs

    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])

    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])
Example #24
layer_names = [
    'block_1_expand_relu',  # 64x64
    'block_3_expand_relu',  # 32x32
    'block_6_expand_relu',  # 16x16
    'block_13_expand_relu',  # 8x8
    'block_16_project',  # 4x4
]
layers = [base_model.get_layer(name).output for name in layer_names]

# Create the feature extraction model
down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

down_stack.trainable = False

up_stack = [
    pix2pix.upsample(512, 3),  # 4x4 -> 8x8
    pix2pix.upsample(256, 3),  # 8x8 -> 16x16
    pix2pix.upsample(128, 3),  # 16x16 -> 32x32
    pix2pix.upsample(64, 3),  # 32x32 -> 64x64
]


def unet_model(output_channels):
    inputs = tf.keras.layers.Input(shape=[128, 128, 3])
    # Alternative single-channel input (pairs with the Conv2D below):
    # inputs = tf.keras.layers.Input(shape=[512, 512, 1])
    # x = tf.keras.layers.Conv2D(3, 3, strides=2, padding='same')(inputs)
    x = inputs

    # Downsampling through the model
    skips = down_stack(x)
    x = skips[-1]
Example #25
layer_names = [
    'block_1_expand_relu',  # 64x64
    'block_3_expand_relu',  # 32x32
    'block_6_expand_relu',  # 16x16
    'block_13_expand_relu',  # 8x8
    'block_16_project',  # 4x4
]
layers = [base_model.get_layer(name).output for name in layer_names]

# Create the feature extraction model
down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

down_stack.trainable = False

up_stack = [
    pix2pix.upsample(512, 3, apply_dropout=True),  # 4x4 -> 8x8
    pix2pix.upsample(256, 3, apply_dropout=True),  # 8x8 -> 16x16
    pix2pix.upsample(128, 3, apply_dropout=True),  # 16x16 -> 32x32
    pix2pix.upsample(64, 3, apply_dropout=True),  # 32x32 -> 64x64
]


def unet_model(output_channels):
    inputs = tf.keras.layers.Input(shape=[224, 224, 3])
    x = inputs

    # Downsampling through the model
    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])
Example #26
def get_wrap_vgg16_model(IMG_SHAPE=(256, 192, 3)):
    """
    Read this u-net article with the cute meow:
        https://towardsdatascience.com/u-net-b229b32b4a71
    """

    mobile_net_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                                         include_top=False)
    # mobile_net_model.summary()
    mobile_net_model.trainable = False
    # Use the activations of these layers
    layer_names = [
        'block_1_expand_relu',  # 128x96
        'block_3_expand_relu',  # 64x48
        'block_6_expand_relu',  # 32x24
        'block_13_expand_relu',  # 16x12
        'block_16_project',  # 8x6
    ]
    layers = [mobile_net_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction model
    wrap_mobile_net_model = tf.keras.Model(inputs=mobile_net_model.input,
                                           outputs=layers)
    wrap_mobile_net_model.trainable = False

    # inputs_img = tf.keras.Input(shape=(*IMG_SHAPE[:2], 7), name="inputs_img")

    inputs_cloth = tf.keras.Input(shape=(*IMG_SHAPE[:2], 3),
                                  name="inputs_cloth")
    # input_concat = tf.concat([inputs_img, inputs_cloth], axis=-1)
    # pre_conv = tf.keras.layers.Conv2D(3, (3, 3), padding='same')(input_concat)

    out4, out3, out2, out1, out0 = wrap_mobile_net_model(inputs_cloth,
                                                         training=False)

    up1_tensor = pix2pix.upsample(512, 3)(out0)

    cat1_tensor = tf.keras.layers.concatenate([up1_tensor, out1])
    up2_tensor = pix2pix.upsample(256, 3)(cat1_tensor)

    cat2_tensor = tf.keras.layers.concatenate([up2_tensor, out2])
    up3_tensor = pix2pix.upsample(128, 3)(cat2_tensor)

    cat3_tensor = tf.keras.layers.concatenate([up3_tensor, out3])
    up4_tensor = pix2pix.upsample(64, 3)(cat3_tensor)

    cat4_tensor = tf.keras.layers.concatenate([up4_tensor, out4])

    # 4 output channels were planned because we are predicting 2 things:
    #       - coarse human image (3 channels, out1 below)
    #       - clothing mask on the person (1 channel, the commented-out out2)

    # relu activation is used here; no activation would also work with an mse loss
    out1 = tf.keras.layers.Conv2DTranspose(3,
                                           3,
                                           strides=2,
                                           padding='same',
                                           activation='relu')(cat4_tensor)

    # out2 = tf.keras.layers.Conv2DTranspose(
    #     1, 3, strides=2,
    #     padding='same',
    #     activation='relu'
    # ) (cat4_tensor)

    # We will not use model, we will just use it to see the summary!
    # model = tf.keras.Model([inputs_img, inputs_cloth], [out1, out2])
    mask_model = tf.keras.Model(inputs_cloth, out1)
    # mask_model.summary()
    return mask_model
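The comments above describe a two-output design (coarse human image plus clothing mask) with the second head commented out. If that head were enabled, the tail of the function might read as follows (a sketch; the mse losses are illustrative assumptions):

    out2 = tf.keras.layers.Conv2DTranspose(1, 3, strides=2, padding='same',
                                           activation='relu')(cat4_tensor)
    model = tf.keras.Model(inputs_cloth, [out1, out2])
    model.compile(optimizer='adam', loss=['mse', 'mse'])
    return model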
Example #27
def main_fun(args, ctx):
  from tensorflow_examples.models.pix2pix import pix2pix
  import json
  import os
  import tensorflow_datasets as tfds
  import tensorflow as tf
  import time

  print("TensorFlow version: ", tf.__version__)
  strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

  dataset, info = tfds.load('oxford_iiit_pet:3.0.0', with_info=True)

  def normalize(input_image, input_mask):
    input_image = tf.cast(input_image, tf.float32)/128.0 - 1
    input_mask -= 1
    return input_image, input_mask

  @tf.function
  def load_image_train(datapoint):
    input_image = tf.image.resize(datapoint['image'], (128, 128))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))

    if tf.random.uniform(()) > 0.5:
      input_image = tf.image.flip_left_right(input_image)
      input_mask = tf.image.flip_left_right(input_mask)

    input_image, input_mask = normalize(input_image, input_mask)

    return input_image, input_mask

  def load_image_test(datapoint):
    input_image = tf.image.resize(datapoint['image'], (128, 128))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask

  TRAIN_LENGTH = info.splits['train'].num_examples
  BATCH_SIZE = args.batch_size
  BUFFER_SIZE = args.buffer_size
  STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE

  train = dataset['train'].map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  test = dataset['test'].map(load_image_test)

  train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
  train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
  test_dataset = test.batch(BATCH_SIZE)

  OUTPUT_CHANNELS = 3

  with strategy.scope():
    base_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3], include_top=False)

    # Use the activations of these layers
    layer_names = [
        'block_1_expand_relu',   # 64x64
        'block_3_expand_relu',   # 32x32
        'block_6_expand_relu',   # 16x16
        'block_13_expand_relu',  # 8x8
        'block_16_project',      # 4x4
    ]
    layers = [base_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction model
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)

    down_stack.trainable = False

    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),   # 32x32 -> 64x64
    ]

    def unet_model(output_channels):

      # This is the last layer of the model
      last = tf.keras.layers.Conv2DTranspose(
          output_channels, 3, strides=2,
          padding='same', activation='softmax')  # 64x64 -> 128x128

      inputs = tf.keras.layers.Input(shape=[128, 128, 3])
      x = inputs

      # Downsampling through the model
      skips = down_stack(x)
      x = skips[-1]
      skips = reversed(skips[:-1])

      # Upsampling and establishing the skip connections
      for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

      x = last(x)

      return tf.keras.Model(inputs=inputs, outputs=x)

    model = unet_model(OUTPUT_CHANNELS)
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

# Training only (since we're using command-line)
# def create_mask(pred_mask):
#   pred_mask = tf.argmax(pred_mask, axis=-1)
#   pred_mask = pred_mask[..., tf.newaxis]
#   return pred_mask[0]
#
#
# def show_predictions(dataset=None, num=1):
#   if dataset:
#     for image, mask in dataset.take(num):
#       pred_mask = model.predict(image)
#       display([image[0], mask[0], create_mask(pred_mask)])
#   else:
#     display([sample_image, sample_mask,
#              create_mask(model.predict(sample_image[tf.newaxis, ...]))])
#
#
# class DisplayCallback(tf.keras.callbacks.Callback):
#   def on_epoch_end(self, epoch, logs=None):
#     clear_output(wait=True)
#     show_predictions()
#     print ('\nSample Prediction after epoch {}\n'.format(epoch+1))
#

  EPOCHS = args.epochs
  VAL_SUBSPLITS = 5
  VALIDATION_STEPS = info.splits['test'].num_examples//BATCH_SIZE//VAL_SUBSPLITS

  tf.io.gfile.makedirs(args.model_dir)
  filepath = args.model_dir + "/weights-{epoch:04d}"
  ckpt_callback = tf.keras.callbacks.ModelCheckpoint(filepath=filepath, verbose=1, save_weights_only=True)

  model_history = model.fit(train_dataset, epochs=EPOCHS,
                            steps_per_epoch=STEPS_PER_EPOCH,
                            callbacks=[ckpt_callback],
                            validation_steps=VALIDATION_STEPS,
                            validation_data=test_dataset)

  if ctx.job_name == 'chief':
    # Workaround for: https://github.com/tensorflow/tensorflow/issues/30251
    print("===== saving h5py model")
    model.save(args.model_dir + ".h5")
    print("===== re-loading model w/o DistributionStrategy")
    new_model = tf.keras.models.load_model(args.model_dir + ".h5")
    print("===== exporting saved_model")
    tf.keras.experimental.export_saved_model(new_model, args.export_dir)
    print("===== done exporting")
  else:
    print("===== sleeping")
    time.sleep(90)
Example #28
# so that we can just predict according to parsing 1 or 0 using binary crossentropy loss.
# Please use the simple parsing dataset for this model.

from tensorflow_examples.models.pix2pix import pix2pix
vgg16_model = tf.keras.applications.VGG16(
    input_shape=IMG_SHAPE,
    include_top=False,
    weights='imagenet'
)
vgg16_model.trainable = False
# vgg16_model.summary()

inputs = tf.keras.Input(shape=IMG_SHAPE)
x = vgg16_model(inputs, training=False)

x = pix2pix.upsample(512, 3)(x)
x = pix2pix.upsample(256, 3)(x)
x = pix2pix.upsample(128, 3)(x)
x = pix2pix.upsample(64, 3)(x)
x = pix2pix.upsample(32, 3)(x)
x = pix2pix.upsample(16, 3)(x)
out = tf.keras.layers.Conv2D(
    1, 3, strides=2,
    padding='same',
    activation='sigmoid'
) (x)


model = tf.keras.Model(inputs, out)
model.summary()
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',  # predicting parsing 1 or 0, per the comment above
    metrics=['accuracy'])
Example #29
def create_segmentation_model(output_channels):
    base_model = tf.keras.applications.MobileNetV2(input_shape=IMAGE_SHAPE, include_top=False)
    # get some layers from mobilenetv2 as encoder(downsampler)
    # Use the activations of these layers
    # Note: among these, only block_16_project has trainable params, and they are frozen below
    layer_names = [
        'block_1_expand_relu',
        # block_1_expand_relu (ReLU)      (None, 64, 64, 96)   0           block_1_expand_BN[0][0] - NO training params
        'block_3_expand_relu',
        # block_3_expand_relu (ReLU)      (None, 32, 32, 144)  0           block_3_expand_BN[0][0] - NO training params
        'block_6_expand_relu',
        # block_6_expand_relu (ReLU)      (None, 16, 16, 192)  0           block_6_expand_BN[0][0] - NO training params
        'block_13_expand_relu',
        # block_13_expand_relu (ReLU)     (None, 8, 8, 576)    0           block_13_expand_BN[0][0] - NO training params
        'block_16_project',
        # block_16_project (Conv2D)       (None, 4, 4, 320)    307200      block_16_depthwise_relu[0][0] - have training params
    ]
    layers = [base_model.get_layer(name).output for name in layer_names]

    # create the feature extraction model
    # note output is multiple tensors, i.e. the activations of a bunch of layers
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)
    down_stack.trainable = False

    up_stack = [
        pix2pix.upsample(512, 3),
        pix2pix.upsample(256, 3),
        pix2pix.upsample(128, 3),
        pix2pix.upsample(64, 3),
    ]

    inputs = tf.keras.layers.Input(shape=IMAGE_SHAPE)
    x = inputs

    # creates a Functional model that takes (128, 128, 3) input and outputs 5 different tensors
    # these tensors are NOT sequential or connected; they are used separately as inputs of other layers later
    skips = down_stack(x)

    x = skips[-1]
    # no block_16_project
    skips = reversed(skips[:-1])

    # initial x: (4, 4, 320)
    # skips:        (8 8 576)      (16 16 192)    (32 32 144)       (64 64 96)
    # ups:          (512, 3)       (256, 3)       (128, 3)          (64, 3)
    # up(x):        (8, 8, 512)    (16, 16, 256)  (32, 32, 128)     (64, 64, 64)
    # x = concat:   (8, 8, 1088)   (16, 16, 448)  (32, 32, 272)     (64, 64, 160)
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])
    # now x is (64, 64, 160)

    # Conv2DTranspose INCREASES the spatial size by `strides`; it's the reverse of Conv2D
    # (32, 32, 3) -> Conv2DTranspose(256, padding='same', strides=2) -> (64, 64, 256)
    last = tf.keras.layers.Conv2DTranspose(
        output_channels, 3, strides=2, padding='same'
    )

    x = last(x)
    # now x is (128, 128, 3)
    return tf.keras.Model(inputs=inputs, outputs=x)
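The shape walk-through in the comments can be verified directly (assuming IMAGE_SHAPE = [128, 128, 3], which is what the commented layer shapes imply):

model = create_segmentation_model(output_channels=3)
print(model.output_shape)  # expected: (None, 128, 128, 3)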
Example #30
base_model = tf.keras.applications.MobileNetV2(input_shape=[512, 512, 3],
                                               include_top=False)

layer_names = [
    'block_1_expand_relu',  # 256x256
    'block_3_expand_relu',  # 128x128
    'block_6_expand_relu',  # 64x64
    'block_13_expand_relu',  # 32x32
    'block_16_project',  # 16x16
]
layers = [base_model.get_layer(name).output for name in layer_names]

# Create the feature extraction model
down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)
down_stack.trainable = False

##### pix2pix - up_stack #####

up_stack = [
    pix2pix.upsample(512, 3),  # 16x16 -> 32x32
    pix2pix.upsample(256, 3),  # 32x32 -> 64x64
    pix2pix.upsample(128, 3),  # 64x64 -> 128x128
    pix2pix.upsample(64, 3),  # 128x128 -> 256x256
]


#### UNet Model #####
def unet_model(output_channels):
    inputs = tf.keras.layers.Input(shape=[512, 512, 3])
    x = inputs

    # Downsampling through the model
    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])