Example #1
    def _residual_block(self, ip, id):
        init = ip

        x = ReflectionPadding2D()(ip)
        x = Convolution2D(128,
                          self.k,
                          self.k,
                          activation='linear',
                          border_mode='valid',
                          name='res_conv_' + str(id) + '_1')(x)
        x = BatchNormalization(axis=1,
                               mode=self.mode,
                               name="res_batchnorm_" + str(id) + "_1")(x)
        x = Activation('relu', name="res_activation_" + str(id) + "_1")(x)

        x = ReflectionPadding2D()(x)
        x = Convolution2D(self.features,
                          self.k,
                          self.k,
                          activation='linear',
                          border_mode='valid',
                          name='res_conv_' + str(id) + '_2')(x)
        x = BatchNormalization(axis=1,
                               mode=self.mode,
                               name="res_batchnorm_" + str(id) + "_2")(x)

        m = merge([x, init], mode='sum', name="res_merge_" + str(id))
        #m = Activation('relu', name="res_activation_" + str(id))(m)

        return m
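
All of these examples assume a custom ReflectionPadding2D layer; Keras does not ship one. A minimal channels-last sketch, assuming a TensorFlow 2 backend and a (height, width) padding tuple:

import tensorflow as tf
from tensorflow.keras.layers import Layer


class ReflectionPadding2D(Layer):
    """Pad height and width by mirroring border pixels (channels-last)."""

    def __init__(self, padding=(1, 1), **kwargs):
        super().__init__(**kwargs)
        self.padding = tuple(padding)

    def compute_output_shape(self, input_shape):
        h_pad, w_pad = self.padding
        height = input_shape[1] + 2 * h_pad if input_shape[1] is not None else None
        width = input_shape[2] + 2 * w_pad if input_shape[2] is not None else None
        return (input_shape[0], height, width, input_shape[3])

    def call(self, inputs):
        h_pad, w_pad = self.padding
        # Batch and channel axes stay unpadded; only H and W are mirrored.
        return tf.pad(inputs,
                      [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]],
                      mode="REFLECT")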
Example #2
def generator(num_channels=1, num_timesteps=8, num_preproc=3):
    initial_state = Input(shape=(None, None, 256))
    noise_in_update = Input(shape=(num_timesteps, None, None, 8),
                            name="noise_in_update")
    lores_in = Input(shape=(num_timesteps, None, None, num_channels),
                     name="cond_in")
    inputs = [lores_in, initial_state, noise_in_update]

    xt = TimeDistributed(ReflectionPadding2D(padding=(1, 1)))(lores_in)
    xt = TimeDistributed(
        Conv2D(256 - noise_in_update.shape[-1], kernel_size=(3, 3)))(xt)
    xt = Concatenate()([xt, noise_in_update])
    for i in range(num_preproc):
        xt = res_block(256, time_dist=True, activation='relu')(xt)

    def gen_gate(activation='sigmoid'):
        def gate(x):
            x = ReflectionPadding2D(padding=(1, 1))(x)
            x = Conv2D(256, kernel_size=(3, 3))(x)
            if activation is not None:
                x = Activation(activation)(x)
            return x

        return Lambda(gate)

    x = CustomGateGRU(update_gate=gen_gate(),
                      reset_gate=gen_gate(),
                      output_gate=gen_gate(activation=None),
                      return_sequences=True,
                      time_steps=num_timesteps)([xt, initial_state])

    h = x[:, -1, ...]

    block_channels = [256, 256, 128, 64, 32]
    for (i, channels) in enumerate(block_channels):
        if i > 0:
            x = TimeDistributed(UpSampling2D(interpolation='bilinear'))(x)
        x = res_block(channels, time_dist=True, activation='leakyrelu')(x)

    x = TimeDistributed(ReflectionPadding2D(padding=(1, 1)))(x)
    img_out = TimeDistributed(
        Conv2D(num_channels, kernel_size=(3, 3), activation='sigmoid'))(x)

    model = Model(inputs=inputs, outputs=[img_out, h])

    def noise_shapes(img_shape=(128, 128)):
        noise_shape_update = (num_timesteps, img_shape[0] // 16,
                              img_shape[1] // 16, 8)
        return [noise_shape_update]

    return (model, noise_shapes)
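
A hypothetical call pattern for the pair returned above (variable names are illustrative; the shape follows from num_timesteps=8 and the //16 downscaling in noise_shapes):

gen_model, noise_shapes = generator(num_channels=1, num_timesteps=8)
(noise_shape,) = noise_shapes(img_shape=(128, 128))
print(noise_shape)  # (8, 8, 8, 8): timesteps, H // 16, W // 16, 8 channels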
Example #3
def conv(x,
         n_filters,
         kernel_size=3,
         stride=1,
         relu=True,
         nb_classes=1,
         targets=None):
    '''
    Reflection padding, convolution, instance normalization and (maybe) relu.
    '''
    if not kernel_size % 2:
        raise ValueError('Expected odd kernel size.')
    pad = (kernel_size - 1) // 2  # integer division: a float pad breaks ReflectionPadding2D on Python 3
    o = ReflectionPadding2D(padding=(pad, pad))(x)
    o = Convolution2D(n_filters,
                      kernel_size,
                      kernel_size,
                      subsample=(stride, stride),
                      init=weights_init)(o)
    # o = BatchNormalization()(o)
    if nb_classes > 1:
        o = ConditionalInstanceNormalization(targets, nb_classes)(o)
    else:
        o = InstanceNormalization()(o)
    if relu:
        o = Activation('relu')(o)
    return o
Example #4
    def __init__(self,
                 base_filters=32,
                 lrelu_alpha=0.2,
                 pad_type="reflect",
                 norm_type="batch"):
        super(Discriminator, self).__init__(name="Discriminator")
        if pad_type == "reflect":
            self.flat_pad = ReflectionPadding2D()
        elif pad_type == "constant":
            self.flat_pad = ZeroPadding2D()
        else:
            raise ValueError(f"pad_type not recognized {pad_type}")

        self.flat_conv = Conv2D(base_filters, 3)
        self.flat_lru = LeakyReLU(lrelu_alpha)
        self.strided_conv1 = StridedConv(base_filters * 2,
                                         lrelu_alpha,
                                         pad_type,
                                         norm_type)
        self.strided_conv2 = StridedConv(base_filters * 4,
                                         lrelu_alpha,
                                         pad_type,
                                         norm_type)
        self.conv2 = Conv2D(base_filters * 8, 3)

        if norm_type == "instance":
            self.norm = InstanceNormalization()
        elif norm_type == "batch":
            self.norm = BatchNormalization()
        else:
            raise ValueError(f"norm_type not recognized {norm_type}")

        self.lrelu = LeakyReLU(lrelu_alpha)

        self.final_conv = Conv2D(1, 3)
Example #5
def conv(x,
         n_filters,
         kernel_size=3,
         stride=1,
         relu=True,
         nb_classes=1,
         targets=None):
    '''
    Reflection padding, convolution, instance normalization and (maybe) relu.
    '''
    if not kernel_size % 2:
        raise ValueError('Expected odd kernel size.')
    pad = (kernel_size - 1) // 2
    o = ReflectionPadding2D(padding=(pad, pad))(x)

    # initialize kernel weights with Normal dist w/ stddev 0.01 instead of 0.05
    o = Convolution2D(n_filters, (kernel_size, kernel_size),
                      strides=(stride, stride),
                      kernel_initializer=RandomNormal(stddev=0.01))(o)

    # o = BatchNormalization()(o)
    if nb_classes > 1:
        o = ConditionalInstanceNormalization(targets, nb_classes)(o)
    else:
        o = InstanceNormalization()(o)
    if relu:
        o = Activation('relu')(o)
    return o
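
A hypothetical call to the conv helper above, assuming a channels-last input (names here are illustrative, not from the source):

inp = Input(shape=(256, 256, 3))
# 3x3 reflection-padded conv with stride 2, then instance norm and ReLU.
feat = conv(inp, n_filters=64, kernel_size=3, stride=2)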
Example #6
def gate(x):
    x = ReflectionPadding2D(padding=(1, 1))(x)
    x = SNConv2D(256,
                 kernel_size=(3, 3),
                 kernel_initializer='he_uniform')(x)
    if activation is not None:
        x = Activation(activation)(x)
    return x
Example #7
def image_transform_net(img_width, img_height, tv_weight=1):

    x = Input(shape=(img_width, img_height, 3), name='itn_input')
    a = InputNormalize(name='itn_input_norm')(x)
    a = ReflectionPadding2D(padding=(40, 40),
                            input_shape=(img_width, img_height, 3),
                            name='itn_reflectpad')(a)

    a = Conv2D(32, (9, 9), strides=1, padding='same', name='conv_1')(a)
    a = BatchNormalization(name='batch_norm_1')(a)
    a = Activation('relu', name='act_1')(a)

    a = Conv2D(64, (3, 3), strides=2, padding='same', name='conv_2')(a)
    a = BatchNormalization(name='batch_norm_2')(a)
    a = Activation('relu', name='act_2')(a)

    a = Conv2D(128, (3, 3), strides=2, padding='same', name='conv_3')(a)
    a = BatchNormalization(name='batch_norm_3')(a)
    a = Activation('relu', name='act_3')(a)

    # Residual 1
    a = res_conv(128, 3, 3)(a)

    # Residual 2
    a = res_conv(128, 3, 3)(a)

    # Residual 3
    a = res_conv(128, 3, 3)(a)

    # Residual 4
    a = res_conv(128, 3, 3)(a)

    # Residual 5
    a = res_conv(128, 3, 3)(a)

    a = Conv2DTranspose(64, (3, 3), strides=2, padding='same',
                        name='conv_4')(a)
    a = BatchNormalization(name='batch_norm_4')(a)
    a = Activation('relu', name='act_4')(a)

    a = Conv2DTranspose(32, (3, 3), strides=2, padding='same',
                        name='conv_5')(a)
    a = BatchNormalization(name='batch_norm_5')(a)
    a = Activation('relu', name='act_5')(a)

    a = Conv2D(3, (9, 9), strides=1, padding='same', name='conv_6')(a)
    a = BatchNormalization(name='batch_norm_6')(a)
    a = Activation('tanh', name='act_6')(a)  #output_image
    # Scale output to range [0, 255] via custom Denormalize layer
    y_hat = Scale_tanh(name='transform_output')(a)

    itn_model = Model(inputs=x, outputs=y_hat)
    #itn_model.load_weights('wave_crop_weights.h5', by_name=True)
    #print(model.output.shape)
    add_total_variation_loss(itn_model.layers[-1], tv_weight)
    return itn_model
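
Scale_tanh here (like the Denormalize layer in the later transform nets) is another custom layer these examples assume. A minimal sketch, assuming the usual convention of mapping tanh output in [-1, 1] to the image range [0, 255]:

from tensorflow.keras.layers import Layer


class Scale_tanh(Layer):
    """Map tanh activations from [-1, 1] to [0, 255]."""

    def call(self, inputs):
        return (inputs + 1.0) * 127.5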
Example #8
def initial_state_model(num_preproc=3):
    initial_frame_in = Input(shape=(None, None, 1))
    noise_in_initial = Input(shape=(None, None, 8), name="noise_in_initial")

    h = ReflectionPadding2D(padding=(1, 1))(initial_frame_in)
    h = Conv2D(256 - noise_in_initial.shape[-1], kernel_size=(3, 3))(h)
    h = Concatenate()([h, noise_in_initial])
    for i in range(num_preproc):
        h = res_block(256, activation='relu')(h)

    return Model(inputs=[initial_frame_in, noise_in_initial], outputs=h)
Example #9
def mirror_model(source_model, x):
    for pos in range(len(source_model.layers) - 1, 0, -1):
        layer = source_model.layers[pos]
        # x = Lambda(lambda y : _print_tensor(y, layer.name), name = layer.name + "_debug")(x)
        if type(layer) == Conv2D:
            x = ReflectionPadding2D(padding=((1, 1), (1, 1)), name=layer.name + "_padding")(x)
            act_func = None if pos == 1 else 'relu'
            x = Conv2D(layer.input_shape[3], layer.kernel_size, activation=act_func, \
                       padding='valid', name="decoder_" + layer.name)(x)
        elif type(layer) == MaxPooling2D:
            x = UpSampling2D(size=layer.pool_size, name=layer.name.replace("pool", "upsampling"))(x)
    return x
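
A hypothetical use of mirror_model: mirror a small convolutional encoder into a decoder (the VGG16 encoder is an assumption, not from the source):

from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Model

encoder = VGG16(include_top=False, input_shape=(224, 224, 3))
# Walks the encoder back to front: each Conv2D becomes reflection padding
# plus a Conv2D, each MaxPooling2D becomes an UpSampling2D.
decoded = mirror_model(encoder, encoder.output)
autoencoder = Model(encoder.input, decoded)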
Example #10
def image_transform_net(img_width, img_height, tv_weight=1):

    x = Input(shape=(img_width, img_height, 3))
    a = InputNormalize()(x)
    a = ReflectionPadding2D(padding=(40, 40),
                            input_shape=(img_width, img_height, 3))(a)

    a = Conv2D(32, (9, 9), strides=1, padding='same')(a)
    a = BatchNormalization()(a)
    a = Activation('relu')(a)

    a = Conv2D(64, (3, 3), strides=2, padding='same')(a)
    a = BatchNormalization()(a)
    a = Activation('relu')(a)

    a = Conv2D(128, (3, 3), strides=2, padding='same')(a)
    a = BatchNormalization()(a)
    a = Activation('relu')(a)

    for i in range(5):
        a = Conv2D(128, (3, 3), strides=1, padding='valid')(a)
        a = BatchNormalization()(a)
        a = Activation('relu')(a)
        a = Conv2D(128, (3, 3), strides=1, padding='valid')(a)
        a = BatchNormalization()(a)
        a = Activation('relu')(a)

    a = Conv2DTranspose(64, (3, 3), strides=2, padding='same')(a)
    a = BatchNormalization()(a)
    a = Activation('relu')(a)

    a = Conv2DTranspose(32, (3, 3), strides=2, padding='same')(a)
    a = BatchNormalization()(a)
    a = Activation('relu')(a)

    a = Conv2D(3, (9, 9), strides=1, padding='same')(a)
    a = BatchNormalization()(a)
    a = Activation('tanh')(a)  #output_image
    # Scale output to range [0, 255] via custom Denormalize layer
    y_hat = Scale_tanh(name='transform_output')(a)

    model = Model(inputs=x, outputs=y_hat)
    #print(model.output.shape)
    add_total_variation_loss(model.layers[-1], tv_weight)
    return model
Example #11
def image_transform_net(img_width, img_height, tv_weight=1):
    x = Input(shape=(img_width, img_height, 3))
    a = InputNormalize()(x)
    a = ReflectionPadding2D(padding=(40, 40),
                            input_shape=(img_width, img_height, 3))(a)
    a = conv_bn_relu(32, 9, 9, stride=(1, 1))(a)
    a = conv_bn_relu(64, 9, 9, stride=(2, 2))(a)
    a = conv_bn_relu(128, 3, 3, stride=(2, 2))(a)
    for i in range(5):
        a = res_conv(128, 3, 3)(a)
    a = dconv_bn_nolinear(64, 3, 3)(a)
    a = dconv_bn_nolinear(32, 3, 3)(a)
    a = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(a)
    # Scale output to range [0, 255] via custom Denormalize layer
    y = Denormalize(name='transform_output')(a)

    model = Model(inputs=x, outputs=y)

    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)

    return model
Example #12
def block(x):
    if norm == "batch":
        x = BatchNormalization(momentum=0.8, scale=False)(x)
    if activation == 'leakyrelu':
        x = LeakyReLU(0.2)(x)
    elif activation == 'relu':
        x = ReLU()(x)
    elif activation == 'thresholdedrelu':
        x = ThresholdedReLU()(x)
    elif activation == 'elu':
        x = ELU()(x)
    if padding == 'reflect':
        pad = tuple((s - 1) // 2 for s in conv_size)
        x = TD(ReflectionPadding2D(padding=pad))(x)
    x = TD(
        Conv(channels,
             conv_size,
             padding='valid' if padding == 'reflect' else padding,
             strides=(stride, stride),
             kernel_regularizer=(l2(1e-4)
                                 if norm != "spectral" else None)))(x)
    return x
Example #13
def image_transform_net(img_width, img_height, tv_weight=1):
    """
        Image tranform
        network model.
    """
    # Input layer as an RGB image
    x = Input(shape=(img_width, img_height, 3))

    # Normalize input image
    a = InputNormalize()(x)

    # Pad image
    a = ReflectionPadding2D(padding=(40, 40),
                            input_shape=(img_width, img_height, 3))(a)

    # Extract feature maps
    a = conv_bn_relu(32, 9, 9, stride=(1, 1))(a)
    a = conv_bn_relu(64, 3, 3,
                     stride=(2, 2))(a)  # The previous kernel size was 9x9
    a = conv_bn_relu(128, 3, 3, stride=(2, 2))(a)
    for _ in range(5):
        a = res_conv(128, 3, 3)(a)
    a = dconv_bn_nolinear(64, 3, 3)(a)
    a = dconv_bn_nolinear(32, 3, 3)(a)
    a = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(a)

    # Scale output to range [0, 255] via custom Denormalize layer
    y = Denormalize(name='transform_output')(a)

    # Create model
    model = Model(inputs=x, outputs=y)

    # Total variation regularizer
    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)

    return model
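
A hypothetical build of the transform net above (the compile loss is a placeholder; in these setups the real objectives are attached as regularizers via add_loss):

itn = image_transform_net(256, 256, tv_weight=1e-6)
itn.compile(optimizer='adam', loss='mae')  # placeholder loss
itn.summary()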
Example #14
    def __init__(self, base_filters=32, lrelu_alpha=0.2, pad_type="reflect"):
        super(Discriminator, self).__init__(name="Discriminator")
        if pad_type == "reflect":
            self.flat_pad = ReflectionPadding2D()
        elif pad_type == "constant":
            self.flat_pad = ZeroPadding2D()
        else:
            raise ValueError(f"pad_type not recognized {pad_type}")

        self.flat_conv = Conv2D(base_filters, 3)
        self.flat_lru = LeakyReLU(lrelu_alpha)
        self.strided_conv1 = StridedConv(base_filters * 2,
                                         lrelu_alpha,
                                         pad_type,
                                         gp_num=32)
        self.strided_conv2 = StridedConv(base_filters * 4,
                                         lrelu_alpha,
                                         pad_type,
                                         gp_num=64)
        self.gp_norm = GroupNormalization(groups=64, axis=-1)
        self.conv2 = WeightNormalization(Conv2D(base_filters * 8, 3))
        self.lrelu = LeakyReLU(lrelu_alpha)

        self.final_conv = Conv2D(1, 3)
Example #15
    def create_model(self,
                     style_name=None,
                     train_mode=False,
                     style_image_path=None,
                     validation_path=None):
        '''
        Creates the FastNet model, which can be used in train mode, predict mode or validation mode.
        If train_mode = True, this model appends the VGG model to the end of the FastNet model.
        Train mode requires a style image path to be supplied.
        If train_mode = False and validation_path = None, the model is in predict mode.
        Predict mode requires a style_name to be supplied, whose weights it will try to load.
        If validation_path is not None, the model is in validation mode.
        Validation mode simply loads the weights from validation_path and does not append VGG.
        Args:
            style_name: Used in predict mode to load the correct weights for the style.
            train_mode: Used to activate train mode. Default is predict mode.
            style_image_path: Path to the style image. Required in train mode.
            validation_path: Path to the validation weights to load.
        Returns: FastNet model (predict / validation mode) or FastNet + VGG model (train mode).
        '''

        if train_mode and style_image_path is None:
            raise ValueError(
                'Style reference path must be supplied if training mode is enabled'
            )

        self.mode = 2

        if K.image_dim_ordering() == "th":
            ip = Input(shape=(3, self.img_width, self.img_height),
                       name="X_input")
        else:
            ip = Input(shape=(self.img_width, self.img_height, 3),
                       name="X_input")

        c1 = ReflectionPadding2D((4, 4))(ip)

        c1 = Convolution2D(32,
                           9,
                           9,
                           activation='linear',
                           border_mode='valid',
                           name='conv1')(c1)
        c1_b = BatchNormalization(axis=1, mode=self.mode,
                                  name="batchnorm1")(c1)
        c1_b = Activation('relu')(c1_b)

        c2 = Convolution2D(self.features,
                           self.k,
                           self.k,
                           activation='linear',
                           border_mode='same',
                           subsample=(2, 2),
                           name='conv2')(c1_b)
        c2_b = BatchNormalization(axis=1, mode=self.mode,
                                  name="batchnorm2")(c2)
        c2_b = Activation('relu')(c2_b)

        c3 = Convolution2D(self.features,
                           self.k,
                           self.k,
                           activation='linear',
                           border_mode='same',
                           subsample=(2, 2),
                           name='conv3')(c2_b)
        x = BatchNormalization(axis=1, mode=self.mode, name="batchnorm3")(c3)
        x = Activation('relu')(x)

        if self.deep_model:
            c4 = Convolution2D(self.features,
                               self.k,
                               self.k,
                               activation='linear',
                               border_mode='same',
                               subsample=(2, 2),
                               name='conv4')(x)

            x = BatchNormalization(axis=1, mode=self.mode,
                                   name="batchnorm_4")(c4)
            x = Activation('relu')(x)

        r1 = self._residual_block(x, 1)
        r2 = self._residual_block(r1, 2)
        r3 = self._residual_block(r2, 3)
        r4 = self._residual_block(r3, 4)
        x = self._residual_block(r4, 5)

        if self.deep_model:
            d4 = Deconvolution2D(self.features,
                                 self.k,
                                 self.k,
                                 activation="linear",
                                 border_mode="same",
                                 subsample=(2, 2),
                                 output_shape=(1, self.features,
                                               self.img_width // 4,
                                               self.img_height // 4),
                                 name="deconv4")(x)

            x = BatchNormalization(axis=1,
                                   mode=self.mode,
                                   name="batchnorm_extra4")(d4)
            x = Activation('relu')(x)

        d3 = Deconvolution2D(self.features,
                             self.k,
                             self.k,
                             activation="linear",
                             border_mode="same",
                             subsample=(2, 2),
                             output_shape=(1, self.features,
                                           self.img_width // 2,
                                           self.img_height // 2),
                             name="deconv3")(x)

        d3 = BatchNormalization(axis=1, mode=self.mode, name="batchnorm4")(d3)
        d3 = Activation('relu')(d3)

        d2 = Deconvolution2D(self.features,
                             self.k,
                             self.k,
                             activation="linear",
                             border_mode="same",
                             subsample=(2, 2),
                             output_shape=(1, self.features, self.img_width,
                                           self.img_height),
                             name="deconv2")(d3)

        d2 = BatchNormalization(axis=1, mode=self.mode, name="batchnorm5")(d2)
        d2 = Activation('relu')(d2)

        d1 = ReflectionPadding2D((4, 4))(d2)
        d1 = Convolution2D(3,
                           9,
                           9,
                           activation='tanh',
                           border_mode='valid',
                           name='fastnet_conv')(d1)

        # Scale output to range [0, 255] via custom Denormalize layer
        d1 = Denormalize(name='fastnet_output')(d1)

        model = Model(ip, d1)

        if self.model_save_path is not None and self.model is None:
            model.save(self.model_save_path, overwrite=True)

        self.fastnet_outputs_dict = dict([(layer.name, layer.output)
                                          for layer in model.layers])
        fastnet_output_layer = model.layers[-1]

        if style_name is not None or validation_path is not None:
            try:
                if validation_path is not None:
                    path = validation_path
                else:
                    path = "weights/fastnet_%s.h5" % style_name

                model.load_weights(path)
                print('Fast Style Net weights loaded.')
            except Exception:
                print(
                    'Weights for this style do not exist. Model weights not loaded.'
                )

        # Add VGG layers to Fast Style Model
        if train_mode:
            model = VGG(self.img_height, self.img_width).append_vgg_model(
                model.input, x_in=model.output, pool_type=self.pool_type)

            if self.model is None:
                self.model = model

            self.vgg_output_dict = dict([(layer.name, layer.output)
                                         for layer in model.layers[-18:]])

            vgg_layers = dict([(layer.name, layer)
                               for layer in model.layers[-18:]])

            style = img_utils.preprocess_image(style_image_path,
                                               self.img_width, self.img_height)
            print('Getting style features from VGG network.')

            self.style_layers = ['conv1_2', 'conv2_2', 'conv3_3', 'conv4_3']

            self.style_layer_outputs = []
            for layer in self.style_layers:
                self.style_layer_outputs.append(self.vgg_output_dict[layer])

            style_features = self.get_vgg_style_features(style)
            self.style_features = style_features

            # Style Reconstruction Loss
            if self.style_weight != 0.0:
                for i, layer_name in enumerate(self.style_layers):
                    layer = vgg_layers[layer_name]
                    style_loss = StyleReconstructionRegularizer(
                        style_feature_target=style_features[i][0],
                        weight=self.style_weight)(layer)

                    layer.add_loss(style_loss)

            # Feature Reconstruction Loss
            self.content_layer = 'conv4_2'
            self.content_layer_output = self.vgg_output_dict[
                self.content_layer]

            if self.content_weight != 0.0:
                layer = vgg_layers[self.content_layer]
                content_regularizer = FeatureReconstructionRegularizer(
                    weight=self.content_weight)(layer)
                layer.add_loss(content_regularizer)

        # Total Variation Regularization
        if self.tv_weight != 0.0:
            layer = fastnet_output_layer  # Fastnet Output layer
            tv_regularizer = TVRegularizer(img_width=self.img_width,
                                           img_height=self.img_height,
                                           weight=self.tv_weight)(layer)
            layer.add_loss(tv_regularizer)

        if self.model is None:
            self.model = model
        return model
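
Hypothetical calls matching the three modes the docstring describes (the surrounding FastNet class name and file paths are assumptions):

net = FastNet(img_width=256, img_height=256)

# Train mode: appends VGG and requires a style image.
train_model = net.create_model(train_mode=True, style_image_path='style.jpg')

# Predict mode: loads weights for a named style.
predict_model = net.create_model(style_name='wave')

# Validation mode: loads the given weights; VGG is not appended.
val_model = net.create_model(validation_path='weights/fastnet_wave.h5')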
Example #16
def gate(x):
    x = ReflectionPadding2D(padding=(1, 1))(x)
    x = Conv2D(256, kernel_size=(3, 3))(x)
    if activation is not None:
        x = Activation(activation)(x)
    return x