def image_transform_net(img_width, img_height, tv_weight=1):
    """Build the image-transform network with explicitly named layers.

    Args:
        img_width: input image width in pixels.
        img_height: input image height in pixels.
        tv_weight: weight for the total-variation regularizer attached to
            the output layer.

    Returns:
        A Keras ``Model`` mapping an RGB image to a stylized RGB image.
    """
    inp = Input(shape=(img_width, img_height, 3), name='itn_input')
    net = InputNormalize(name='itn_input_norm')(inp)
    net = ReflectionPadding2D(
        padding=(40, 40),
        input_shape=(img_width, img_height, 3),
        name='itn_reflectpad',
    )(net)

    # Encoder: one stride-1 conv then two stride-2 downsampling convs,
    # each followed by batch norm + ReLU.
    encoder_specs = [
        (32, (9, 9), 1, 'conv_1', 'batch_norm_1', 'act_1'),
        (64, (3, 3), 2, 'conv_2', 'batch_norm_2', 'act_2'),
        (128, (3, 3), 2, 'conv_3', 'batch_norm_3', 'act_3'),
    ]
    for filters, kernel, stride, conv_name, bn_name, act_name in encoder_specs:
        net = Conv2D(filters, kernel, strides=stride, padding='same',
                     name=conv_name)(net)
        net = BatchNormalization(name=bn_name)(net)
        net = Activation('relu', name=act_name)(net)

    # Five residual blocks at the bottleneck resolution.
    for _ in range(5):
        net = res_conv(128, 3, 3)(net)

    # Decoder: two stride-2 transposed convs back to full resolution.
    for filters, conv_name, bn_name, act_name in [
        (64, 'conv_4', 'batch_norm_4', 'act_4'),
        (32, 'conv_5', 'batch_norm_5', 'act_5'),
    ]:
        net = Conv2DTranspose(filters, (3, 3), strides=2, padding='same',
                              name=conv_name)(net)
        net = BatchNormalization(name=bn_name)(net)
        net = Activation('relu', name=act_name)(net)

    # Final conv projects to 3 channels; tanh bounds activations to [-1, 1].
    net = Conv2D(3, (9, 9), strides=1, padding='same', name='conv_6')(net)
    net = BatchNormalization(name='batch_norm_6')(net)
    net = Activation('tanh', name='act_6')(net)

    # Scale_tanh maps the tanh output back to image range
    # (presumably [0, 255] — confirm against the layer's implementation).
    out = Scale_tanh(name='transform_output')(net)

    itn_model = Model(inputs=inp, outputs=out)
    add_total_variation_loss(itn_model.layers[-1], tv_weight)
    return itn_model
def _get_model(self):
    """Construct a small, fully convolutional transform network.

    The input shape is (None, None, 3), so the model accepts images of
    any spatial size at inference time.

    Returns:
        A ``tf.keras.Model`` named "transformnet" mapping RGB in to RGB out.
    """
    inp = tf.keras.Input(shape=(None, None, 3))
    net = InputNormalize()(inp)

    # Encoder: widen channels while halving resolution twice.
    net = conv_bn_relu(8, 9, 9, stride=(1, 1))(net)
    net = conv_bn_relu(16, 3, 3, stride=(2, 2))(net)
    net = conv_bn_relu(32, 3, 3, stride=(2, 2))(net)

    # Two residual blocks at the bottleneck.
    for _ in range(2):
        net = res_conv(32, 3, 3)(net)

    # Decoder mirrors the encoder; final block uses tanh to bound output.
    net = dconv_bn_nolinear(16, 3, 3)(net)
    net = dconv_bn_nolinear(8, 3, 3)(net)
    net = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(net)

    # Scale output to range [0, 255] via custom Denormalize layer.
    out = Denormalize(name='transform_output')(net)
    return tf.keras.Model(inp, out, name="transformnet")
def image_transform_net(img_width, img_height, tv_weight=1):
    """Build the image-transform network using the ``layers`` module helpers.

    Args:
        img_width: input image width in pixels.
        img_height: input image height in pixels.
        tv_weight: total-variation regularizer weight; skipped when <= 0.

    Returns:
        A Keras ``Model`` mapping an RGB image to a stylized RGB image.
    """
    inp = Input(shape=(img_width, img_height, 3), name="input")
    net = layers.InputNormalize()(inp)
    net = layers.ReflectionPadding2D(
        padding=(40, 40), input_shape=(img_width, img_height, 3))(net)

    # Encoder.
    net = layers.conv_bn_relu(32, 9, 9, stride=(1, 1))(net)
    net = layers.conv_bn_relu(64, 9, 9, stride=(2, 2))(net)
    net = layers.conv_bn_relu(128, 3, 3, stride=(2, 2))(net)

    # Bottleneck: five residual blocks.
    for _ in range(5):
        net = layers.res_conv(128, 3, 3)(net)

    # Decoder.
    # NOTE(review): both upsampling blocks use 64 filters here, unlike the
    # sibling builders' 64 -> 32 progression — confirm this is intentional.
    net = layers.dconv_bn_nolinear(64, 3, 3)(net)
    net = layers.dconv_bn_nolinear(64, 3, 3)(net)
    net = layers.conv_bn_relu(3, 9, 9, stride=(1, 1), relu=False)(net)

    out = layers.Denormalize(name='transform_output')(net)

    model = Model(inputs=inp, outputs=out)
    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)
    return model
def image_transform_net(img_width, img_height, tv_weight=1):
    """Build the feed-forward image-transform network.

    Args:
        img_width: input image width in pixels.
        img_height: input image height in pixels.
        tv_weight: total-variation regularizer weight; skipped when <= 0.

    Returns:
        A Keras ``Model`` mapping an RGB image to a stylized RGB image.
    """
    inp = Input(shape=(img_width, img_height, 3))

    # Normalize, then reflection-pad so the strided convs don't shrink
    # the usable output region.
    net = InputNormalize()(inp)
    net = ReflectionPadding2D(
        padding=(40, 40), input_shape=(img_width, img_height, 3))(net)

    # Encoder: 32 -> 64 -> 128 channels, halving resolution twice.
    for filters, kh, kw, stride in [(32, 9, 9, (1, 1)),
                                    (64, 9, 9, (2, 2)),
                                    (128, 3, 3, (2, 2))]:
        net = conv_bn_relu(filters, kh, kw, stride=stride)(net)

    # Bottleneck: five residual blocks.
    for _ in range(5):
        net = res_conv(128, 3, 3)(net)

    # Decoder mirrors the encoder; final block's tanh bounds the output.
    net = dconv_bn_nolinear(64, 3, 3)(net)
    net = dconv_bn_nolinear(32, 3, 3)(net)
    net = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(net)

    # Scale output to range [0, 255] via custom Denormalize layer.
    out = Denormalize(name='transform_output')(net)

    model = Model(inputs=inp, outputs=out)
    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)
    return model
def image_transform_net(img_width, img_height, tv_weight=1):
    """Image transform network model.

    Args:
        img_width: input image width in pixels.
        img_height: input image height in pixels.
        tv_weight: weight of the total-variation regularizer; a value of
            0 or less disables it.

    Returns:
        A Keras ``Model`` mapping an RGB image to a stylized RGB image.
    """
    # RGB input, normalized then reflection-padded before the encoder.
    inp = Input(shape=(img_width, img_height, 3))
    net = InputNormalize()(inp)
    net = ReflectionPadding2D(
        padding=(40, 40), input_shape=(img_width, img_height, 3))(net)

    # Feature extraction (encoder). The second conv uses a 3x3 kernel;
    # an earlier revision used 9x9 here.
    net = conv_bn_relu(32, 9, 9, stride=(1, 1))(net)
    net = conv_bn_relu(64, 3, 3, stride=(2, 2))(net)
    net = conv_bn_relu(128, 3, 3, stride=(2, 2))(net)

    # Five residual blocks at the bottleneck resolution.
    for _ in range(5):
        net = res_conv(128, 3, 3)(net)

    # Decoder: upsample back to the input resolution; tanh bounds output.
    net = dconv_bn_nolinear(64, 3, 3)(net)
    net = dconv_bn_nolinear(32, 3, 3)(net)
    net = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(net)

    # Scale output to range [0, 255] via custom Denormalize layer.
    out = Denormalize(name='transform_output')(net)

    model = Model(inputs=inp, outputs=out)

    # Optional total-variation regularizer on the output layer.
    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)

    return model