def _get_model(self):
    """Build the compact feed-forward image transform network.

    Returns a tf.keras.Model named "transformnet" that maps an RGB image
    of arbitrary spatial size to an RGB image scaled to [0, 255] by the
    custom Denormalize layer.
    """
    # Accept RGB inputs of any height/width.
    x = tf.keras.Input(shape=(None, None, 3))
    a = InputNormalize()(x)

    # Encoder: widen channels while downsampling spatially twice.
    a = conv_bn_relu(8, 9, 9, stride=(1, 1))(a)
    a = conv_bn_relu(16, 3, 3, stride=(2, 2))(a)
    a = conv_bn_relu(32, 3, 3, stride=(2, 2))(a)

    # Bottleneck: two residual blocks at 32 channels.
    for _ in range(2):
        a = res_conv(32, 3, 3)(a)

    # Decoder: upsample back to input resolution; tanh squashes to [-1, 1].
    a = dconv_bn_nolinear(16, 3, 3)(a)
    a = dconv_bn_nolinear(8, 3, 3)(a)
    a = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(a)

    # Scale output to range [0, 255] via custom Denormalize layer.
    y = Denormalize(name='transform_output')(a)

    return tf.keras.Model(x, y, name="transformnet")
def image_transform_net(img_width, img_height, tv_weight=1):
    """Build the image transform network (layers-namespace variant).

    Args:
        img_width: input image width in pixels.
        img_height: input image height in pixels.
        tv_weight: weight of the total-variation regularizer; when > 0 it
            is attached to the output layer via add_total_variation_loss.

    Returns:
        A Keras Model whose output is denormalized to the [0, 255] range.
    """
    x = Input(shape=(img_width, img_height, 3), name="input")
    a = layers.InputNormalize()(x)
    # Reflection-pad so border artifacts from the strided convs are cropped later.
    a = layers.ReflectionPadding2D(padding=(40, 40), input_shape=(img_width, img_height, 3))(a)

    # Encoder.
    # NOTE(review): the 9x9 kernel on the strided conv and the 64/64 decoder
    # widths differ from the sibling variants in this file (3x3 and 64/32) —
    # confirm this is intentional before reconciling.
    a = layers.conv_bn_relu(32, 9, 9, stride=(1, 1))(a)
    a = layers.conv_bn_relu(64, 9, 9, stride=(2, 2))(a)
    a = layers.conv_bn_relu(128, 3, 3, stride=(2, 2))(a)

    # Five residual blocks at the bottleneck.
    for _ in range(5):
        a = layers.res_conv(128, 3, 3)(a)

    # Decoder; final conv has relu disabled so Denormalize sees raw activations.
    a = layers.dconv_bn_nolinear(64, 3, 3)(a)
    a = layers.dconv_bn_nolinear(64, 3, 3)(a)
    a = layers.conv_bn_relu(3, 9, 9, stride=(1, 1), relu=False)(a)

    # Scale output to range [0, 255].
    y = layers.Denormalize(name='transform_output')(a)

    model = Model(inputs=x, outputs=y)

    # Optional total-variation regularizer on the output layer.
    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)

    return model
def image_transform_net(img_width, img_height, tv_weight=1):
    """Build the image transform network (flat-namespace variant).

    Args:
        img_width: input image width in pixels.
        img_height: input image height in pixels.
        tv_weight: weight of the total-variation regularizer; when > 0 it
            is attached to the output layer via add_total_variation_loss.

    Returns:
        A Keras Model whose output is denormalized to the [0, 255] range.
    """
    # NOTE(review): this file defines image_transform_net more than once;
    # if these live in a single module, later definitions shadow this one.
    x = Input(shape=(img_width, img_height, 3))
    a = InputNormalize()(x)
    # Reflection padding so downstream cropping can remove border artifacts.
    a = ReflectionPadding2D(padding=(40, 40), input_shape=(img_width, img_height, 3))(a)

    # Encoder: two strided convs halve the resolution twice.
    a = conv_bn_relu(32, 9, 9, stride=(1, 1))(a)
    a = conv_bn_relu(64, 9, 9, stride=(2, 2))(a)
    a = conv_bn_relu(128, 3, 3, stride=(2, 2))(a)

    # Five residual blocks at the bottleneck.
    for _ in range(5):
        a = res_conv(128, 3, 3)(a)

    # Decoder: upsample back; tanh constrains output to [-1, 1].
    a = dconv_bn_nolinear(64, 3, 3)(a)
    a = dconv_bn_nolinear(32, 3, 3)(a)
    a = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(a)

    # Scale output to range [0, 255] via custom Denormalize layer.
    y = Denormalize(name='transform_output')(a)

    model = Model(inputs=x, outputs=y)

    # Optional total-variation regularizer on the output layer.
    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)

    return model
def image_transform_net(img_width, img_height, tv_weight=1):
    """Build the image transform network (3x3 second-conv variant).

    Args:
        img_width: input image width in pixels.
        img_height: input image height in pixels.
        tv_weight: weight of the total-variation regularizer; when > 0 it
            is attached to the output layer via add_total_variation_loss.

    Returns:
        A Keras Model whose output is denormalized to the [0, 255] range.
    """
    # RGB input.
    x = Input(shape=(img_width, img_height, 3))

    # Normalize, then reflection-pad to absorb border artifacts.
    a = InputNormalize()(x)
    a = ReflectionPadding2D(padding=(40, 40), input_shape=(img_width, img_height, 3))(a)

    # Encoder — the second conv kernel was reduced from 9x9 to 3x3.
    a = conv_bn_relu(32, 9, 9, stride=(1, 1))(a)
    a = conv_bn_relu(64, 3, 3, stride=(2, 2))(a)
    a = conv_bn_relu(128, 3, 3, stride=(2, 2))(a)

    # Five residual blocks at the bottleneck.
    for _ in range(5):
        a = res_conv(128, 3, 3)(a)

    # Decoder: upsample and squash to [-1, 1] with tanh.
    a = dconv_bn_nolinear(64, 3, 3)(a)
    a = dconv_bn_nolinear(32, 3, 3)(a)
    a = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(a)

    # Scale output to range [0, 255] via custom Denormalize layer.
    y = Denormalize(name='transform_output')(a)

    model = Model(inputs=x, outputs=y)

    # Optional total-variation regularizer on the output layer.
    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)

    return model