def generator():
    """
    The purpose of the Generator model is to generate images that look real.
    During training, the Generator progressively becomes better at creating
    images that look real.

    The Generator upsamples random noise to produce images: it takes a noise
    vector as input, then upsamples several times until it reaches the desired
    image size (in this case 28x28x1).

    :return: The Generator model.
    """
    model = keras.Sequential([
        layers.Dense(units=7 * 7 * 256, use_bias=False,
                     input_shape=(GEN_NOISE_INPUT_SHAPE,)),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Reshape((7, 7, 256)),
        layers.Conv2DTranspose(filters=128, kernel_size=(5, 5), strides=(1, 1),
                               padding="same", use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Conv2DTranspose(filters=64, kernel_size=(5, 5), strides=(2, 2),
                               padding="same", use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Conv2DTranspose(filters=1, kernel_size=(5, 5), strides=(2, 2),
                               padding="same", use_bias=False, activation="tanh"),
    ])
    return model
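# Usage sketch for generator(), hedged: this demo is not part of the original
# source. It assumes `import tensorflow as tf` and that GEN_NOISE_INPUT_SHAPE
# (the latent size, e.g. 100) is defined elsewhere in the module, as above.
def _demo_generator():
    gen = generator()
    noise = tf.random.normal([16, GEN_NOISE_INPUT_SHAPE])
    # Output shape is (16, 28, 28, 1) with tanh values in [-1, 1].
    return gen(noise, training=False)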
def make_decoder_model():
    """Decoder network structure.

    Returns:
        tf.keras.Model
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(7 * 7 * 64, activation=tf.nn.relu))
    model.add(layers.Reshape((7, 7, 64)))
    model.add(layers.Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same',
                                     activation=tf.nn.relu, use_bias=False))
    model.add(layers.Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same',
                                     activation=tf.nn.relu, use_bias=False))
    model.add(layers.Conv2DTranspose(1, (3, 3), strides=(1, 1), padding='same',
                                     activation=tf.nn.sigmoid, use_bias=False))
    return model
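# Usage sketch for make_decoder_model(), hedged: not part of the original
# source. Because the first Dense layer declares no input shape, the model is
# built on first call; the 2-D latent used here is purely illustrative.
def _demo_decoder():
    decoder = make_decoder_model()
    z = tf.random.normal([8, 2])
    # Spatial path is 7x7 -> 14x14 -> 28x28; sigmoid output in [0, 1],
    # so the result has shape (8, 28, 28, 1).
    return decoder(z)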
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # Note: None is the batch size

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same',
                                     use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    return model
def _do_variational_autoencoding(input_signal, latent_dim=2):
    # Encoder: two strided convolutions, then flatten to a small dense code.
    x = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=2,
                      padding='same')(input_signal)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), strides=2,
                      padding='same')(x)
    x = layers.LeakyReLU()(x)
    shape_before_flattening = K.int_shape(x)
    x = layers.Flatten()(x)
    x = layers.Dense(units=32, activation='relu')(x)

    # Latent distribution parameters, sampled via the reparameterization trick.
    z_mean = layers.Dense(units=latent_dim)(x)
    z_log_var = layers.Dense(units=latent_dim)(x)
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=1.)
    z = z_mean + K.exp(z_log_var) * epsilon

    # Decoder: mirror the encoder with transposed convolutions.
    x = layers.Dense(np.prod(shape_before_flattening[1:]), activation='relu')(z)
    x = layers.Reshape(shape_before_flattening[1:])(x)
    x = layers.Conv2DTranspose(filters=32, kernel_size=(3, 3), strides=2,
                               padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=2,
                               padding='same', activation='relu')(x)
    return x
def define_model(self):
    z = Input(shape=[self.model_parameters.latent_size])
    # Project the latent vector and reshape to a spatial tensor.
    x = layers.Dense(units=8 * 8 * 256, use_bias=False)(z)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)
    x = layers.Reshape((8, 8, 256))(x)
    # Upsample: 8x8 (stride 1) -> 16x16 -> 32x32.
    x = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same',
                               use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same',
                               use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same',
                               use_bias=False, activation='tanh')(x)
    model = Model(name=self.model_name, inputs=z, outputs=x)
    return model
def define_generator(latent_dim=50, nclasses=10):
    # Label branch: embed the class label and project to a 7x7 plane.
    label = layers.Input(shape=(1,))
    li = layers.Embedding(nclasses, 50)(label)
    li = layers.Dense(7 * 7 * 1, activation="relu")(li)
    li = layers.Reshape((7, 7, 1))(li)

    # Noise branch: project the latent vector to a 7x7x384 tensor.
    noise = layers.Input(shape=(latent_dim,))
    n = layers.Dense(7 * 7 * 384, activation="relu")(noise)
    n = layers.Reshape((7, 7, 384))(n)

    # Merge the two branches and upsample 7x7 -> 14x14 -> 28x28.
    merged = layers.concatenate([n, li], axis=-1)
    x = layers.Conv2DTranspose(filters=192, kernel_size=5, strides=2,
                               padding="same")(merged)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2DTranspose(filters=1, kernel_size=5, strides=2,
                               padding="same", activation="tanh")(x)
    model = tf.keras.Model([noise, label], x)
    return model
def create_network(Zt,
                   Ct,
                   filter_size=5,
                   strides=[2, 2, 2, 2, 1],
                   dilations=[1, 2, 3, 4, 5],
                   img_shape=(None, None, 1),
                   noise_shape=(None, None, 1),
                   upscaling_filters=[512, 256, 128, 64, 32],
                   dilations_filters=[64, 128, 256, 512]):
    with tf.name_scope("Gen"):
        # Generator
        Z = kl.Input(noise_shape, tensor=Zt, name="Z")
        C = kl.Input(img_shape, tensor=Ct, name="C")
        layer = Z
        # Upscaling
        for l in range(len(upscaling_filters) - 1):
            layer = kl.Conv2DTranspose(filters=upscaling_filters[l],
                                       kernel_size=filter_size,
                                       padding="same",
                                       strides=strides[l],
                                       kernel_regularizer=kr.l2(),
                                       activation="relu")(layer)
            layer = kl.BatchNormalization()(layer)
        layer = kl.Conv2DTranspose(filters=upscaling_filters[-1],
                                   kernel_size=filter_size,
                                   strides=strides[-1],
                                   padding="same",
                                   activation="relu",
                                   kernel_regularizer=kr.l2())(layer)
        layer = kl.concatenate([layer, C])
        # Dilation
        for l in range(len(dilations_filters) - 1):
            layer = kl.Conv2D(filters=dilations_filters[l],
                              kernel_size=filter_size,
                              padding="same",
                              dilation_rate=dilations[l],
                              activation="relu",
                              kernel_regularizer=kr.l2())(layer)
            layer = kl.BatchNormalization()(layer)
        G_out = kl.Conv2D(filters=img_shape[-1],
                          kernel_size=filter_size,
                          activation="tanh",
                          padding="same",
                          dilation_rate=dilations[-1],
                          kernel_regularizer=kr.l2())(layer)
        model = k.Model(inputs=[Z, C], outputs=G_out, name="G")
    return model
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(4 * 4 * 1024,
                           use_bias=False,
                           input_shape=(100,),
                           kernel_initializer=weight_initializer))
    # model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Reshape((4, 4, 1024)))
    assert model.output_shape == (None, 4, 4, 1024)

    # Upscale using Conv2DTranspose
    # ToDo: Look into upsampling via interpolation and 2d Convolution
    model.add(layers.Conv2DTranspose(512, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False,
                                     kernel_initializer=weight_initializer))
    assert model.output_shape == (None, 8, 8, 512)
    # model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2DTranspose(256, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False,
                                     kernel_initializer=weight_initializer))
    assert model.output_shape == (None, 16, 16, 256)
    # model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False,
                                     kernel_initializer=weight_initializer))
    assert model.output_shape == (None, 32, 32, 128)
    # model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False, activation='tanh',
                                     kernel_initializer=weight_initializer))
    assert model.output_shape == (None, 64, 64, 3)

    return model
def reconstructor(input_shape=(28, 28, 1)):
    model = tf.keras.Sequential()

    # Encoder Block
    model.add(layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same',
                            kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
                            input_shape=input_shape))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(0.2))
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                            kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(0.2))
    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same',
                            kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(0.2))

    # Decoder Block
    model.add(layers.Conv2DTranspose(32, (5, 5), strides=(2, 2),
                                     output_padding=(0, 0), padding='same',
                                     kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02)))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.Conv2DTranspose(16, (5, 5), strides=(2, 2), padding='same',
                                     kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02)))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                                     kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02),
                                     activation='tanh'))
    return model
def down_scaling_loop(x1, iterations, i, conv_channels, window_len, final_shape,
                      max_iterations, b_norm):
    # Define parameters for model
    kernel_width = min([window_len, 3])
    pooling_width = min([window_len, 2])
    x_shrink = final_shape[0] - (2**max_iterations) * round(
        final_shape[0] / (2**max_iterations)) + 1

    if i == 0:
        x1 = layers.Conv2D(1, kernel_size=(x_shrink, 1), strides=(1, 1))(x1)

    conv_kernel = (kernel_width, 1)
    x2 = custom_layers.ReflectionPadding2D(padding=(0, 2))(x1)
    x2 = layers.Conv2D(conv_channels[i], kernel_size=conv_kernel,
                       dilation_rate=(2, 1))(x2)
    x2 = norm_activate(x2, 'relu', b_norm)
    x2 = custom_layers.ReflectionPadding2D(padding=(0, 2))(x2)
    x3 = layers.Conv2D(conv_channels[i], kernel_size=conv_kernel,
                       dilation_rate=(2, 1))(x2)
    x3 = norm_activate(x3, 'relu', b_norm)

    if iterations > 0:
        # Downscale
        x_down = layers.Conv2D(conv_channels[i], kernel_size=(2, 1),
                               strides=(2, 1), padding='same')(x3)
        x4 = layers.Conv2D(final_shape[-1], kernel_size=(1, 4))(x1)
        x4 = norm_activate(x4, 'relu', b_norm)
        x_down = down_scaling_loop(x_down, iterations - 1, i + 1, conv_channels,
                                   window_len, final_shape, max_iterations,
                                   b_norm)
        x_up = layers.Conv2DTranspose(conv_channels[-1], kernel_size=conv_kernel,
                                      strides=(pooling_width, 1),
                                      padding='same')(x_down)
        x4 = tf.add(x4, x_up)
    else:
        x4 = layers.Conv2D(conv_channels[i], kernel_size=(1, 4))(x3)
        x4 = norm_activate(x4, 'relu', b_norm)

    if i == 0:
        # Recover original shape
        x4 = layers.Conv2DTranspose(final_shape[0], kernel_size=(x_shrink, 1))(x4)
        x4 = k_b.squeeze(x4, axis=2)

    return x4
def build_generator(input_shape=(256, 256, 3), num_blocks=9):
    """Generator network architecture"""
    x0 = layers.Input(input_shape)
    x = ReflectionPadding2D(padding=(3, 3))(x0)
    x = layers.Conv2D(filters=64, kernel_size=7, strides=1,
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # downsample
    x = layers.Conv2D(filters=128, kernel_size=3, strides=2, padding='same',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(filters=256, kernel_size=3, strides=2, padding='same',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # residual
    for _ in range(num_blocks):
        x = _resblock(x)

    # upsample
    x = layers.Conv2DTranspose(filters=128, kernel_size=3, strides=2,
                               padding='same',
                               kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2DTranspose(filters=64, kernel_size=3, strides=2,
                               padding='same',
                               kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # final
    x = ReflectionPadding2D(padding=(3, 3))(x)
    x = layers.Conv2D(filters=3, kernel_size=7, activation='tanh',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    return Model(inputs=x0, outputs=x)
def make_generator_model(input_tensor=None, input_shape=(noise_dim,)):
    """
    Returns:
        tf.keras.Model
    """
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = layers.Dense(7 * 7 * 256, activation=tf.nn.leaky_relu, use_bias=False,
                     name='fc1')(img_input)
    x = layers.BatchNormalization(name='bn1')(x)
    x = layers.Reshape(target_shape=(7, 7, 256), name='reshape1')(x)
    x = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1),
                               activation=tf.nn.leaky_relu, padding='same',
                               use_bias=False, name='deconv1')(x)
    x = layers.BatchNormalization(name='bn2')(x)
    x = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2),
                               activation=tf.nn.leaky_relu, padding='same',
                               use_bias=False, name='deconv2')(x)
    x = layers.BatchNormalization(name='bn3')(x)
    x = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), activation=tf.nn.tanh,
                               padding='same', use_bias=False, name='deconv3')(x)

    if input_tensor is not None:
        inputs = utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = models.Model(inputs, x, name='Generator_model')
    return model
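# Usage sketch, hedged: not part of the original source. It assumes
# `import tensorflow as tf` and that noise_dim (e.g. 100) is defined above.
# The function supports both a standalone model and wiring into an existing
# graph through input_tensor.
def _demo_generator_model():
    g = make_generator_model()
    imgs = g(tf.random.normal([4, noise_dim]))  # -> (4, 28, 28, 1)

    z = tf.keras.layers.Input(shape=(noise_dim,))
    g2 = make_generator_model(input_tensor=z)   # reuses z as the model input
    return imgs, g2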
def upsample(units, input_shape=None, apply_dropout=False, layer_type='dense',
             output_padding=(1, 1)):
    initializer = random_normal_initializer(0., 0.02)
    seq = Sequential()
    if layer_type == 'dense':
        seq.add(layers.Dense(units,
                             input_shape=[input_shape],
                             kernel_initializer=initializer,
                             use_bias=False))
    elif layer_type == 'conv':
        seq.add(layers.Conv2DTranspose(filters=units,
                                       kernel_size=3,
                                       strides=(2, 2),
                                       padding='same',
                                       input_shape=input_shape,
                                       kernel_initializer=initializer,
                                       use_bias=False,
                                       output_padding=output_padding))
    else:
        raise ValueError('wrong layer_type!')
    seq.add(layers.BatchNormalization())
    if apply_dropout:
        seq.add(layers.Dropout(0.5))
    seq.add(layers.ReLU())
    return seq
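# Usage sketch for both branches of upsample(), hedged: not part of the
# original source; the shapes here are illustrative only.
def _demo_upsample():
    dense_up = upsample(256, input_shape=100, layer_type='dense')
    conv_up = upsample(64, input_shape=(16, 16, 128), layer_type='conv')
    # conv_up doubles the spatial size: (16, 16, 128) -> (32, 32, 64).
    return dense_up, conv_up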
def create_network(Zt,
                   Ct,
                   channels=1,
                   encoder_filters=[64, 128, 256],
                   encoder_ks=[7, 3, 3],
                   encoder_strides=[1, 2, 2],
                   resblock_filters=[256, 256, 256, 256, 256, 256],
                   resblock_ks=[3, 3, 3, 3, 3, 3],
                   decoder_filters=[128, 64],
                   decoder_ks=[3, 3, 7],
                   decoder_strides=[2, 2, 1]):
    with tf.name_scope("Gen"):
        Z = kl.Input((None, None, channels), tensor=Zt, name="Z")
        C = kl.Input((None, None, channels), tensor=Ct, name="C")

        # Encoder
        layer = C
        for l in range(len(encoder_filters)):
            layer = kl.Conv2D(filters=encoder_filters[l],
                              kernel_size=encoder_ks[l],
                              padding="same",
                              activation="relu",
                              strides=encoder_strides[l])(layer)
            layer = InstanceNormalization()(layer)
        layer = kl.concatenate([layer, Z])

        # Transformer
        for l in range(len(resblock_filters)):
            layer = ResidualBlock(resblock_filters[l] + channels,
                                  nb_layers=3,
                                  kernel_size=resblock_ks[l],
                                  normalization="instancenorm")(layer)

        # Decoder
        for l in range(len(decoder_filters)):
            layer = kl.Conv2DTranspose(filters=decoder_filters[l],
                                       kernel_size=decoder_ks[l],
                                       padding="same",
                                       strides=decoder_strides[l],
                                       activation="relu")(layer)
            layer = InstanceNormalization()(layer)
        G_out = kl.Conv2D(filters=channels,
                          kernel_size=decoder_ks[-1],
                          strides=decoder_strides[-1],
                          activation="tanh",
                          padding="same")(layer)

        model = k.Model(inputs=[Z, C], outputs=G_out, name="G")
    return model
def _get_upsampled_signal(x, n_filters_lst=[32, 64]):
    x = layers.Conv2DTranspose(filters=n_filters_lst[0], kernel_size=(5, 5),
                               strides=2, padding='same',
                               name='decode_convtrans_1')(x)
    # x = layers.BatchNormalization(name='decode_bn_1')(x)
    x = layers.LeakyReLU(name='decode_relu_1')(x)
    x = layers.Conv2DTranspose(filters=n_filters_lst[1], kernel_size=(3, 3),
                               strides=2, padding='same',
                               name='decode_convtrans_2')(x)
    # x = layers.BatchNormalization(name='decode_bn_2')(x)
    x = layers.LeakyReLU(name='decode_relu_2')(x)
    return x
def conv1d_transpose(x, filters, kernel_size=3, stride=2, padding='same',
                     activation_=None, is_training=True):
    """1-D transposed convolution implemented via Conv2DTranspose.

    Args:
        x: input tensor of shape (N, L, C)
        filters: number of filters
        kernel_size: int
        stride: int
        padding: 'same' or 'valid'
        activation_: activation function
        is_training: True or False

    Returns:
        tensor of shape (N, L_, filters)
    """
    # Insert a dummy width axis so a 2-D transposed convolution acts as 1-D.
    _x = tf.expand_dims(x, axis=2)
    _x = activation(
        kl.Conv2DTranspose(filters, (kernel_size, 1), (stride, 1), padding,
                           activation=None, trainable=is_training)(_x),
        activation_)
    _x = tf.squeeze(_x, axis=2)
    return _x
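# Usage sketch, hedged: not part of the original source. It assumes the
# module's own `activation` helper (returning the tensor unchanged when
# activation_ is None) plus `import tensorflow as tf`.
def _demo_conv1d_transpose():
    x = tf.random.normal([4, 128, 16])            # (batch, length, channels)
    y = conv1d_transpose(x, filters=8, stride=2)  # -> (4, 256, 8) with 'same' padding
    return y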
def create_gen(Zt,
               Ct,
               img_shape=(32, 32, 3),
               noise_shape=(4, 4, 1),
               filter_size=5,
               strides=[2, 2, 2],
               filters=[512, 256, 128]):
    with tf.name_scope("Gen"):
        # Generator
        Z = kl.Input(noise_shape, tensor=Zt, name="Z")
        Zf = kl.Flatten()(Z)
        C = kl.Input(img_shape, tensor=Ct, name="C")
        layer = kl.Dense(2048)(Zf)
        layer = kl.Reshape((4, 4, 128))(layer)
        for l in range(len(filters)):
            layer = kl.Conv2DTranspose(filters=filters[l],
                                       kernel_size=filter_size,
                                       padding="same",
                                       strides=strides[l],
                                       activation="relu")(layer)
            layer = kl.BatchNormalization()(layer)
        layer = kl.concatenate([layer, C])
        for l in range(len(filters)):
            layer = kl.Conv2DTranspose(filters=filters[l],
                                       kernel_size=filter_size,
                                       padding="same",
                                       dilation_rate=l + 2,
                                       activation="relu")(layer)
            layer = kl.BatchNormalization()(layer)
        G_out = kl.Conv2D(filters=img_shape[-1],
                          kernel_size=filter_size,
                          activation="tanh",
                          padding="same")(layer)
        model = k.Model(inputs=[Z, C], outputs=G_out)
    return model
def upconv3x3(channels, stride=1, kernel=(3, 3)):
    return layers.Conv2DTranspose(filters=channels,
                                  kernel_size=kernel,
                                  strides=stride,
                                  padding='same',
                                  use_bias=False,
                                  kernel_initializer=tf.random_normal_initializer())
def __init__(self):
    super().__init__(name='pix2pix_generator')
    self.dense_1 = layers.Dense(1024, input_shape=(74,))
    self.bn_1 = layers.BatchNormalization()
    self.relu_1 = layers.ReLU()
    self.dense_2 = layers.Dense(128 * 7 * 7)
    self.bn_2 = layers.BatchNormalization()
    self.relu_2 = layers.ReLU()
    self.reshape = layers.Reshape((7, 7, 128))
    self.convT_1 = layers.Conv2DTranspose(64, 4, 2, padding='same')
    self.bn_3 = layers.BatchNormalization()
    self.relu_3 = layers.ReLU()
    self.convT_2 = layers.Conv2DTranspose(1, 4, 2, padding='same',
                                          activation='sigmoid')
def __init__(self, filters, kernel_size=4, strides=2, padding='same'):
    super(UpConv2D, self).__init__()
    self.up_conv_op = layers.Conv2DTranspose(filters=filters,
                                             kernel_size=kernel_size,
                                             strides=strides,
                                             padding=padding,
                                             use_bias=False,
                                             kernel_initializer='he_normal')
def __init__(self, filters, kernel_size, strides):
    super(UpConv2D, self).__init__()
    self.up_conv_op = layers.Conv2DTranspose(
        filters,
        kernel_size=kernel_size,
        strides=strides,
        padding='same',
        kernel_initializer=keras.initializers.RandomNormal(stddev=0.02),
        use_bias=True,
        bias_initializer=keras.initializers.Constant(value=0.0))
def build_layer6(self, inp):
    layer6 = Sequential([
        layers.Conv2D(256, 2),
        Activation('relu'),
        layers.Conv2D(128, 2),
        Activation('relu'),
        layers.Conv2DTranspose(64, 2, 2),
    ])(inp)
    print('layer 6 ', layer6.shape)
    return keras.Model(inp, layer6)
def make_generator_model():
    """Generator network structure.

    Returns:
        Sequential model.
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(8 * 8 * 256, use_bias=False, input_shape=(128,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((8, 8, 256)))
    assert model.output_shape == (None, 8, 8, 256)  # Note: None is the batch size

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same',
                                     use_bias=False))
    assert model.output_shape == (None, 8, 8, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False))
    assert model.output_shape == (None, 16, 16, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 32, 32, 3)

    return model
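# Usage sketch, hedged: not part of the original source; assumes
# `import tensorflow as tf`. The latent size 128 matches input_shape above.
def _demo_make_generator_model():
    g = make_generator_model()
    z = tf.random.normal([1, 128])
    return g(z, training=False)  # -> (1, 32, 32, 3), tanh range [-1, 1]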
def res_decoder_block(input_layers, concat_tensor, num_filters):
    decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2),
                                     kernel_initializer=initializer,
                                     padding="same")(input_layers)
    decoder = layers.concatenate([decoder, concat_tensor], axis=-1)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation("relu")(decoder)
    decoder = res_conv_block(decoder, num_filters)
    return decoder
def __init__(self,
             image_shape: tuple,
             embedding_size: int = 16,
             conv_layers: Optional[List[ConvLayerConfig]] = None,
             l2_param_penalty: float = 0.00):
    pn = tf.keras.regularizers.l2(l2_param_penalty) if l2_param_penalty > 0 else None
    if conv_layers is None:
        conv_layers = [
            ConvLayerConfig(stride=2, filter_size=3, nr_filters=8,
                            activation='elu', batch_norm=True),
            ConvLayerConfig(stride=2, filter_size=3,
                            nr_filters=int(image_shape[-1]),
                            activation='elu', batch_norm=True),
        ]

    img_s = [int(x) for x in image_shape[:2]]
    for cl in conv_layers:
        img_s = [s / cl.stride for s in img_s]
    initial_shape = (int(img_s[0]), int(img_s[1]), 1)
    assert np.allclose(initial_shape[:2], img_s[:2]), \
        'eventual size divided by strides should be an integer'

    encoding = layers.Input(shape=(embedding_size,), name='embedding_input',
                            dtype=tf.float32)
    e = layers.Dense(units=np.prod(initial_shape), activation='elu')(encoding)
    e = layers.Reshape(target_shape=initial_shape)(e)
    for cl in conv_layers:
        e = layers.Conv2DTranspose(filters=cl.nr_filters,
                                   kernel_size=(cl.filter_size, cl.filter_size),
                                   strides=(cl.stride, cl.stride),
                                   data_format='channels_last',
                                   padding='same',
                                   activation=cl.activation,
                                   kernel_regularizer=pn)(e)
        if cl.batch_norm:
            e = layers.BatchNormalization()(e)
    rgb_norm = e
    assert rgb_norm.shape[1:] == image_shape
    self.model = tf.keras.Model(inputs=[encoding], outputs=[rgb_norm])
def create_gen(Zt,
               Ct,
               img_shape=(28, 28, 1),
               noise_shape=(7, 7, 1),
               filter_size=3,
               strides=[2, 2],
               filters=[128, 64]):
    with tf.name_scope("Gen"):
        # Generator
        Z = kl.Input(noise_shape, tensor=Zt, name="Z")
        C = kl.Input(img_shape, tensor=Ct, name="C")
        Zf = kl.Flatten()(Z)
        layer = kl.Dense(np.prod(noise_shape) * 7)(Zf)
        layer = kl.Reshape((noise_shape[0], noise_shape[1],
                            noise_shape[-1] * 7))(layer)
        for l in range(len(filters)):
            layer = kl.Conv2DTranspose(filters=filters[l],
                                       kernel_size=filter_size,
                                       padding="same",
                                       strides=strides[l],
                                       activation="relu")(layer)
            layer = kl.BatchNormalization()(layer)
        for l in range(len(filters)):
            layer = kl.Conv2DTranspose(filters=filters[l],
                                       kernel_size=filter_size,
                                       padding="same",
                                       dilation_rate=l + 2,
                                       activation="relu")(layer)
            layer = kl.BatchNormalization()(layer)
        G_out = kl.Conv2DTranspose(filters=img_shape[-1],
                                   kernel_size=filter_size,
                                   activation="tanh",
                                   padding="same")(layer)
        model = k.Model(inputs=[Z, C], outputs=G_out)
    return model
def output_block(input_tensor, concat_tensor, num_filters):
    # Note: unlike decoder_block below, concat_tensor is accepted here for
    # interface parity but is not merged into the decoder path.
    decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2),
                                     padding='same')(input_tensor)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.LeakyReLU(alpha=0.3)(decoder)
    decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.LeakyReLU(alpha=0.3)(decoder)
    return decoder
def decoder_block(input_tensor, concat_tensor, num_filters):
    decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2),
                                     padding='same')(input_tensor)
    decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation('relu')(decoder)
    decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation('relu')(decoder)
    return decoder
def get_unet_like_test_model(input_shapes):
    inputs = []
    for i, input_shape in enumerate(input_shapes):
        inputs.append(tf.keras.Input(shape=input_shape[1:],
                                     name='input_{}'.format(i + 1)))
    # pylint: disable=unbalanced-tuple-unpacking
    input_1, _ = inputs
    conv_1 = layers.Conv2D(filters=8, kernel_size=1)(input_1)
    conv_2 = layers.Conv2D(filters=16, kernel_size=1)(conv_1)
    conv_3 = layers.Conv2D(filters=32, kernel_size=1)(conv_2)
    conv_t_3 = layers.Conv2DTranspose(filters=16, kernel_size=1)(conv_3)
    cat_1 = layers.Concatenate(0)([conv_t_3, conv_2])
    conv_t_2 = layers.Conv2DTranspose(filters=8, kernel_size=1)(cat_1)
    cat_2 = layers.Concatenate(0)([conv_t_2, conv_1])
    outputs = layers.Conv2DTranspose(filters=4, kernel_size=1)(cat_2)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
def define_generator(latent_dim=50, nclasses=10):
    # Label branch: embed the class label and project to a 7x7 plane.
    label = layers.Input(shape=(1,))
    li = layers.Embedding(nclasses, 50)(label)
    li = layers.Dense(7 * 7)(li)
    li = layers.Reshape((7, 7, 1))(li)

    # Noise branch: project the latent vector to a 7x7x128 tensor.
    in_lat = layers.Input((latent_dim,))
    lat = layers.Dense(7 * 7 * 128)(in_lat)
    lat = layers.Reshape((7, 7, 128))(lat)

    # Merge and upsample 7x7 -> 14x14 -> 28x28.
    x = layers.concatenate([li, lat], axis=-1)
    x = layers.Conv2DTranspose(filters=128, kernel_size=4, strides=2,
                               padding="same")(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    x = layers.Conv2DTranspose(filters=128, kernel_size=4, strides=2,
                               padding="same")(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    out = layers.Conv2D(filters=1, kernel_size=7, activation="tanh",
                        padding="same")(x)
    model = tf.keras.Model([label, in_lat], out)
    return model
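# Usage sketch for the conditional generator above, hedged: not part of the
# original source; assumes `import tensorflow as tf`. Note the input order is
# [label, in_lat], matching the Model definition.
def _demo_define_generator():
    g = define_generator(latent_dim=50, nclasses=10)
    labels = tf.constant([[3], [7]])           # class labels, shape (2, 1)
    noise = tf.random.normal([2, 50])
    return g([labels, noise], training=False)  # -> (2, 28, 28, 1), tanh output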