def __init__(self, ch, out_channels, activation=tf.keras.layers.ReLU()):
    super(UNetGenerator, self).__init__()
    initializer = tf.keras.initializers.glorot_uniform()
    # encoder
    self.down1 = DownSample(ch, 3, apply_batchnorm=False, activation=activation)
    self.down2 = DownSample(ch * 2, 3, activation=activation)
    self.enc_attention = SNNonLocalBlock(ch * 2)
    self.down3 = DownSample(ch * 2, 3, activation=activation)
    self.down4 = DownSample(ch * 4, 3, activation=activation)
    self.down5 = DownSample(ch * 4, 3, activation=activation)
    self.down6 = DownSample(ch * 8, 3, activation=activation)
    self.down7 = DownSample(ch * 8, 3, activation=activation)
    # decoder
    self.up1 = UpSample(ch * 8, 3, apply_dropout=True, activation=activation)
    self.up2 = UpSample(ch * 4, 3, apply_dropout=True, activation=activation)
    self.up3 = UpSample(ch * 4, 3, apply_dropout=True, activation=activation)
    self.up4 = UpSample(ch * 2, 3, activation=activation)
    self.up5 = UpSample(ch * 2, 3, activation=activation)
    self.dec_attention = SNNonLocalBlock(ch * 4)
    self.up6 = UpSample(ch, 3, activation=activation)
    self.concat = tf.keras.layers.Concatenate()
    self.conv = SNTransposeConv2D(out_channels, kernel_size=4, strides=2,
                                  padding='SAME', kernel_initializer=initializer)
def __init__(self, ch, output_depth, activation=tf.keras.layers.ReLU()):
    super(UNetGenerator, self).__init__()
    initializer = tf.keras.initializers.Orthogonal()
    kernel_regularizer = conv_orthogonal_regularizer(0.0001)
    # encoder (trailing comments give the spatial size after each stage)
    self.down1 = OptimizedBlock(ch)                                                  # 128
    self.down2 = BlockDown(ch, ch * 2, activation=activation, downsample=True)       # 64
    self.down3 = BlockDown(ch * 2, ch * 2, activation=activation, downsample=True)   # 32
    self.enc_attention = SNNonLocalBlock(ch * 2, initializer=initializer,
                                         kernel_regularizer=kernel_regularizer)
    self.down4 = BlockDown(ch * 2, ch * 4, activation=activation, downsample=True)   # 16
    self.down5 = BlockDown(ch * 4, ch * 4, activation=activation, downsample=True)   # 8
    self.down6 = BlockDown(ch * 4, ch * 8, activation=activation, downsample=True)   # 4
    self.down7 = BlockDown(ch * 8, ch * 8, activation=activation, downsample=True)   # 2
    self.down8 = BlockDown(ch * 8, ch * 16, activation=activation, downsample=True)  # 1
    # decoder
    self.up1 = BlockUp(ch * 16, ch * 8, activation=activation, upsample=True)   # 2
    self.up2 = BlockUp(ch * 8, ch * 8, activation=activation, upsample=True)    # 4
    self.up3 = BlockUp(ch * 8, ch * 4, activation=activation, upsample=True)    # 8
    self.up4 = BlockUp(ch * 4, ch * 4, activation=activation, upsample=True)    # 16
    self.up5 = BlockUp(ch * 4, ch * 2, activation=activation, upsample=True)    # 32
    self.dec_attention = SNNonLocalBlock(ch * 2, initializer=initializer,
                                         kernel_regularizer=kernel_regularizer)
    self.up6 = BlockUp(ch * 2, ch * 2, activation=activation, upsample=True)    # 64
    self.up7 = BlockUp(ch * 2, ch, activation=activation, upsample=True)        # 128
    self.concat = tf.keras.layers.Concatenate()
    self.conv = SNTransposeConv2D(output_depth, 4, strides=2, padding='SAME',
                                  kernel_initializer=initializer,
                                  kernel_regularizer=kernel_regularizer)
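# The constructor above only wires up the layers; the forward pass below is a
# plausible sketch of how they compose. Assumptions: each block accepts a
# `training` flag, skip connections are concatenated (the declared in_channels
# appear to be pre-concatenation widths; Keras convs infer their real input
# depth at build time), and the output is squashed with tanh. This is a
# sketch, not the repository's confirmed `call` implementation.
def call(self, x, training=True):
    # encoder: resolution halves at each downsample stage
    d1 = self.down1(x, training=training)                     # 128
    d2 = self.down2(d1, training=training)                    # 64
    d3 = self.down3(d2, training=training)                    # 32
    d3 = self.enc_attention(d3)                               # self-attention at 32x32
    d4 = self.down4(d3, training=training)                    # 16
    d5 = self.down5(d4, training=training)                    # 8
    d6 = self.down6(d5, training=training)                    # 4
    d7 = self.down7(d6, training=training)                    # 2
    d8 = self.down8(d7, training=training)                    # 1 (bottleneck)
    # decoder: upsample and merge the encoder feature of matching resolution
    u1 = self.up1(d8, training=training)                      # 2
    u2 = self.up2(self.concat([u1, d7]), training=training)   # 4
    u3 = self.up3(self.concat([u2, d6]), training=training)   # 8
    u4 = self.up4(self.concat([u3, d5]), training=training)   # 16
    u5 = self.up5(self.concat([u4, d4]), training=training)   # 32
    u5 = self.dec_attention(u5)                               # self-attention at 32x32
    u6 = self.up6(self.concat([u5, d3]), training=training)   # 64
    u7 = self.up7(self.concat([u6, d2]), training=training)   # 128
    return tf.nn.tanh(self.conv(self.concat([u7, d1])))       # 256, in [-1, 1]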
def __init__(self, ch=64, n_classes=0, activation=tf.nn.relu):
    super(SNResNetProjectionDiscriminator, self).__init__()
    self.activation = activation
    initializer = tf.keras.initializers.glorot_uniform()
    self.block1 = OptimizedBlock(ch)
    self.self_atten = SNNonLocalBlock(ch)
    self.block2 = Block(ch, ch * 2, activation=activation, downsample=True)
    self.block3 = Block(ch * 2, ch * 4, activation=activation, downsample=True)
    self.block4 = Block(ch * 4, ch * 8, activation=activation, downsample=True)
    self.block5 = Block(ch * 8, ch * 16, activation=activation, downsample=True)
    self.l6 = SNDense(units=1, kernel_initializer=initializer)
    if n_classes > 0:
        self.l_y = SNEmbeeding(embedding_size=ch * 16, n_classes=n_classes,
                               kernel_initializer=initializer)
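# For reference, the standard projection-discriminator forward pass (Miyato &
# Koyama, "cGANs with Projection Discriminator", 2018) that this constructor
# implies: global sum pooling, a linear real/fake head, and an inner product
# with a class embedding when labels are supplied. A hedged sketch; the block
# signatures are assumed, not confirmed.
def call(self, x, y=None, training=True):
    h = self.block1(x, training=training)
    h = self.self_atten(h)
    h = self.block2(h, training=training)
    h = self.block3(h, training=training)
    h = self.block4(h, training=training)
    h = self.block5(h, training=training)
    h = self.activation(h)
    h = tf.reduce_sum(h, axis=(1, 2))      # global sum pooling over H and W
    output = self.l6(h)
    if y is not None:
        # projection term: <embed(y), features> added to the unconditional logit
        output += tf.reduce_sum(self.l_y(y) * h, axis=1, keepdims=True)
    return output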
def __init__(self, ch=64, activation=tf.keras.layers.ReLU()):
    super(SNResNetPatchGanDiscriminator, self).__init__()
    self.activation = activation
    initializer = tf.keras.initializers.glorot_uniform()
    self.block1 = OptimizedBlock(ch * 2, ksize=4)
    self.block2 = BlockDown(ch * 2, ch * 4, ksize=4, activation=activation, downsample=True)
    self.block3 = BlockDown(ch * 4, ch * 8, ksize=4, activation=activation, downsample=True)
    self.self_atten = SNNonLocalBlock(ch * 8)
    self.bn = tf.keras.layers.BatchNormalization()
    self.c1 = SNConv2D(filters=ch * 8, kernel_size=4, strides=1, padding="VALID",
                       kernel_initializer=initializer, use_bias=False)
    self.pad = tf.keras.layers.ZeroPadding2D()
    self.c2 = SNConv2D(1, 4, strides=1, padding="VALID", kernel_initializer=initializer)
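# A plausible pix2pix-style forward pass for this PatchGAN: every spatial
# position of the final 1-channel map scores one receptive-field patch as real
# or fake. Assumes the input is already the channel-concatenation of the
# conditioning and candidate images and that the blocks take a `training`
# flag; a sketch, not the confirmed implementation.
def call(self, x, training=True):
    h = self.block1(x, training=training)
    h = self.block2(h, training=training)
    h = self.block3(h, training=training)
    h = self.self_atten(h)
    h = self.c1(self.pad(h))                           # VALID conv shrinks the map
    h = self.activation(self.bn(h, training=training))
    return self.c2(self.pad(h))                        # per-patch logits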
def __init__(self, ch, output_depth, activation=tf.keras.layers.ReLU()):
    super(UNetGenerator, self).__init__()
    initializer = tf.keras.initializers.Orthogonal()
    kernel_regularizer = conv_orthogonal_regularizer(0.0001)
    # encoder
    self.down1 = DownSample(ch * 2, 3, apply_batchnorm=False, activation=activation,
                            initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.down2 = DownSample(ch * 4, 3, activation=activation,
                            initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.enc_attention = SNNonLocalBlock(ch * 4, initializer=initializer,
                                         kernel_regularizer=kernel_regularizer)
    self.down3 = DownSample(ch * 4, 3, activation=activation,
                            initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.down4 = DownSample(ch * 8, 3, activation=activation,
                            initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.down5 = DownSample(ch * 8, 3, activation=activation,
                            initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.down6 = DownSample(ch * 16, 3, activation=activation,
                            initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.down7 = DownSample(ch * 16, 3, activation=activation,
                            initializer=initializer, kernel_regularizer=kernel_regularizer)
    # decoder
    self.up1 = UpSample(ch * 16, 3, apply_dropout=True, activation=activation,
                        initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.up2 = UpSample(ch * 8, 3, apply_dropout=True, activation=activation,
                        initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.up3 = UpSample(ch * 8, 3, apply_dropout=True, activation=activation,
                        initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.up4 = UpSample(ch * 4, 3, activation=activation,
                        initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.up5 = UpSample(ch * 4, 3, activation=activation,
                        initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.dec_attention = SNNonLocalBlock(ch * 8, initializer=initializer,
                                         kernel_regularizer=kernel_regularizer)
    self.up6 = UpSample(ch * 2, 3, activation=activation,
                        initializer=initializer, kernel_regularizer=kernel_regularizer)
    self.concat = tf.keras.layers.Concatenate()
    self.conv = SNTransposeConv2D(output_depth, 4, strides=2, padding='SAME',
                                  kernel_initializer=initializer,
                                  kernel_regularizer=kernel_regularizer)
def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=tf.keras.layers.ReLU(),
             n_classes=0, distribution="normal"):
    super(ResNetGenerator, self).__init__()
    initializer = tf.keras.initializers.Orthogonal()
    kernel_regularizer = conv_orthogonal_regularizer(0.0001)
    dense_regularizer = dense_orthogonal_regularizer(0.0001)
    self.bottom_width = bottom_width
    self.activation = activation
    self.distribution = distribution
    self.dim_z = dim_z
    self.n_classes = n_classes
    self.l1 = SNDense(units=(bottom_width ** 2) * ch * 16, kernel_initializer=initializer,
                      kernel_regularizer=dense_regularizer)
    self.block2 = Block(ch * 16, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
    self.block3 = Block(ch * 8, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
    self.block4 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
    self.self_atten = SNNonLocalBlock(ch * 4, kernel_regularizer=kernel_regularizer)
    self.block5 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
    self.block6 = Block(ch * 2, ch, activation=activation, upsample=True, n_classes=n_classes)
    self.b6 = tf.keras.layers.BatchNormalization()
    self.l6 = SNConv2D(3, kernel_size=3, strides=1, padding="SAME",
                       kernel_initializer=initializer, kernel_regularizer=kernel_regularizer)
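# The usual SAGAN-style generator forward pass this constructor suggests:
# project the latent, reshape to a bottom_width x bottom_width grid, upsample
# through the residual blocks (conditionally normalized when n_classes > 0),
# and emit an RGB image through tanh. Block's (x, y, training) signature is an
# assumption here, as is the final tanh.
def call(self, z, y=None, training=True):
    h = self.l1(z)
    h = tf.reshape(h, (tf.shape(z)[0], self.bottom_width, self.bottom_width, -1))  # 4x4, ch*16
    h = self.block2(h, y, training=training)   # 8x8
    h = self.block3(h, y, training=training)   # 16x16
    h = self.block4(h, y, training=training)   # 32x32
    h = self.self_atten(h)                     # self-attention at 32x32
    h = self.block5(h, y, training=training)   # 64x64
    h = self.block6(h, y, training=training)   # 128x128
    h = self.activation(self.b6(h, training=training))
    return tf.nn.tanh(self.l6(h))              # RGB in [-1, 1]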
def __init__(self, ch, activation=tf.keras.layers.ReLU(), n_classes=0):
    super(ResnetDiscriminator, self).__init__()
    initializer = tf.keras.initializers.glorot_uniform()
    self.activation = activation
    self.concat = tf.keras.layers.Concatenate()
    self.block1 = OptimizedBlock(ch, ksize=3)
    self.block2 = Block(ch, ch * 2, ksize=3, downsample=True)
    self.sn_block = SNNonLocalBlock(ch * 2)
    self.block3 = Block(ch * 2, ch * 4, ksize=3, downsample=True)
    self.block4 = Block(ch * 4, ch * 8, ksize=3, downsample=True)
    self.block5 = Block(ch * 8, ch * 16, ksize=3, downsample=True)
    self.linear = SNDense(units=1, kernel_initializer=initializer)
    if n_classes > 0:
        self.embeddings = SNEmbeeding(embedding_size=ch * 16, n_classes=n_classes,
                                      kernel_initializer=initializer)
def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=tf.keras.layers.ReLU(),
             n_classes=0, distribution="normal"):
    super(ResNetGenerator, self).__init__()
    initializer = tf.keras.initializers.glorot_uniform()
    self.bottom_width = bottom_width
    self.activation = activation
    self.distribution = distribution
    self.dim_z = dim_z
    self.n_classes = n_classes
    self.l1 = SNDense(units=(bottom_width ** 2) * ch * 16, kernel_initializer=initializer)
    self.block2 = Block(ch, ch, activation=activation, upsample=True, n_classes=n_classes)
    self.self_atten = SNNonLocalBlock(ch)
    self.block3 = Block(ch, ch, activation=activation, upsample=True, n_classes=n_classes)
    self.block4 = Block(ch, ch, activation=activation, upsample=True, n_classes=n_classes)
    self.b6 = tf.keras.layers.BatchNormalization()
    self.l6 = SNConv2D(3, kernel_size=3, strides=1, padding="SAME", kernel_initializer=initializer)
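# Hypothetical end-to-end usage pairing this lighter generator with the
# projection discriminator above. The (z, y) / (x, y) call signatures follow
# the common SNGAN convention and are assumptions, as are the batch size and
# class count used below.
generator = ResNetGenerator(ch=64, dim_z=128, n_classes=10)
discriminator = SNResNetProjectionDiscriminator(ch=64, n_classes=10)

z = tf.random.normal((8, 128))                           # a batch of latents
y = tf.random.uniform((8,), maxval=10, dtype=tf.int32)   # class labels
fake = generator(z, y, training=True)                    # generated images
logits = discriminator(fake, y, training=True)           # projection logits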