def __init__(self):
    # He-normal initialisation for the dense projection weights.
    w_init = initializers.HeNormal()
    super(Generator, self).__init__(
        # A 100-dim noise vector is the seed of generation; project it
        # up to a 7x7x128 feature map.
        l0z=L.Linear(100, 7 * 7 * 128, initialW=w_init),
        ps1=PixelShuffler(2, 128, 256),
        ps2=PixelShuffler(2, 64, 4),
        bn0=L.BatchNormalization(7 * 7 * 128),
        bn1=L.BatchNormalization(64),
    )
def wdsr(scale, filters, n_resblocks, res_block):
    """Build a WDSR-style super-resolution model.

    A deep main branch (conv -> residual blocks -> conv -> pixel shuffle)
    is summed with a shallow skip branch taken straight from the input.
    """
    x_in = Input(shape=(None, None, 3))

    # Main (deep) branch.
    main = Conv2D(filters, 3, padding='same')(x_in)
    for _ in range(n_resblocks):
        main = res_block(main, filters)
    main = Conv2D(3 * scale**2, 3, padding='same')(main)
    main = PixelShuffler(scale)(main)

    # Skip (shallow) branch: one wide conv directly from the input.
    skip = Conv2D(3 * scale**2, 5, padding='same')(x_in)
    skip = PixelShuffler(scale)(skip)

    return Model(x_in, Add()([main, skip]))
def encoder():
    """Strided-conv downsampling stack with a dense bottleneck and one
    pixel-shuffle upscale at the tail."""
    inputs = Input(shape=IMAGE_SHAPE)

    h = inputs
    # Each strided conv halves the spatial resolution.
    for depth in (128, 256, 512, 1024):
        h = Conv2D(depth, kernel_size=5, strides=2, padding='same')(h)
        h = LeakyReLU(0.1)(h)

    # Dense bottleneck, then back to an 8x8 spatial map.
    h = Dense(1024)(Flatten()(h))
    h = Dense(8 * 8 * 1024)(h)
    h = Reshape((8, 8, 1024))(h)

    # Single pixel-shuffle upscale followed by a wide conv.
    h = PixelShuffler()(h)
    h = Conv2D(256 * 4, kernel_size=3, padding='same')(h)
    h = LeakyReLU(0.1)(h)

    return Model(inputs, h)
def block(x):
    # Conv expands channels 4x so the subsequent pixel shuffle trades
    # them for spatial resolution.
    y = Conv2D(filters * 4, kernel_size=5, use_bias=False,
               kernel_initializer=RandomNormal(0, 0.02), padding='same')(x)
    y = LeakyReLU(0.1)(y)
    return PixelShuffler()(y)
def block(x):
    # Upscale via pixel shuffle, then crop any overshoot so the result
    # matches out_shape exactly.
    y = Conv2D(out_shape[2] * 4, 3, padding='same')(x)
    y = LeakyReLU(0.1)(y)
    y = PixelShuffler()(y)

    height, width = y.get_shape().as_list()[1:3]
    excess_h = height - out_shape[0]
    excess_w = width - out_shape[1]
    if excess_h or excess_w:
        y = Cropping2D(((0, excess_h), (0, excess_w)))(y)
    return y
def block(x, use_instance_norm=use_instance_norm):
    # 4x channel expansion feeding a pixel shuffle; instance norm is
    # applied between the conv and the activation when enabled.
    y = Conv2D(filters * 4, kernel_size=3, use_bias=False,
               kernel_initializer=RandomNormal(0, 0.02), padding='same')(x)
    if use_instance_norm:
        y = inst_norm()(y)
    y = LeakyReLU(0.1)(y)
    return PixelShuffler()(y)
def decoder():
    """Decode an 8x8x512 feature map into an RGB image via four
    conv + pixel-shuffle upscale stages and a sigmoid output conv."""
    inputs = Input(shape=(8, 8, 512))

    h = Conv2D(1024, kernel_size=3, padding='same')(inputs)
    h = LeakyReLU(0.1)(h)
    h = PixelShuffler()(h)

    # Each stage quadruples channels, then shuffles them into 2x spatial size.
    for depth in (128, 64, 32):
        h = Conv2D(depth * 4, kernel_size=3, padding='same')(h)
        h = LeakyReLU(0.1)(h)
        h = PixelShuffler()(h)

    # Sigmoid keeps the RGB output in [0, 1].
    h = Conv2D(3, kernel_size=5, padding='same', activation="sigmoid")(h)
    return Model(inputs, h)
def residual_decoder(h, w, c=3, k=4, latent_dim=2, dropout_rate=0.1):
    """Decode a latent vector into an image.

    The latent code is densely projected to an h x w x 512 map, then
    upsampled by three bilinear stages, one residual block, and a pixel
    shuffle, ending in a tanh-activated conv.
    """
    latent = Input(shape=(latent_dim, ))

    # Project the latent code up to a spatial feature map.
    feat = Dense(h * w * 512, kernel_regularizer=l2(0.001))(latent)
    feat = LeakyReLU(0.1)(feat)            # extra nonlinearity after the projection
    feat = Reshape((h, w, 512))(feat)      # hxw@512
    feat = Dropout(dropout_rate)(feat)     # regularisation against overfitting

    # Three bilinear 2x upsamples, each refined by a transposed conv.
    for depth in (128, 128, 64):
        feat = up_bilinear()(feat)
        feat = Conv2DTranspose(depth, k, padding='same')(feat)
        feat = LeakyReLU(0.2)(feat)

    feat = _res_conv(64, k, dropout_rate)(feat)  # residual refinement
    feat = PixelShuffler()(feat)                 # channels -> spatial upscale
    feat = Conv2DTranspose(32, k, padding='same')(feat)
    feat = LeakyReLU(0.2)(feat)
    feat = _res_conv(32, k, dropout_rate)(feat)

    # tanh output keeps pixel values in [-1, 1].
    outputs = conv(c, k, 1, act='tanh')(feat)
    return Model([latent], [outputs])
def block(x):
    # Expand channels 4x so the pixel shuffle can trade them for
    # spatial resolution.
    y = Conv2D(filters * 4, kernel_size=3, padding='same')(x)
    y = LeakyReLU(0.1)(y)
    return PixelShuffler()(y)