def get_discriminator(latent_shape, image_shape, df_dim=64):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    # Image branch: strided convolutions down to a 4x4 feature map.
    n1i = Input(image_shape)
    n1 = Conv2d(df_dim, (5, 5), (2, 2), act=lrelu, W_init=w_init)(n1i)
    n1 = Conv2d(df_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    n1 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n1)
    n1 = Dropout(keep=0.8)(n1)
    n1 = Conv2d(df_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    n1 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n1)
    n1 = Dropout(keep=0.8)(n1)
    n1 = Conv2d(df_dim * 8, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    n1 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n1)
    n1 = Dropout(keep=0.8)(n1)
    n1 = Flatten()(n1)  # [-1, 4 * 4 * df_dim * 8]

    # Latent branch: project the code to the same width as the image features.
    n2i = Input(latent_shape)
    n2 = Dense(n_units=4 * 4 * df_dim * 8, W_init=w_init, b_init=None)(n2i)
    n2 = Dropout(keep=0.8)(n2)

    # Joint critic head over the concatenated (image, latent) features.
    nn = Concat()([n1, n2])
    nn = Dense(n_units=1, W_init=w_init, b_init=None)(nn)
    return tl.models.Model(inputs=[n1i, n2i], outputs=nn, name='discriminator')
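
# Usage sketch (illustrative; the 100-dim latent and 64x64 RGB image shapes are
# assumptions, not fixed by get_discriminator — they just match the 4x4 Flatten
# comment above):
#
#   D = get_discriminator(latent_shape=[None, 100], image_shape=[None, 64, 64, 3])
#   D.train()                          # enable Dropout during training
#   logits = D([img_batch, z_batch])   # -> (batch, 1) unnormalized real/fake scores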
def get_siamese_network(input_shape):
    """Create a siamese network with a shared base network as a layer."""
    base_layer = create_base_network(input_shape).as_layer()  # reuse the model as a layer
    ni_1 = Input(input_shape)
    ni_2 = Input(input_shape)
    # Both branches share weights through the single `base_layer` instance.
    nn_1 = base_layer(ni_1)
    nn_2 = base_layer(ni_2)
    return Model(inputs=[ni_1, ni_2], outputs=[nn_1, nn_2])
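
# Usage sketch (illustrative; create_base_network is defined elsewhere, and the
# 28x28x1 input shape is an assumption):
#
#   net = get_siamese_network([None, 28, 28, 1])
#   net.train()
#   emb_1, emb_2 = net([x_batch_1, x_batch_2])   # shared-weight embeddings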
def model_G2():  ## Phase-2 generator
    gamma_init = tf1.random_normal_initializer(1., 0.02)
    w_init = tf1.random_normal_initializer(stddev=0.02)
    fn = tf1.nn.relu

    ## Input layers: (batch_size, height, width, channel)
    lr_image = Input((None, 128, 128, 3))
    hr_image = Input((None, 512, 512, 3))

    ## Feature-extracting layers from the LR image
    lr_feature_layer_1 = Conv2d(64, (3, 3), (1, 1), act=fn, padding='SAME', W_init=w_init)(lr_image)  # (None, 128, 128, 64)
    lr_feature_layer_1 = BatchNorm2d(gamma_init=gamma_init)(lr_feature_layer_1)
    lr_feature_layer_2 = SubpixelConv2d(scale=4, act=fn)(lr_feature_layer_1)  # (None, 512, 512, 4)

    ## Feature-extracting layers from the HR image
    hr_feature_layer_1 = Conv2d(64, (3, 3), (1, 1), act=fn, padding='SAME', W_init=w_init)(hr_image)  # (None, 512, 512, 64)
    hr_feature_layer_1 = BatchNorm2d(gamma_init=gamma_init)(hr_feature_layer_1)

    ## Feature-merging layers
    merge_layer = Concat(concat_dim=-1)([lr_feature_layer_2, hr_feature_layer_1])  # (None, 512, 512, 68)
    non_linearity_layer_1 = Conv2d(64, (5, 5), (1, 1), act=fn, padding='SAME', W_init=w_init)(merge_layer)  # (None, 512, 512, 64)
    non_linearity_layer_1 = BatchNorm2d(gamma_init=gamma_init)(non_linearity_layer_1)

    ## Reconstruction layers: predict a residual and add it to the HR input
    Recon_layer_1 = Conv2d(3, (5, 5), (1, 1), act=fn, padding='SAME', W_init=w_init)(non_linearity_layer_1)  # (None, 512, 512, 3)
    Recon_layer_2 = Elementwise(combine_fn=tf1.add)([Recon_layer_1, hr_image])  # (None, 512, 512, 3)

    return Model(inputs=[lr_image, hr_image], outputs=Recon_layer_2)
def get_G(a_shape=(None, flags.za_dim),
          c_shape=(None, flags.c_shape[0], flags.c_shape[1], flags.c_shape[2]),
          name=None):
    # w_init / g_init were module-level in the original file; defined here so
    # the snippet is self-contained.
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)
    ndf = 256

    na = Input(a_shape)
    nc = Input(c_shape)
    # z = Concat(-1)([na, nt])
    z = na
    # Broadcast the latent code to a spatial map matching the content features.
    nz = ExpandDims(1)(z)
    nz = ExpandDims(1)(nz)
    nz = Tile([1, c_shape[1], c_shape[2], 1])(nz)

    # res block
    nn = Conv2d(ndf, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(nc)
    nn = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
    nn = Conv2d(ndf, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(nn)
    nn = InstanceNorm2d(act=None, gamma_init=g_init)(nn)
    n = Elementwise(tf.add)([nc, nn])

    nd_tmp = flags.za_dim
    ndf = ndf + nd_tmp
    n = Concat(-1)([n, nz])

    # res block * 3
    for i in range(1, 4):
        nn = Conv2d(ndf, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(n)
        nn = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(ndf, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(nn)
        nn = InstanceNorm2d(act=None, gamma_init=g_init)(nn)
        n = Elementwise(tf.add)([n, nn])

    # upsampling, re-injecting the tiled latent code at each scale
    for i in range(2):
        ndf = ndf + nd_tmp
        n = Concat(-1)([n, nz])
        nz = Tile([1, 2, 2, 1])(nz)
        n = DeConv2d(ndf // 2, (3, 3), (2, 2), act=tf.nn.relu, W_init=w_init, b_init=None)(n)
        n = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(n)
        ndf = ndf // 2

    n = Concat(-1)([n, nz])
    n = DeConv2d(3, (1, 1), (1, 1), act=tf.nn.tanh, W_init=w_init)(n)
    M = Model(inputs=[na, nc], outputs=n, name=name)
    return M
def model(inputs_shape, n_class=10):
    # In BNN, all layer inputs are binary, with the exception of the first layer.
    # ref: https://github.com/itayhubara/BinaryNet.tf/blob/master/models/BNN_cifar10.py
    net_in = Input(inputs_shape, name='input')
    net = BinaryConv2d(32, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn1')(net_in)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn1')(net)
    net = Sign("sign1")(net)
    net = BinaryConv2d(64, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn2')(net)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn2')(net)
    net = Flatten('ft')(net)
    net = Sign("sign2")(net)
    net = BinaryDense(256, b_init=None, name='dense')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn3')(net)
    net = Sign("sign3")(net)
    net = BinaryDense(n_class, b_init=None, name='bout')(net)  # was hardcoded to 10
    net = BatchNorm(name='bno')(net)
    net = Model(inputs=net_in, outputs=net, name='binarynet')
    return net
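
# Usage sketch (illustrative; assumes 28x28x1 MNIST-style inputs):
#
#   bnn = model([None, 28, 28, 1])
#   bnn.eval()                 # or bnn.train() during optimization
#   logits = bnn(x_batch)      # -> (batch, 10)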
def get_G(shape_z):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ni = Input(shape_z)  # input size is [None, 8, 8, 128]
    nn = DeConv2d(128, (1, 1), (1, 1), W_init=w_init, b_init=None, act=None)(ni)
    nn = DeConv2d(64, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(32, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(32, (3, 3), (1, 1), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(32, (3, 3), (1, 1), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(3, (4, 4), (2, 2), act=tf.nn.tanh, W_init=w_init, b_init=None)(nn)
    return tl.models.Model(inputs=ni, outputs=nn)
def get_G(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ni = Input(shape)
    nn = Dense(n_units=(2 * 2 * 448), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, 2, 2, 448])(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=256, filter_size=(4, 4), strides=(2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=128, filter_size=(4, 4), strides=(2, 2), act=tf.nn.relu, W_init=w_init)(nn)
    nn = DeConv2d(n_filter=64, filter_size=(4, 4), strides=(2, 2), act=tf.nn.relu, W_init=w_init)(nn)
    nn = DeConv2d(n_filter=3, filter_size=(4, 4), strides=(2, 2), act=tf.nn.tanh, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name='G')
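
# Usage sketch (illustrative; the 100-dim latent is an assumption). Four
# stride-2 transposed convolutions grow the 2x2 seed to 32x32:
#
#   G = get_G([None, 100])
#   G.eval()
#   fake = G(z_batch)          # -> (batch, 32, 32, 3), tanh outputs in [-1, 1]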
def get_z_G(shape_z):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    # lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    nz = Input(shape_z)
    print(nz.shape)
    n = Dense(n_units=4 * 4 * 256, W_init=w_init, b_init=None, act=None)(nz)
    print(n.shape)
    n = Reshape(shape=[-1, 4, 4, 256])(n)
    n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(n)
    print(n.shape)
    n = DeConv2d(128, (3, 3), (1, 1), W_init=w_init, padding='VALID', b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(n)
    print(n.shape)
    n = DeConv2d(128, (3, 3), (1, 1), W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(n)
    print(n.shape)
    n = DeConv2d(128, (3, 3), (1, 1), W_init=w_init, padding='VALID')(n)
    print(n.shape)
    return tl.models.Model(inputs=nz, outputs=n)
def get_generator(shape=[None, flags.z_dim], gf_dim=64, name=None):
    image_size = 64
    s16 = image_size // 16
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name=name)
def MLP(input_dim, hidden_dim_list, w_init=tf.initializers.Orthogonal(0.2),
        activation=tf.nn.relu, *args, **kwargs):
    """Multiple fully-connected layers for approximation.

    Args:
        input_dim (int): size of input tensor
        hidden_dim_list (list[int]): a list of dimensions of hidden layers
        w_init (callable): initialization method for weights
        activation (callable): activation function of hidden layers

    Returns:
        input tensor, output tensor
    """
    l = inputs = Input([None, input_dim], name='input_layer')
    for i in range(len(hidden_dim_list)):
        l = Dense(n_units=hidden_dim_list[i], act=activation, W_init=w_init,
                  name='mlp_layer%d' % (i + 1))(l)
    outputs = l
    return inputs, outputs
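
# Usage sketch (illustrative): MLP returns the input/output *tensors*, not a
# model, so wrap them in a Model before calling (the 4-dim observation is an
# assumption):
#
#   ni, no = MLP(input_dim=4, hidden_dim_list=[64, 64])
#   net = tl.models.Model(inputs=ni, outputs=no)
#   net.eval()
#   feat = net(obs_batch)      # -> (batch, 64)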
def get_G(input_shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)

    nin = Input(input_shape)
    n = Conv2d(64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init)(nin)
    temp = n

    # B residual blocks
    for i in range(16):
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
        nn = BatchNorm(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(gamma_init=g_init)(nn)
        nn = Elementwise(tf.add)([n, nn])
        n = nn

    n = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(gamma_init=g_init)(n)
    n = Elementwise(tf.add)([n, temp])
    # B residual blocks end

    # 4x upscaling with two scale-2 sub-pixel convolutions
    n = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)
    n = Conv2d(256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)

    nn = Conv2d(3, (1, 1), (1, 1), act=tf.nn.tanh, padding='SAME', W_init=w_init)(n)
    G = Model(inputs=nin, outputs=nn, name="generator")
    return G
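
# Usage sketch (illustrative; the 96x96 LR patch size is an assumption). The two
# scale-2 sub-pixel stages give 4x super-resolution overall:
#
#   G = get_G([None, 96, 96, 3])
#   G.eval()
#   hr = G(lr_batch)           # -> (batch, 384, 384, 3)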
def tam_net(self):
    inputs = Input(self.in_shape, name='inputs')

    # Encoder: five stride-2 convolutions, each followed by a residual block.
    e_in = inputs
    for i in range(0, 5):
        e_out = Conv2d(self.f_size * (2 ** i), (3, 3), (2, 2), act=tf.nn.relu,
                       name=f'e{i+1}_con')(e_in)
        e_in = self.residual_block(i, e=True)(e_out)
        self.__setattr__(f'e{i+1}', e_in)

    # Decoder: upsample, concatenate the matching encoder feature (skip
    # connection), then fuse with a 1x1 convolution.
    d_in = e_in
    for i in range(4, 0, -1):
        d_out = DeConv2d(self.f_size * (2 ** (i - 1)), (3, 3), (2, 2), name=f'd{i}_con')(d_in)
        encoder = self.__getattribute__(f'e{i}')
        d_out = Concat(concat_dim=3, name=f'concat{i}')([encoder, d_out])
        d_out = Conv2d(self.f_size * (2 ** (i - 1)), (1, 1), (1, 1), name=f'fusion{i}')(d_out)
        d_in = self.residual_block(i - 1, e=False)(d_out)
        self.__setattr__(f'd{i + 1}', d_in)

    outs = DeConv2d(3, (3, 3), (2, 2), name='d_con_out')(d_in)
    outs = Conv2d(3, (1, 1), (1, 1), act=tf.nn.sigmoid, name='outs')(outs)
    return Model(inputs=inputs, outputs=outs, name="TAM_Net")
def get_Ec(x_shape=(None, flags.img_size_h, flags.img_size_w, flags.c_dim), name=None):
    # ref: Multimodal Unsupervised Image-to-Image Translation
    lrelu = lambda x: tl.act.lrelu(x, 0.01)
    w_init = tf.random_normal_initializer(stddev=0.02)
    # g_init was module-level in the original file; defined here so the snippet
    # is self-contained.
    g_init = tf.random_normal_initializer(1., 0.02)
    channel = 64

    ni = Input(x_shape)
    n = Conv2d(channel, (7, 7), (1, 1), act=lrelu, W_init=w_init)(ni)

    # two stride-2 downsampling blocks
    for i in range(2):
        n = Conv2d(channel * 2, (3, 3), (2, 2), W_init=w_init)(n)
        n = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(n)
        channel = channel * 2

    # four residual blocks
    for i in range(1, 5):
        nn = Conv2d(channel, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(n)
        nn = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(channel, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(nn)
        nn = InstanceNorm2d(act=None, gamma_init=g_init)(nn)
        n = Elementwise(tf.add)([n, nn])

    n = GaussianNoise(is_always=False)(n)
    M = Model(inputs=ni, outputs=n, name=name)
    return M
def get_D(x_shape=(None, flags.img_size_h, flags.img_size_w, flags.c_dim), name=None):
    # ref: Image-to-Image Translation with Conditional Adversarial Networks
    # input : (batch_size_train, 256, 256, 3)
    # output: (batch_size_train, 1)
    # lrelu / w_init were module-level in the original file; defined here so the
    # snippet is self-contained.
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    w_init = tf.random_normal_initializer(stddev=0.02)
    ch = 64
    n_layer = 8
    tch = ch

    ni = Input(x_shape)
    n = SpectralNormConv2d(ch, (3, 3), (2, 2), act=lrelu, W_init=w_init)(ni)
    for i in range(1, n_layer - 1):
        n = SpectralNormConv2d(tch * 2, (3, 3), (2, 2), act=lrelu, W_init=w_init)(n)
        tch *= 2
    n = SpectralNormConv2d(tch * 2, (3, 3), (2, 2), act=lrelu, W_init=w_init)(n)
    tch *= 2
    n = SpectralNormConv2d(1, (1, 1), (1, 1), act=None, padding='VALID', W_init=w_init)(n)
    n = Reshape([-1, 1])(n)
    M = Model(inputs=ni, outputs=n, name=name)
    return M
def get_G_cifar_10(shape_z):
    # input: (flags.z_dim,)
    w_init = tf.random_normal_initializer(stddev=0.02)
    # g_init was module-level in the original file; defined here so the snippet
    # is self-contained.
    g_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape_z)
    n = Dense(n_units=128 * 4 * 4, act=tf.nn.relu, W_init=w_init)(ni)
    # Reshape to a 4x4 feature map before the conv blocks (the flat Dense output
    # cannot feed Conv2d directly).
    n = Reshape(shape=[-1, 4, 4, 128])(n)

    # 3 residual blocks
    for _ in range(3):
        nn = Conv2d(128, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(n)
        nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(128, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(nn)
        nn = BatchNorm2d(decay=0.9, act=None, gamma_init=g_init)(nn)
        n = Elementwise(tf.add)([n, nn])

    n = Conv2d(3, (3, 3), (1, 1), act=tf.nn.relu, W_init=w_init)(n)
    return tl.models.Model(inputs=ni, outputs=n, name='generator_CIFAR10')
def get_generator(shape, gf_dim=64):  # dimension of gen filters in first conv layer [64]
    image_size = flags.output_size
    s16 = image_size // 8
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)  # channels must match the Dense units (was gf_dim * 16)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)

    # Upsample with UpSampling2d + Conv2d instead of transposed convolution.
    # nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim * 8, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)

    # nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim * 4, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)

    # nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim * 2, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)

    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)

    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
def model(input_shape, n_classes):
    in_net = Input(shape=input_shape, name='input')
    net = Conv2d(64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')(in_net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm1')(net)
    net = TernaryConv2d(64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm2')(net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
    net = Flatten(name='flatten')(net)
    net = TernaryDense(384, act=tf.nn.relu, name='d1relu')(net)
    net = TernaryDense(192, act=tf.nn.relu, name='d2relu')(net)
    net = Dense(n_classes, act=None, name='output')(net)
    net = Model(inputs=in_net, outputs=net, name='ternarynet')  # was 'dorefanet', but the layers are ternary
    return net
def model(inputs_shape, n_class=10):
    net_in = Input(inputs_shape, name="input")
    net = QuanConv2dWithBN(n_filter=32, filter_size=(5, 5), strides=(1, 1), padding='SAME',
                           act=tf.nn.relu, name='qconvbn1')(net_in)
    net = MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')(net)
    net = QuanConv2dWithBN(n_filter=64, filter_size=(5, 5), strides=(1, 1), padding='SAME',
                           act=tf.nn.relu, name='qconvbn2')(net)
    net = MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')(net)
    net = Flatten(name='ft')(net)
    # net = QuanDense(256, act="relu", name='qdbn')(net)
    # net = QuanDense(n_class, name='qdbn_out')(net)
    net = QuanDenseLayerWithBN(256, act="relu", name='qdbn')(net)
    net = QuanDenseLayerWithBN(n_class, name='qdbn_out')(net)
    # net = Dense(256, act='relu', name='Dense1')(net)
    # net = Dense(n_class, name='Dense2')(net)
    net = Model(inputs=net_in, outputs=net, name='quan')
    return net
def create_model(inputs_shape):
    W_init = tl.initializers.truncated_normal(stddev=5e-2)
    W_init2 = tl.initializers.truncated_normal(stddev=0.04)
    ni = Input(inputs_shape)
    nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, name='conv1_1')(ni)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1_1')(nn)
    nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv1_2')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1_2')(nn)
    nn = Conv2d(128, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv2_1')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2_1')(nn)
    nn = Conv2d(128, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv2_2')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2_2')(nn)
    nn = Conv2d(256, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv3_1')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool3_1')(nn)
    nn = Conv2d(256, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv3_2')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool3_2')(nn)
    nn = Conv2d(512, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv4_1')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool4_1')(nn)
    nn = Conv2d(512, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv4_2')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool4_2')(nn)
    nn = Flatten(name='flatten')(nn)
    nn = Dense(1000, act=None, W_init=W_init2, name='output')(nn)
    M = Model(inputs=ni, outputs=nn, name='cnn')
    return M
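
# Usage sketch (illustrative; the 224x224 RGB input shape is an assumption — the
# Flatten + Dense head adapts to whatever spatial size remains after the pools):
#
#   net = create_model([None, 224, 224, 3])
#   net.eval()
#   logits = net(img_batch)    # -> (batch, 1000)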
def model(self):
    # Glorot initialization works better than uniform in practice.
    w_init = tf.keras.initializers.glorot_normal(seed=None)
    # init_w = 3e-3
    # w_init = tf.random_uniform_initializer(-init_w, init_w)
    prefix = str(self.scope) if self.scope is not None else ''
    inputs = Input([None, self.input_dim], name=prefix + 'q_input')
    l = Dense(n_units=self.hidden_dim, act=tf.nn.relu, W_init=w_init,
              name=prefix + 'v_1')(inputs)
    for i in range(self.num_hidden_layer):
        l = Dense(n_units=self.hidden_dim, act=tf.nn.relu, W_init=w_init,
                  name=prefix + 'v_1' + str(i + 2))(l)
    outputs = Dense(n_units=1, W_init=w_init,
                    name=prefix + 'v' + str(self.num_hidden_layer + 2))(l)
    return tl.models.Model(inputs=inputs, outputs=outputs,
                           name=prefix + 'value_network')
def get_img_D(shape):
    df_dim = 8
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    n = Conv2d(df_dim, (5, 5), (2, 2), act=None, W_init=w_init, b_init=None)(ni)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 2, (5, 5), (1, 1), act=None, W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 4, (5, 5), (2, 2), act=None, W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (5, 5), (1, 1), act=None, W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (5, 5), (2, 2), act=None, W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    nf = Flatten(name='flatten')(n)
    n = Dense(n_units=1, act=None, W_init=w_init)(nf)
    return tl.models.Model(inputs=ni, outputs=n, name='img_Discriminator')
def get_generator(shape, gf_dim=64):  # dimension of gen filters in first conv layer [64]
    image_size = 64
    s16 = image_size // 16
    w_init = tf.random_normal_initializer(0.0, 0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
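
# Usage sketch (illustrative; the 100-dim latent is an assumption). From a 4x4
# seed, four stride-2 transposed convolutions reach the 64x64 output:
#
#   G = get_generator([None, 100])
#   G.eval()
#   fake = G(z_batch)          # -> (batch, 64, 64, 3) in [-1, 1]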
def model(inputs_shape, n_class=10):
    in_net = Input(inputs_shape, name='input')
    net = DorefaConv2d(1, 3, 32, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn1')(in_net)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn1')(net)
    net = DorefaConv2d(1, 3, 64, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn2')(net)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn2')(net)
    net = Flatten('flatten')(net)
    net = DorefaDense(1, 3, 256, b_init=None, name='dense')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn3')(net)
    net = Dense(n_class, b_init=None, name='bout')(net)
    net = BatchNorm(name='bno')(net)
    net = Model(inputs=in_net, outputs=net, name='dorefanet')
    return net
def get_G(name=None):
    gf_dim = 32
    w_init = tf.random_normal_initializer(stddev=0.02)

    nx = Input((flags.batch_size, 256, 256, 3))
    n = Conv2d(gf_dim, (7, 7), (1, 1), W_init=w_init)(nx)
    n = InstanceNorm2d(act=tf.nn.relu)(n)
    n = Conv2d(gf_dim * 2, (3, 3), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=tf.nn.relu)(n)
    n = Conv2d(gf_dim * 4, (3, 3), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=tf.nn.relu)(n)

    # 9 residual blocks
    for i in range(9):
        _n = Conv2d(gf_dim * 4, (3, 3), (1, 1), W_init=w_init)(n)
        _n = InstanceNorm2d(act=tf.nn.relu)(_n)
        _n = Conv2d(gf_dim * 4, (3, 3), (1, 1), W_init=w_init)(_n)
        _n = InstanceNorm2d()(_n)
        _n = Elementwise(tf.add)([n, _n])
        n = _n

    n = DeConv2d(gf_dim * 2, (3, 3), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=tf.nn.relu)(n)
    n = DeConv2d(gf_dim, (3, 3), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=tf.nn.relu)(n)
    n = Conv2d(3, (7, 7), (1, 1), act=tf.nn.tanh, W_init=w_init)(n)
    M = Model(inputs=nx, outputs=n, name=name)
    return M
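
# Usage sketch (illustrative). Note the input shape is fixed to
# (flags.batch_size, 256, 256, 3), so batches must match flags.batch_size:
#
#   G_ab = get_G(name='G_ab')
#   G_ab.train()
#   fake_b = G_ab(real_a)      # -> (flags.batch_size, 256, 256, 3)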
def get_encoder(shape=[None, flags.output_size, flags.output_size, flags.c_dim],
                df_dim=64, name=None):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    nn = Conv2d(df_dim, (5, 5), (2, 2), act=lrelu, W_init=w_init)(ni)
    nn = Conv2d(df_dim * 2, (5, 5), (2, 2), act=None, W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(df_dim * 4, (5, 5), (2, 2), act=None, W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(df_dim * 8, (5, 5), (2, 2), act=None, W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Flatten()(nn)
    # print(nn.shape)
    nn = Dense(flags.z_dim, act=tf.identity, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name=name)
def get_z_D(shape_z):
    w_init = tf.random_normal_initializer(stddev=0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    nz = Input(shape_z)
    print(nz.shape)  # 8 8 128
    n = SpectralNormConv2d(128, (3, 3), (1, 1), act=lrelu, W_init=w_init, padding='VALID')(nz)
    print(n.shape)  # 6 6 128
    n = SpectralNormConv2d(128, (3, 3), (1, 1), act=lrelu, W_init=w_init)(n)
    print(n.shape)  # 6 6 128
    n = SpectralNormConv2d(256, (3, 3), (1, 1), act=lrelu, W_init=w_init, padding='VALID')(n)
    print(n.shape)  # 4 4 256
    n = SpectralNormConv2d(512, (4, 4), (1, 1), act=lrelu, W_init=w_init, padding='VALID')(n)
    print(n.shape)  # 1 1 512
    n = Reshape(shape=[-1, 512])(n)
    n = Dense(n_units=1, act=tf.identity, W_init=w_init, b_init=None)(n)
    print(n.shape)
    return tl.models.Model(inputs=nz, outputs=n)
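
# Usage sketch (illustrative; the (None, 8, 8, 128) latent-map shape follows the
# shape comments above):
#
#   Dz = get_z_D([None, 8, 8, 128])
#   Dz.train()
#   score = Dz(z_map_batch)    # -> (batch, 1)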
def get_img_D(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    ndf = 64
    isize = 64
    n_extra_layers = flags.n_extra_layers

    ni = Input(shape)
    n = Conv2d(ndf, (4, 4), (2, 2), act=None, W_init=w_init, b_init=None)(ni)
    csize, cndf = isize / 2, ndf

    # optional extra 3x3 layers at the current resolution
    for t in range(n_extra_layers):
        n = SpectralNormConv2d(cndf, (3, 3), (1, 1), act=lrelu, W_init=w_init, b_init=None)(n)

    # downsample (doubling channels) until the feature map is 4x4
    while csize > 4:
        cndf = cndf * 2
        n = SpectralNormConv2d(cndf, (4, 4), (2, 2), act=lrelu, W_init=w_init, b_init=None)(n)
        csize = csize / 2

    n = Conv2d(1, (4, 4), (1, 1), act=None, W_init=w_init, b_init=None, padding='VALID')(n)
    return tl.models.Model(inputs=ni, outputs=n)
def get_Ek(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ngf = 64
    isize = 64
    n_extra_layers = flags.n_extra_layers

    ni = Input(shape)
    nn = Conv2d(ngf, (4, 4), (2, 2), W_init=w_init, act=tf.nn.relu)(ni)
    nn = Conv2d(ngf * 2, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = Conv2d(ngf * 2, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(ngf // 2, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(ngf // 8, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    # nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    # nn = DeConv2d(ngf // 8, (4, 4), (2, 2), W_init=w_init, act=tf.nn.relu)(nn)
    return tl.models.Model(inputs=ni, outputs=nn)
def get_base(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, flags.leaky_rate)

    ni = Input(shape)
    nn = Conv2d(n_filter=64, filter_size=(4, 4), strides=(2, 2), act=lrelu, W_init=w_init)(ni)
    nn = Conv2d(n_filter=128, filter_size=(4, 4), strides=(2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(n_filter=256, filter_size=(4, 4), strides=(2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Flatten()(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name='base')
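
# Usage sketch (illustrative; a 64x64 RGB input is an assumption — three
# stride-2 convolutions leave an 8x8x256 map before Flatten):
#
#   base = get_base([None, 64, 64, 3])
#   base.train()
#   feat = base(img_batch)     # -> (batch, 8 * 8 * 256) flattened features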
def get_dwG(shape_z=(None, 100), shape_h=(None, 16)):  # shape_h batch dim was 0, a typo for None
    s16 = flags.img_size_h // 16
    gf_dim = 64  # dimension of gen filters in first conv layer [64]
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    n_z = Input(shape_z)
    n_h = Input(shape_h)
    n = Concat(-1)([n_z, n_h])
    n = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init, act=tf.identity, b_init=None)(n)
    n = Reshape(shape=[-1, s16, s16, gf_dim * 8])(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(gf_dim * 4, (5, 5), strides=(2, 2), act=None, W_init=w_init, b_init=None)(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(gf_dim * 2, (5, 5), strides=(2, 2), act=None, W_init=w_init, b_init=None)(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(gf_dim, (5, 5), strides=(2, 2), act=None, W_init=w_init, b_init=None)(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(flags.c_dim, (5, 5), strides=(2, 2), act=tf.nn.tanh, W_init=w_init)(n)
    return tl.models.Model(inputs=[n_z, n_h], outputs=n, name='generator')
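
# Usage sketch (illustrative): the generator conditions on both a 100-dim noise
# vector and a 16-dim code, passed as a list in input order. Assuming square
# images, the four stride-2 deconvolutions restore s16 * 16 = flags.img_size_h:
#
#   G = get_dwG()
#   G.train()
#   fake = G([z_batch, h_batch])   # -> (batch, flags.img_size_h, flags.img_size_h, flags.c_dim)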