Example #1
def generator(noise, reuse=False, alpha=0.2, training=True):
    """
    Generator model that takes `noise` as input. `alpha` is the
    slope parameter of the leaky-ReLU activations.
    """

    with tf.variable_scope('generator', reuse=reuse):

        x = dense(noise, 4 * 4 * 512)
        x = tf.reshape(x, (-1, 4, 4, 512))
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        x = conv2d_transpose(x, 256, 5, 2, padding='same')
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        x = conv2d_transpose(x, 128, 5, 2, padding='same')
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        x = conv2d_transpose(x, 64, 5, 2, padding='same')
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        logits = conv2d_transpose(x, 3, 5, 2, padding='same')
        out = tf.tanh(logits)

        return out
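A minimal usage sketch for the generator above, assuming `dense`, `batch_normalization`, and `conv2d_transpose` are aliases for the corresponding `tf.layers` functions (TF 1.x); the 100-dim noise size is illustrative:

import tensorflow as tf
from tensorflow.layers import dense, batch_normalization, conv2d_transpose  # assumed aliases

noise = tf.placeholder(tf.float32, [None, 100])          # illustrative noise dimension
fake = generator(noise, training=True)                   # 4 -> 8 -> 16 -> 32 -> 64 via four stride-2 deconvs
samples = generator(noise, reuse=True, training=False)   # reuse the same variables at sampling time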
Example #2
def generator(noise, reuse=False, alpha=0.2, training=True):

    with tf.variable_scope('generator', reuse=reuse):

        x = dense(noise, 4 * 4 * 512)
        x = tf.reshape(x, (-1, 4, 4, 512))
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        x = conv2d_transpose(x, 256, 5, 2, padding='same')
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        x = conv2d_transpose(x, 128, 5, 2, padding='same')
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        x = conv2d_transpose(x, 64, 5, 2, padding='same')
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        logits = conv2d_transpose(x, 3, 5, 2, padding='same')
        out = tf.tanh(logits)

        return out, logits
Example #3
    def decode(self, z, x_dim, training=False):
        im_h, im_w, im_c = self.image_shape

        with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
            h = z
            h = tfl.dense(h, units=self.h_dim, activation=tf.nn.relu)
            h = tfl.dense(h, units=self.h_dim, activation=tf.nn.relu)
            stride = 16
            h = tfl.dense(
                    h, units=im_h // stride * im_w // stride * self.kernel_num * 2,
                    activation=tf.nn.relu)
            new_shape = (-1, im_h // stride, im_w // stride, self.kernel_num * 2)
            h = tf.reshape(h, new_shape)

            h = tfl.conv2d_transpose(
                    h, self.kernel_num * 2, self.kernel_size,
                    strides=2, padding="same", activation=tf.nn.relu)

            h = tfl.conv2d_transpose(
                    h, self.kernel_num, self.kernel_size,
                    strides=2, padding="same", activation=tf.nn.relu)

            h = tfl.conv2d_transpose(
                    h, self.kernel_num, self.kernel_size,
                    strides=2, padding="same", activation=tf.nn.relu)

            h = tfl.conv2d_transpose(
                    h, im_c, self.kernel_size,
                    strides=2, padding="same", activation=tf.nn.sigmoid)

            h = tfl.flatten(h)
            y_mean = h
            return y_mean
Example #4
def generatorNet(name, inputs, is_training, use_batchNorm, reuse=None):
    idx = 0
    f = inputs
    with tf.variable_scope(name, reuse=reuse):
        f = layers.dense(f, 1024, None, name="dense_%d" % idx)
        if use_batchNorm:
            f = layers.batch_normalization(f, training=is_training, name="bn_%d" % idx)
        f = tf.nn.relu(f, "relu_%d" % idx)

        idx += 1
        f = layers.dense(f, 7 * 7 * 128, None, name="dense_%d" % idx)  # 6272
        if use_batchNorm:
            f = layers.batch_normalization(f, training=is_training, name="bn_%d" % idx)
        f = tf.nn.relu(f, "relu_%d" % idx)

        f = tf.reshape(f, [-1, 7, 7, 128], name="reshape_%d" % idx)

        idx += 1
        f = layers.conv2d_transpose(f, 64, kernel_size=4, strides=2, padding="SAME", name="deconv_%d" % idx)
        if use_batchNorm:
            f = layers.batch_normalization(f, training=is_training, name="bn_%d" % idx)
        f = tf.nn.relu(f, "relu_%d" % idx)

        idx += 1
        f = layers.conv2d_transpose(f, 1, kernel_size=4, strides=2, padding="SAME", name="deconv_%d" % idx)
        f = tf.nn.sigmoid(f, "sigmoid_%d" % idx)

    return f
Example #5
def deconv_net(inputs):
    # *** WARNING: HARDCODED SHAPES, CHANGE ACCORDINGLY ***
    dense = layers.dense(inputs, units=7 * 7 * 16, activation=tf.nn.relu)
    # input(dense)
    dense = tf.reshape(dense, [16, 7, 7, 16])

    # 1st deconv layer
    deconv1 = layers.conv2d_transpose(dense,
                                      filters=32,
                                      kernel_size=[5, 5],
                                      padding='same',
                                      activation=tf.nn.relu)
    # input(deconv1)
    # 2nd deconv layer
    deconv2 = layers.conv2d_transpose(deconv1,
                                      filters=16,
                                      kernel_size=[5, 5],
                                      padding='same',
                                      activation=tf.nn.relu)
    # input(deconv2)
    out = layers.conv2d_transpose(deconv2,
                                  filters=16,
                                  kernel_size=[3, 3],
                                  padding='same')
    return out
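Given the hardcoded-shape warning above, a small sketch of the same projection with the batch dimension inferred rather than fixed at 16 (same layers module as in the example):

dense = layers.dense(inputs, units=7 * 7 * 16, activation=tf.nn.relu)
dense = tf.reshape(dense, [-1, 7, 7, 16])  # -1 lets TensorFlow infer the batch size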
Example #6
    def __init__(self, sparse, guidance_map, params, reuse=False):
        with tf.variable_scope("Local", reuse=reuse) as scope:
            sparse = tf.reshape(sparse, [-1, 200, 200, 1])
            guidance_map = tf.reshape(guidance_map, [-1, 200, 200, 1])
            x = sparse + guidance_map

            x = conv2d(x, 32, (3, 3), strides=(2, 2), padding='same')
            x = relu(x)
            x = conv2d(x, 64, (3, 3), strides=(2, 2), padding='same')
            x = relu(x)
            x = conv2d_transpose(x,
                                 64, (5, 5),
                                 strides=(2, 2),
                                 padding='same',
                                 use_bias=False)
            x = batch_normalization(x)
            x = relu(x)
            x = conv2d_transpose(x,
                                 2, (5, 5),
                                 strides=(2, 2),
                                 padding='same',
                                 use_bias=False)
            self.output = x
        self.parameters = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                            scope="Local")
Example #7
    def __init__(self, images, sparse, params, reuse=False):
        with tf.variable_scope("Global", reuse=reuse) as scope:
            sparse = tf.reshape(sparse, [-1, 200, 200, 1])
            images = tf.reshape(images, [-1, 200, 200, 3])
            x = tf.concat([images, sparse], axis=3)
            x = tf.cast(x, dtype=tf.float32)
            x = conv2d(x, 32, (3, 3), strides=(2, 2), padding='same')
            x = relu(x)
            x = conv2d(x, 64, (3, 3), strides=(2, 2), padding='same')
            x = relu(x)
            x = conv2d_transpose(x,
                                 64, (5, 5),
                                 strides=(2, 2),
                                 padding='same',
                                 use_bias=False)
            x = batch_normalization(x)
            x = relu(x)
            x = conv2d_transpose(x,
                                 3, (5, 5),
                                 strides=(2, 2),
                                 padding='same',
                                 use_bias=False)
            self.output = x
        self.parameters = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                            scope="Global")
Example #8
    def generator_fn(input_image, mode):
        with tf.name_scope('generator'):
            # 4*4
            dense_1 = layers.dense(inputs=input_image, units=batch_size * 16)
            batch_norm_1 = layers.batch_normalization(inputs=dense_1)
            reshape_1 = tf.reshape(batch_norm_1, shape=(batch_size, 4, 4, batch_size))
            relu_1 = tf.nn.relu(reshape_1)
            # 8*8
            conv_T_1 = layers.conv2d_transpose(inputs=relu_1, filters=64, kernel_size=(2, 2), strides=(2, 2), padding='same')
            batch_norm_2 = layers.batch_normalization(inputs=conv_T_1)
            relu_2 = tf.nn.relu(batch_norm_2)
            # 16*16
            conv_T_2 = layers.conv2d_transpose(inputs=relu_2, filters=32, kernel_size=(2, 2), strides=(2, 2), padding='same')
            batch_norm_3 = layers.batch_normalization(inputs=conv_T_2)
            relu_3 = tf.nn.relu(batch_norm_3)
            # 32*32
            conv_T_3 = layers.conv2d_transpose(inputs=relu_3, filters=16, kernel_size=(2, 2), strides=(2, 2), padding='same')
            batch_norm_4 = layers.batch_normalization(inputs=conv_T_3)
            relu_4 = tf.nn.relu(batch_norm_4)
            # 64*64
            conv_T_4 = layers.conv2d_transpose(
                inputs=relu_4, filters=3, kernel_size=(2, 2), strides=(2, 2), padding='same')
            tanh_1 = tf.nn.tanh(conv_T_4)
            print(tanh_1)
            return tanh_1
Example #9
def generator(inp0, dim, name, reuse):
    # inp0 = tf.placeholder(tf.float32, [None, G.size, G.size, 1])  # G.size == 2 ** 8
    with tf.variable_scope(name, reuse=reuse):
        ten1 = tl.conv2d(inp0, 1 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)
        ten2 = tl.conv2d(ten1, 2 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)
        ten3 = tl.conv2d(ten2, 4 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)
        ten4 = tl.conv2d(ten3, 8 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)
        ten5 = tl.conv2d(ten4, 16 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)

        ten6 = tl.conv2d(ten5, 16 * dim, 3, 1, 'same', activation=tf.nn.leaky_relu)
        ten6 = tl.conv2d(ten6, 16 * dim, 3, 1, 'same', activation=tf.nn.leaky_relu)
        ten6 += ten5
        ten7 = tl.conv2d(ten6, 16 * dim, 3, 1, 'same', activation=tf.nn.leaky_relu)
        ten7 = tl.conv2d(ten7, 16 * dim, 3, 1, 'same', activation=tf.nn.leaky_relu)
        ten7 += ten6

        ten8 = tl.conv2d_transpose(ten7, 16 * dim, 3, 1, 'same', activation=leRU_batch_norm)
        ten8 = tl.conv2d_transpose(ten8, 16 * dim, 3, 1, 'same', activation=leRU_batch_norm)
        ten8 += ten7

        ten9 = tl.conv2d_transpose(ten8, 16 * dim, 3, 1, 'same', activation=leRU_batch_norm)
        ten9 = tl.conv2d_transpose(ten9, 16 * dim, 3, 1, 'same', activation=leRU_batch_norm)
        ten9 += ten8

        ten5 = tl.conv2d_transpose(ten9, 16 * dim, 4, 2, 'same', activation=leRU_batch_norm)
        ten4 = tl.conv2d_transpose(ten5, 8 * dim, 4, 2, 'same', activation=leRU_batch_norm)
        ten3 = tl.conv2d_transpose(ten4, 4 * dim, 4, 2, 'same', activation=leRU_batch_norm)
        ten2 = tl.conv2d_transpose(ten3, 2 * dim, 4, 2, 'same', activation=leRU_batch_norm)
        ten1 = tl.conv2d_transpose(ten2, 1 * dim, 4, 2, 'same', activation=leRU_batch_norm)

        ten1 = tf.concat((ten1, inp0), axis=3)
        ten1 = tl.conv2d(ten1, 1 * dim, 3, 1, 'same', activation=leRU_batch_norm)
        ten1 = tl.conv2d(ten1, 3, 3, 1, 'same', activation=tf.nn.tanh)
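        # rescale the tanh output from [-1, 1] to roughly [0, 1] (0.505 leaves a small margin past the ends)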
        ten1 = ten1 * 0.505 + 0.5
        return ten1
Example #10
def generator_net(inputs, scope, reuse=None, rgb=False):
	
	output_channels = 3 if rgb else 1
	
	with tf.variable_scope(scope, reuse=reuse):
	
		# branch  1 ( color reconstruction)
		
		cv1   = conv2d(inputs, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_i')
		cv1_r = leaky_relu(cv1)
		
		res1_c = conv2d(cv1_r, filters=16, kernel_size=5, strides=1, padding='same', activation=None, name='conv3a_1')
		res1_b = batch_normalization(res1_c)
		res1_r = leaky_relu(res1_b)
		
		res1_d = conv2d(res1_r, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_1')
		res1   = batch_normalization(res1_d)
		
		sum1  = cv1 + res1
		
		res2_c = conv2d(sum1, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3a_2')
		res2_b = batch_normalization(res2_c)
		res2_r = leaky_relu(res2_b)
		
		res2_d = conv2d(res2_r, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_2')
		res2   = batch_normalization(res2_d)
		
		br1 = sum1 + res2
		
		
		# branch 2 (features extraction)
		br2 = conv2d(inputs, filters=16, kernel_size=5, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf1')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool1')
		br2 = conv2d(br2, filters=16, kernel_size=3, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf2')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool2a')
		br2 = conv2d(br2, filters=16, kernel_size=3, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf3')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool2')
		
		print(br2.shape)
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_1")
		print(br2.shape)
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_2")
		print(br2.shape)
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_3")
		print(br2.shape)
		
		# concatenate branches and reconstruct image
		sum3 = tf.concat((br1, br2), axis=3)
		model = conv2d(sum3, filters=output_channels, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_f')
		
		return model
Example #11
def _build_generator(input_data, name='generator'):
    with tf.variable_scope(name):
        net = layers.dense(input_data, 128)
        net = tf.nn.relu(net)
        net = tf.reshape(net, [-1, 4, 4, 8])
        net = layers.conv2d_transpose(net, 128, [5, 5], activation=tf.nn.relu, strides=[2, 2], padding='same')  # 8x8
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        net = layers.conv2d_transpose(net, 64, [5, 5], activation=tf.nn.relu, strides=[2, 2])  # 19x19
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        net = layers.conv2d_transpose(net, 32, [5, 5], activation=tf.nn.relu)  # 23x23
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        net = layers.conv2d_transpose(net, 16, [5, 5], activation=tf.nn.relu)  # 27x27
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        net = layers.conv2d_transpose(net, 1, [2, 2], activation=tf.nn.relu)  # 28x28
    return net
Example #12
    def generator(
        self,
        noise,
        label,
        training,
        reuse=False,
    ):
        with tf.variable_scope('generator', reuse=reuse):
            net = tf.concat([label, noise], axis=-1)  # (batch_size, 445)
            net = layers.dense(
                net,
                self.g_dim * 3 * 3 * 4,  # (batch_size, 3 * 3 * g_dim * 4)
                kernel_initializer=tf.truncated_normal_initializer(
                    mean=0, stddev=0.02))
            net = tf.nn.leaky_relu(net)
            net = tf.reshape(
                net, [-1, 3, 3, self.g_dim * 4])  # Reshape (3 x 3 x 512)
            net = deconv2d(net,
                           self.g_dim * 2,
                           strides=[1, 1],
                           padding='valid',
                           training=training)  # (7 x 7 x 256)
            net = deconv2d(net, self.g_dim,
                           training=training)  # (14 x 14 x 128)
            net = layers.conv2d_transpose(
                net,
                1,
                kernel_size=(5, 5),
                strides=(2, 2),
                activation=tf.nn.tanh,
                padding='same',
                kernel_initializer=tf.truncated_normal_initializer(
                    mean=0, stddev=0.02))  # (28 x 28 x 1)
            return net  # (28, 28, 1)
Example #13
    def deconv_block(self, input, filters, conv, padding, scope):
        with tf.variable_scope(scope):
            deconv1 = conv2d_transpose(input,
                                       filters,
                                       kernel_size=(3, 3),
                                       strides=[2, 2],
                                       padding=padding)
            deconv_shape = tf.shape(deconv1)
            conv_shape = tf.shape(conv)
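            # center-crop the skip-connection tensor `conv` to the deconv output's spatial size (U-Net style)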
            offsets = [
                0, (conv_shape[1] - deconv_shape[1]) // 2,
                (conv_shape[2] - deconv_shape[2]) // 2, 0
            ]
            size = [-1, deconv_shape[1], deconv_shape[2], filters]
            conv_crop = tf.slice(conv, offsets, size)
            conv1 = tf.concat([deconv1, conv_crop], 3)
            bn = batch_normalization(conv1)
            drop = dropout(bn, .25)
            conv2 = conv2d(drop,
                           filters,
                           kernel_size=(3, 3),
                           activation=tf.nn.relu,
                           name='middle1',
                           padding="SAME")
            conv3 = conv2d(conv2,
                           filters,
                           kernel_size=(3, 3),
                           activation=tf.nn.relu,
                           name='middle2',
                           padding="SAME")

        return conv3
Example #14
def restoring_branch(net,
                     depth,
                     activation,
                     is_training,
                     init=he_init,
                     data_format='channels_first',
                     name=''):
    net_ = L.conv2d_transpose(net,
                              depth, [2, 2],
                              strides=2,
                              padding='SAME',
                              data_format=data_format,
                              kernel_initializer=init,
                              name='{}_W1'.format(name))
    net_ = activation(net_, name='{}_A1'.format(name))
    net_ = batch_norm(net_, is_training)
    net_ = L.conv2d(net_,
                    depth, [3, 3],
                    strides=1,
                    padding='SAME',
                    data_format=data_format,
                    kernel_initializer=init,
                    name='{}_W2'.format(name))
    net_ = activation(net_, name='{}_A2'.format(name))
    net_ = batch_norm(net_, is_training)
    return net_
Example #15
def generator(inp0, mask, dim, name, reuse):
    # inp0 = tf.placeholder(tf.float32, [None, G.size, G.size, 3])  # G.size > 2 ** 5 + 1
    with tf.variable_scope(name, reuse=reuse):
        inp0 = tf.concat((inp0, mask), axis=3)

        ten1 = conv_mask(inp0, mask, dim, 0)
        ten2 = conv_mask(ten1, mask, dim, 1)
        ten3 = conv_mask(ten2, mask, dim, 2)
        ten4 = conv_mask(ten3, mask, dim, 3)
        ten5 = conv_mask(ten4, mask, dim, 4)

        ten6 = conv_tp_res(ten5, dim, 4)
        ten6 = conv_tp_res(ten6, dim, 4)
        ten6 = conv_tp_res(ten6, dim, 4)
        ten6 = conv_tp_res(ten6, dim, 4)

        ten5 = conv_tp_concat(ten6, ten5, dim, 5)
        ten4 = conv_tp_concat(ten5, ten4, dim, 4)
        ten3 = conv_tp_concat(ten4, ten3, dim, 3)
        ten2 = conv_tp_concat(ten3, ten2, dim, 2)
        ten1 = conv_tp_concat(ten2, ten1, dim, 1)

        ten1 = tf.concat((ten1, inp0), axis=3)
        ten1 = tl.conv2d_transpose(ten1, dim, 3, 1, 'valid', activation=leRU_batch_norm)
        ten1 = tl.conv2d(ten1, 3, 3, 1, 'valid', activation=tf.nn.tanh)
        ten1 = ten1 * 0.505 + 0.5
        return ten1
Example #16
def conv_tp(ten, dim, idx):
    filters = (2**idx) * dim
    return tl.conv2d_transpose(ten,
                               filters,
                               3,
                               2,
                               'same',
                               activation=leru_batch_norm)
Example #17
def conv_tp_conv(ten, filters):
    ten = tl.conv2d_transpose(ten,
                              filters,
                              3,
                              1,
                              'valid',
                              activation=leru_batch_norm)
    ten = tl.conv2d(ten, filters, 3, 1, 'valid', activation=tf.nn.leaky_relu)
    return ten
Example #18
    def inference(self, input):

        conv = conv2d_transpose(input,
                                self.noutput,
                                kernel_size=(3, 3),
                                strides=(2, 2),
                                padding="SAME")
        bn = batch_normalization(conv, training=self.training)
        return tf.nn.relu(bn)
Example #19
def UpSampler(input, num_outputs=None):
	'''
	Upsampling unit: uses a transposed convolution to double the spatial size.
	'''
	if not num_outputs:
		num_outputs = input.shape.as_list()[-1]
	net = conv2d_transpose(input, num_outputs, kernel_size=(3, 3), strides=(2, 2), padding="SAME")

	return net
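A quick shape check for UpSampler, assuming `conv2d_transpose` is `tf.layers.conv2d_transpose`:

x = tf.placeholder(tf.float32, [None, 32, 32, 64])
y = UpSampler(x)    # num_outputs defaults to the input's channel count
print(y.shape)      # (?, 64, 64, 64): stride (2, 2) with "SAME" padding doubles H and W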
Example #20
File: Unet.py Project: dev6969/-
    def deconv_net(self, input1, input2, filters,
                   kernel_size=[4, 4], strides=[2, 2],
                   activation=tf.nn.relu, name=None):

        concat = self.crop_and_concat(input1, input2)
        deconv2 = conv2d_transpose(inputs=concat, filters=filters,
                                   kernel_size=kernel_size, strides=strides,
                                   activation=activation,
                                   data_format="channels_last", name=name)
        # tensor = batch_normalization(tensor)
        if name is not None:
            print("{}:{}".format(name, deconv2.shape))
        return deconv2
Example #21
def build_generator(noise, reuse=False):
    with tf.variable_scope("generator") as scope:
        if reuse:
            tf.get_variable_scope().reuse_variables()

        #
        t = noise

        t = dense(inputs=t, units=7 * 7 * 256, activation=my_leaky_relu)

        t = batch_normalization(inputs=t)

        t = tf.reshape(t, shape=[-1, 7, 7, 256])

        #
        t = conv2d_transpose(inputs=t,
                             filters=128,
                             kernel_size=[5, 5],
                             strides=1,
                             padding="same",
                             activation=my_leaky_relu)

        t = batch_normalization(inputs=t)

        t = conv2d_transpose(inputs=t,
                             filters=64,
                             kernel_size=[5, 5],
                             strides=2,
                             padding="same",
                             activation=my_leaky_relu)

        t = batch_normalization(inputs=t)

        t = conv2d_transpose(inputs=t,
                             filters=1,
                             kernel_size=[5, 5],
                             strides=2,
                             padding="same",
                             activation=tf.tanh)

        image = t
        #print("\nGen output image shape: {}".format(image.shape))
        return image
Example #22
def default_conv2d_transpose(inputs, filters):
    return layers.conv2d_transpose(
        inputs,
        filters=filters,
        kernel_size=4,
        strides=(2, 2),
        padding='same',
        data_format='channels_last',
        use_bias=False,
    )
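With kernel_size=4, strides=(2, 2), and 'same' padding, this wrapper doubles the spatial dimensions of its input; a quick check with illustrative shapes:

x = tf.placeholder(tf.float32, [None, 8, 8, 256])
y = default_conv2d_transpose(x, filters=128)
print(y.shape)  # (?, 16, 16, 128)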
Example #23
    def generator(self, embed_input, noise_input, training=True, reuse=False):
        def deconv2d(x,
                     filters,
                     kernels=(5, 5),
                     strides=(2, 2),
                     output_shape=None):
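            # note: output_shape is accepted but unused; with 'same' padding and stride 2 the output size follows from the input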
            x = layers.conv2d_transpose(
                x,
                filters,
                kernels,
                strides,
                padding='same',
                kernel_initializer=tf.random_normal_initializer(mean=0,
                                                                stddev=0.02))
            # x = layers.batch_normalization(x, training=training)
            x = leaky_relu(x)
            return x

        s = self.image_size
        s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)
        with tf.variable_scope('Generator', reuse=reuse):
            encoding_vec = layers.dense(
                embed_input,
                self.encode_dim,
                name='embedding/dense',
                kernel_initializer=tf.truncated_normal_initializer(
                    mean=0, stddev=0.02))
            encoding_vec = leaky_relu(encoding_vec)
            net = tf.concat([encoding_vec, noise_input], axis=-1)
            net = layers.dense(
                net,
                self.gen_dim * 8 * s16 * s16,
                name='concat/dense',
                kernel_initializer=tf.truncated_normal_initializer(
                    mean=0, stddev=0.02))
            # net = layers.batch_normalization(net)
            net = leaky_relu(net)
            net = tf.reshape(
                net, [-1, s16, s16, self.gen_dim * 8])  # Conv1 (4 x 4 x 1024)
            net = deconv2d(net, self.gen_dim * 4,
                           output_shape=[s8, s8])  # Conv1 (8 x 8 x 512)
            net = deconv2d(net, self.gen_dim * 2,
                           output_shape=[s4, s4])  # Conv2 (16 x 16 x 256)
            net = deconv2d(net, self.gen_dim,
                           output_shape=[s2, s2])  # Conv3 (32 x 32 x 128)
            net = layers.conv2d_transpose(
                net,
                3,
                kernel_size=(5, 5),
                strides=(2, 2),
                activation=tf.nn.tanh,
                padding='same',
                kernel_initializer=tf.truncated_normal_initializer(
                    mean=0, stddev=0.02))
            return net
Example #24
    def create_generator(self, z, rate, is_training, reuse=None):
        # hard code all layer params

        upsample_power = 5  # 2**5=32
        momentum = 0.99

        with tf.variable_scope("generator", reuse=reuse):
            # Since "mode collapse" issue, feature of FC needs to be small
            # each layer come with dropout and batch_norm to improve the performance
            x = fully_connected(z, 4 * 4 * 1, activation_fn=tf.nn.leaky_relu)
            # x = dropout(x, rate = rate,training=is_training)
            x = dropout(x, rate=rate)
            x = tf.contrib.layers.batch_norm(x,
                                             is_training=is_training,
                                             decay=momentum)

            x = tf.reshape(x, shape=(-1, 4, 4, 1))
            for _ in range(upsample_power):
                x = conv2d_transpose(x,
                                     128, (5, 5),
                                     strides=(2, 2),
                                     padding='same',
                                     data_format='channels_last',
                                     activation=tf.nn.leaky_relu,
                                     use_bias=True)
                # x = dropout(x, rate = rate,training=is_training)
                x = dropout(x, rate=rate)
                x = tf.contrib.layers.batch_norm(x,
                                                 is_training=is_training,
                                                 decay=momentum)

            x = conv2d_transpose(x,
                                 1, (5, 5),
                                 strides=(1, 1),
                                 padding='same',
                                 data_format='channels_last',
                                 activation=tf.sigmoid,
                                 use_bias=True)
            # assert tf.shape(x) == (128,128,1)

            # output values lie in (0, 1) because of the final sigmoid activation
            return x
Example #25
def transpose_conv1d_layer(x,
                           n_out,
                           kernel_size,
                           stride=1,
                           activation=ACTIVATION,
                           regularize=True,
                           use_bias=True,
                           drop_rate=0.0,
                           batch_norm=BATCH_NORM,
                           training=True,
                           name=None,
                           reuse=None):

    if batch_norm:
        if name:
            x = batch_norm_layer(x, training, name=name + '_bn', reuse=reuse)
        else:
            x = batch_norm_layer(x, training, name=name, reuse=reuse)

    #wt_init = tf.truncated_normal_initializer(stddev=0.2)
    #bi_init = tf.truncated_normal_initializer(mean=BIAS_SHIFT,stddev=0.01)
    wt_init = None
    bi_init = None

    if regularize:
        wt_reg = WT_REG
        bi_reg = BI_REG
    else:
        wt_reg = None
        bi_reg = None

    x_tmp = tf.expand_dims(x, 3)
    x_tmp = tf.transpose(x_tmp, (0, 1, 3, 2))

    y_tmp = layers.conv2d_transpose(x_tmp,
                                    n_out,
                                    kernel_size=[kernel_size, 1],
                                    strides=(stride, 1),
                                    padding='same',
                                    data_format='channels_last',
                                    activation=activation,
                                    use_bias=use_bias,
                                    kernel_initializer=wt_init,
                                    bias_initializer=bi_init,
                                    kernel_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    trainable=True,
                                    name=None,
                                    reuse=None)

    y = tf.reduce_sum(y_tmp, axis=2)
    y = layers.dropout(y, rate=drop_rate, training=training)
    return y
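The layer above emulates a 1-D transposed convolution with `conv2d_transpose`: it inserts a singleton spatial axis, deconvolves with a `[kernel_size, 1]` kernel, and sums the extra axis back out. A usage sketch with illustrative shapes, assuming the module-level defaults (`ACTIVATION`, `BATCH_NORM`, `WT_REG`, `BI_REG`) and `batch_norm_layer` are defined elsewhere in the project:

signal = tf.placeholder(tf.float32, [None, 1024, 16])   # (batch, length, channels)
up = transpose_conv1d_layer(signal, n_out=8, kernel_size=25, stride=2)
print(up.shape)  # (?, 2048, 8): stride 2 doubles the temporal length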
Example #26
    def __generator__(self, noise_input):
        with tf.variable_scope("generator"):
            noise_feature = layers.dense(noise_input, 4 * 4 * 256)
            noise_feature = tf.nn.relu(noise_feature)
            noise_feature = tf.reshape(noise_feature, shape=(-1, 4, 4, 256))

            decnn1 = layers.conv2d_transpose(noise_feature,
                                             filters=128,
                                             kernel_size=5,
                                             strides=(2, 2),
                                             padding="SAME")
            decnn1 = tf.nn.relu(decnn1)

            decnn2 = layers.conv2d_transpose(decnn1,
                                             filters=64,
                                             kernel_size=5,
                                             strides=(2, 2),
                                             padding="SAME")
            decnn2 = tf.nn.relu(decnn2)

            if cfg.params["dataset"] == "mnist":
                output = layers.conv2d_transpose(decnn2,
                                                 filters=1,
                                                 kernel_size=5,
                                                 strides=(2, 2),
                                                 padding="SAME")
            elif cfg.params["dataset"] == "cifar":
                output = layers.conv2d_transpose(decnn2,
                                                 filters=3,
                                                 kernel_size=5,
                                                 strides=(2, 2),
                                                 padding="SAME")
            output = tf.nn.tanh(output)
            # print the architecture:
            print("Generator-Architecture")
            print("input:{}".format(noise_input.shape))
            print("project:{}".format(noise_feature.shape))
            print("decnn1:{}".format(decnn1.shape))
            print("decnn2:{}".format(decnn2.shape))
            print("output:{}".format(output.shape))
            return output
Example #27
def generator(inp0, dim, name, reuse):
    # inp0 = tf.placeholder(tf.float32, [None, G.size, G.size, 4])  # G.size == 2 ** 8
    with tf.variable_scope(name, reuse=reuse):
        ten1 = tl.conv2d(inp0, 1 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)
        ten2 = tl.conv2d(ten1, 2 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)
        ten3 = tl.conv2d(ten2, 4 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)
        ten4 = tl.conv2d(ten3, 8 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)
        ten5 = tl.conv2d(ten4, 16 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)
        ten6 = tl.conv2d(ten5, 32 * dim, 4, 2, 'same', activation=tf.nn.leaky_relu)

        ten7 = tf.pad(ten6, paddings=tf.constant([(0, 0), (2, 2), (2, 2), (0, 0)]), mode='REFLECT')
        ten7 = tl.conv2d(ten7, 32 * dim, 3, 1, 'valid', activation=leRU_batch_norm)
        ten7 = tl.conv2d(ten7, 32 * dim, 3, 1, 'valid', activation=leRU_batch_norm)
        ten7 = ten6 + ten7

        ten6 = tl.conv2d_transpose(ten7, 64 * dim, 4, 2, 'same', activation=leRU_batch_norm)
        ten5 = tl.conv2d_transpose(ten6, 32 * dim, 4, 2, 'same', activation=leRU_batch_norm)
        ten4 = tl.conv2d_transpose(ten5, 16 * dim, 4, 2, 'same', activation=leRU_batch_norm)
        ten3 = tl.conv2d_transpose(ten4, 8 * dim, 4, 2, 'same', activation=leRU_batch_norm)
        ten2 = tl.conv2d_transpose(ten3, 4 * dim, 4, 2, 'same', activation=leRU_batch_norm)
        ten1 = tl.conv2d_transpose(ten2, 2 * dim, 4, 2, 'same', activation=leRU_batch_norm)

        ten1 = tl.conv2d(tf.concat((ten1, inp0), axis=3), 1 * dim, 3, 1, 'same', activation=leRU_batch_norm)
        ten1 = tl.conv2d(ten1, 3, 3, 1, 'same', activation=tf.nn.tanh)
        ten1 = ten1 * 0.505 + 0.5
        return ten1
Example #28
def _conv2d_transpose(input,
                      name,
                      filters=64,
                      kernel_size=4,
                      strides=2,
                      padding="same"):
    with tf.variable_scope(name):
        return conv2d_transpose(inputs=input,
                                name=name,
                                filters=filters,
                                kernel_size=kernel_size,
                                strides=strides,
                                padding=padding)
Example #29
    def inference(self, input):

        output = input
        for layer in self.layers:
            output = layer.inference(output)

        output = conv2d_transpose(output,
                                  self.num_classes,
                                  kernel_size=(3, 3),
                                  strides=2,
                                  padding="SAME",
                                  activation=None)

        return output
Example #30
def CONV(inputs, filters, kernel_size, strides, padding, is_transpose):
    if is_transpose:
        conv = conv2d_transpose(inputs=inputs,
                                filters=filters,
                                kernel_size=kernel_size,
                                strides=strides,
                                padding=padding)
    else:
        conv = conv2d(inputs=inputs,
                      filters=filters,
                      kernel_size=kernel_size,
                      strides=strides,
                      padding=padding)
    return conv
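A usage sketch for the CONV dispatcher, assuming `conv2d` and `conv2d_transpose` come from `tf.layers`: with stride 2 and 'same' padding, the plain convolution halves the spatial size and the transposed convolution doubles it.

x = tf.placeholder(tf.float32, [None, 64, 64, 32])
down = CONV(x, filters=64, kernel_size=3, strides=2, padding='same', is_transpose=False)  # (?, 32, 32, 64)
up = CONV(down, filters=32, kernel_size=3, strides=2, padding='same', is_transpose=True)  # (?, 64, 64, 32)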