Example #1
 def reconstructor(self, z, y=None):
     with tf.variable_scope("reconstructor") as scope:
         """Encoder::: 3*CNN layers"""
         z_encode = ly.conv2d(z,
                              8,
                              kernel_size=3,
                              activation_fn=tf.nn.relu,
                              normalizer_fn=ly.batch_norm,
                              scope='r_en1')
         z_encode = ly.conv2d(z_encode,
                              8,
                              kernel_size=3,
                              activation_fn=tf.nn.relu,
                              normalizer_fn=ly.batch_norm,
                              scope='r_en2')
         z_encode = ly.conv2d(z_encode,
                              8,
                              kernel_size=3,
                              activation_fn=tf.nn.relu,
                              normalizer_fn=ly.batch_norm,
                              scope='r_en3')
         """Decoder:::3 * CNN_transpose layers"""
         z_decode = ly.conv2d_transpose(
             z_encode,
             8,
             3,
             stride=2,
             activation_fn=tf.nn.relu,
             normalizer_fn=ly.batch_norm,
             padding="SAME",
             weights_initializer=tf.random_normal_initializer(0, 0.1),
             scope='r_de1')
         z_decode = ly.conv2d_transpose(
             z_decode,
             8,
             3,
             stride=2,
             activation_fn=tf.nn.relu,
             normalizer_fn=ly.batch_norm,
             padding="SAME",
             weights_initializer=tf.random_normal_initializer(0, 0.1),
             scope='r_de2')
         z_decode = ly.conv2d_transpose(
             z_decode,
             1,
             3,
             stride=2,
             activation_fn=tf.nn.relu,
             normalizer_fn=ly.batch_norm,
             padding="SAME",
             weights_initializer=tf.random_normal_initializer(0, 0.1),
             scope='r_de3')
         return z_decode
Example #2
File: nets.py  Project: 1202kbs/GAN
	def __call__(self, z):
		with tf.variable_scope(self.name) as scope:
			#g = tcl.fully_connected(z, 1024, activation_fn = tf.nn.relu, normalizer_fn=tcl.batch_norm,
			#						weights_initializer=tf.random_normal_initializer(0, 0.02))
			g = tcl.fully_connected(z, 7*7*128, activation_fn = tf.nn.relu, normalizer_fn=tcl.batch_norm,
									weights_initializer=tf.random_normal_initializer(0, 0.02))
			g = tf.reshape(g, (-1, 7, 7, 128))  # 7x7
			g = tcl.conv2d_transpose(g, 64, 4, stride=2, # 14x14x64
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			g = tcl.conv2d_transpose(g, 1, 4, stride=2, # 28x28x1
										activation_fn=tf.nn.sigmoid, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			return g
Example #3
def generator(z,
              n_features_first=N_FEATURES_FIRST,
              n_features_reduction_factor=2,
              min_features=64,
              BN=True,
              power=5,
              init_method='He'):

    if BN:
        normalizer = layers.batch_norm
    else:  # WGAN-GP
        normalizer = None

    if init_method in ['He']:
        init = layers.variance_scaling_initializer()
    else:
        init = layers.xavier_initializer()

    # the layers use relu activations (default)
    with tf.variable_scope('generator'):
        # first layer (fully connected) -> [B, 4, 4, n_features]
        z = layers.fully_connected(z,
                                   num_outputs=4 * 4 * n_features_first,
                                   trainable=True,
                                   normalizer_fn=normalizer,
                                   weights_initializer=init)
        z = tf.reshape(z, [-1, 4, 4, n_features_first
                           ])  # we use the dimensions as NHWC resp. BHWC

        # middle layers (convolutions) -> [B, 4*(2**(power-3)), 4*(2**(power-3)), n_features2]
        for i in range(power - 3):
            n_out = max(
                int(n_features_first / (n_features_reduction_factor**(i + 1))),
                min_features)
            z = layers.conv2d_transpose(z,
                                        num_outputs=n_out,
                                        kernel_size=5,
                                        stride=2,
                                        trainable=True,
                                        normalizer_fn=normalizer,
                                        weights_initializer=init)

        # last layer (convolution) -> [B, (2**power), (2**power), 1] -> [B, (2**power)**2]
        z = layers.conv2d_transpose(z,
                                    num_outputs=1,
                                    kernel_size=5,
                                    stride=2,
                                    activation_fn=tf.nn.sigmoid,
                                    trainable=True,
                                    weights_initializer=init)
        size = 2**power
        return tf.reshape(z, shape=[-1, size * size])
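
A minimal usage sketch for the generator above (an illustration only, assuming TensorFlow 1.x with tf.contrib.layers available as layers and the module-level constant N_FEATURES_FIRST defined; the value 512 below is an assumption). With power=5 the shape comments work out to a flattened 32x32 output:

import tensorflow as tf
from tensorflow.contrib import layers

z = tf.random_normal([16, 128])                      # 16 latent vectors
fake = generator(z, n_features_first=512, power=5)   # 4x4 -> 8x8 -> 16x16 -> 32x32
print(fake.get_shape())                              # (16, 1024), i.e. flattened 32x32 images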
Example #4
def Generator(n_samples,
              conditions=None,
              mean=None,
              variance=None,
              noise=None):
    if noise is None:  # sample from Gaussian
        noise = tf.random_normal([n_samples, NOISE_DIM], mean=0.0, stddev=1.0)
    if ((MODE == 'enc' or MODE == 'vae') and mean is not None
            and variance is not None):  # input: mean and var
        inputs = sample_z([n_samples, Z_DIM], mean, variance)
    elif (MODE == 'cond' or MODE == 'cond_ordered'):  # input: labels
        labels = tf.one_hot(tf.cast(conditions, tf.uint8),
                            10)  # [BATCH_SIZE, 10]
        inputs = tf.concat([noise, labels], 1)  #to: (BATCH_SIZE, NOISE_DIM+10)
    else:
        inputs = noise  # (BATCH_SIZE, NOISE_DIM)
    out = lays.fully_connected(
        inputs,
        4 * 4 * 4 * DIM,
        reuse=tf.AUTO_REUSE,  # expansion
        weights_initializer=tf.initializers.glorot_uniform(),
        scope='Gen.Input')
    out = tf.reshape(out, [-1, 4 * DIM, 4, 4])
    out = tf.transpose(out, [0, 2, 3, 1], name='NCHW_to_NHWC')
    out = lays.conv2d_transpose(
        out,
        2 * DIM,
        kernel_size=5,
        stride=2,
        scope='Gen.1',
        weights_initializer=tf.initializers.he_uniform(),
        reuse=tf.AUTO_REUSE)
    out = out[:, :7, :7, :]  # because output needs to be 28x28
    out = lays.conv2d_transpose(
        out,
        DIM,
        kernel_size=5,
        stride=2,
        scope='Gen.2',
        weights_initializer=tf.initializers.he_uniform(),
        reuse=tf.AUTO_REUSE)
    out = lays.conv2d_transpose(
        out,
        1,
        kernel_size=5,
        stride=2,
        scope='Gen.3',
        weights_initializer=tf.initializers.he_uniform(),
        reuse=tf.AUTO_REUSE,
        activation_fn=tf.nn.sigmoid)
    out = tf.transpose(out, [0, 3, 1, 2], name='NHWC_to_NCHW')
    return tf.reshape(out, [BATCH_SIZE, output_dim])
Example #5
def encoder2(y, z):
    yb = tf.reshape(y, [-1, 1, 1, 40])
    h = tf.concat([y, z], axis=1)
    h = layers.fully_connected(h, 1024, weights_initializer=initializer)
    h = layers.batch_norm(h, activation_fn=lrelu)

    h = layers.fully_connected(h,
                               64 * 8 * 4 * 4,
                               activation_fn=None,
                               weights_initializer=initializer)
    h = tf.reshape(h, [-1, 4, 4, 64 * 8])
    h = layers.batch_norm(h, activation_fn=lrelu)
    h = conv_cond_concat(h, yb)

    h = layers.conv2d_transpose(h,
                                64 * 4,
                                5,
                                stride=2,
                                padding='SAME',
                                activation_fn=None,
                                weights_initializer=initializer)
    h = layers.batch_norm(h, activation_fn=lrelu)
    # h = conv_cond_concat(h, yb)

    h = layers.conv2d_transpose(h,
                                64 * 2,
                                5,
                                stride=2,
                                padding='SAME',
                                activation_fn=None,
                                weights_initializer=initializer)
    h = layers.batch_norm(h, activation_fn=lrelu)
    # h = conv_cond_concat(h, yb)

    h = layers.conv2d_transpose(h,
                                64 * 1,
                                5,
                                stride=2,
                                padding='SAME',
                                activation_fn=None,
                                weights_initializer=initializer)
    h = layers.batch_norm(h, activation_fn=lrelu)

    # h = conv_cond_concat(h, yb)
    h = layers.conv2d_transpose(h,
                                3,
                                5,
                                stride=2,
                                padding='SAME',
                                activation_fn=tf.nn.tanh,
                                weights_initializer=initializer)
    return h
Example #6
def Generate( z ):
    """
    Defined by GAN.
    The model is to generate image to cheat descrinator.
    Design with Conv2DTranspose and add batch normalization.
    
    Args:
        inputs : a 2-D tensor , shape is [batch size , z_dim]. Dtype must be float.
                Notice : z_dim nust be multiple of 4
    
    return:
        4-D tensor : [batch size , 64 , 64 , 3].
    """
    bias_regular = ly.l2_regularizer(0.2)
    print("Build generator")
    ## z is 512
    x = tf.reshape(z , [-1,1,1,int(z.shape[1])])
    x = ly.conv2d( x , 2048 , [1,1] , stride=[1,1] , activation_fn=tf.nn.selu 
                  , biases_regularizer=bias_regular 
                  , scope="g_conv_0")
    x = tf.reshape( x , [ -1,2,2,2048//4 ] )
    x = ly.conv2d_transpose( x , 512 , [4,4] , stride=[2,2] , activation_fn=tf.nn.selu 
                            , biases_regularizer=bias_regular
                            , padding="SAME" , scope="g_0")
    print(x.shape)
    ## 4x4x128
    x = ly.batch_norm( x , scope="g_bn_1")
    x = ly.conv2d_transpose( x , 256 , [4,4] , stride=[2,2] , activation_fn=tf.nn.selu
                            , biases_regularizer=bias_regular
                            , padding="SAME" , scope="g_1")
    
#     print(x.shape)
    ## 4x4  why should kernel size be 6x6 not 5x5 ? 
    x = ly.conv2d_transpose( x , 128 , [5,5] , stride=[2,2] , activation_fn=tf.nn.selu
                            , biases_regularizer=bias_regular
                            , padding="SAME" , scope="g_2")
    x = ly.batch_norm( x , scope="g_bn_2")
    
    x = ly.conv2d_transpose( x , 96 , [5,5] , stride=[2,2] , activation_fn=tf.nn.selu
                            , biases_regularizer=bias_regular
                            , padding="SAME" , scope="g_3")
#     x = tf.add(x , x_1 )
    print(x.shape)
    x = ly.conv2d_transpose( x , 64 , [3,3] , stride=[2,2] , activation_fn=tf.nn.selu
                            , biases_regularizer=bias_regular
                            , padding="SAME" , scope="g_4")
    x = ly.conv2d( x , 3 , [1,1] , stride=[1,1] , activation_fn=tf.nn.tanh
                            , biases_regularizer=bias_regular
                            , padding="SAME" , scope="g_5")
    print(x.shape)
    
    return x
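
A usage sketch matching the docstring above (illustrative only; assumes TensorFlow 1.x with tf.contrib.layers imported as ly, and the 512-dimensional z noted in the code):

import tensorflow as tf

z = tf.random_normal([8, 512])   # batch of 8 latent vectors, z_dim = 512
imgs = Generate(z)               # -> [8, 64, 64, 3], values in (-1, 1) from the final tanh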
Example #7
def autoencoder(inputs):
    net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME')

    net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME')
    net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d_transpose(net,
                                1, [5, 5],
                                stride=2,
                                padding='SAME',
                                activation_fn=tf.nn.tanh)
    return net
Example #8
def decoder(inputs):
    # decoder
    # 2 x 2 x 8    ->  8 x 8 x 16
    # 8 x 8 x 16   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  32 x 32 x 1
    net = lays.conv2d_transpose(inputs, 16, [5, 5], stride=4, padding='SAME')
    net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d_transpose(net,
                                1, [5, 5],
                                stride=2,
                                padding='SAME',
                                activation_fn=tf.nn.tanh)
    return net
Example #9
def decoder(input_tensor):
    net = tf.expand_dims(input_tensor, 1)
    net = tf.expand_dims(net, 1)
    net = layers.conv2d_transpose(net, 128, 3, padding='VALID')
    net = layers.conv2d_transpose(net, 64, 5, padding='VALID')
    net = layers.conv2d_transpose(net, 32, 5, stride=2)
    net = layers.conv2d_transpose(net,
                                  1,
                                  5,
                                  stride=2,
                                  activation_fn=tf.nn.sigmoid)
    net = layers.flatten(net)
    return net
Example #10
def generator(z):
    train = ly.fully_connected(
        z, 4 * 4 * 512, activation_fn=lrelu, normalizer_fn=ly.batch_norm)
    train = tf.reshape(train, (-1, 4, 4, 512))
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, channel, 3, stride=1,
                                activation_fn=tf.nn.tanh, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    return train
Example #11
def generator_conv(z, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        train = ly.fully_connected(z, 4 * 4 * 512, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
        train = tf.reshape(train, (-1, 4, 4, 512))
        train = ly.conv2d_transpose(train, 256, 5, stride=2,
                                    activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02),normalizer_params={'is_training':True})
        train = ly.conv2d_transpose(train, 128, 5, stride=2,
                                    activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02),normalizer_params={'is_training':True})
        train = ly.conv2d_transpose(train, channel, 5, stride=2,
                                    activation_fn=tf.nn.tanh, padding='SAME')#, normalizer_fn=ly.batch_norm, weights_initializer=tf.random_normal_initializer(0, 0.02),normalizer_params={'is_training':True})
    return train
Example #12
def decoder(x, activation):
    x = ly.fully_connected(x,
                           64 * 4 * 8 * 8,
                           activation_fn=tf.nn.relu,
                           normalizer_fn=ly.batch_norm,
                           weights_initializer=tf.random_normal_initializer(
                               0, 0.02))
    x = tf.reshape(x, shape=[-1, 8, 8, 64 * 4])
    #unsample1 = ly.conv2d_transpose(x,512,kernel_size=5,stride=2,padding='SAME',activation_fn=activation,normalizer_fn=ly.batch_norm,weights_initializer=tf.random_normal_initializer(0, 0.02))

    upsample2 = ly.conv2d_transpose(
        x,
        256,
        kernel_size=5,
        stride=2,
        padding='SAME',
        activation_fn=activation,
        normalizer_fn=ly.batch_norm,
        weights_initializer=tf.random_normal_initializer(0, 0.02))

    #unsample3 = ly.conv2d_transpose(upsample2,1,kernel_size=4,stride=1,padding='SAME',activation_fn=lrelu,normalizer_fn=ly.batch_norm)
    upsample4 = ly.conv2d_transpose(
        upsample2,
        128,
        kernel_size=5,
        stride=2,
        padding='SAME',
        activation_fn=activation,
        normalizer_fn=ly.batch_norm,
        weights_initializer=tf.random_normal_initializer(0, 0.02))

    upsample5 = ly.conv2d_transpose(
        upsample4,
        32,
        kernel_size=5,
        stride=2,
        padding='SAME',
        activation_fn=activation,
        normalizer_fn=ly.batch_norm,
        weights_initializer=tf.random_normal_initializer(0, 0.02))

    upsample6 = ly.conv2d_transpose(
        upsample5,
        3,
        kernel_size=5,
        stride=1,
        padding='SAME',
        activation_fn=tf.nn.tanh,
        weights_initializer=tf.random_normal_initializer(0, 0.02))

    return upsample6
Example #13
    def _build_generator_contrib(self, tensor=None, training=False):
        if tensor is None:
            raise ValueError('None value input to G.')
        if self.batch_norm_G:

            def bn(x, name=None):
                return tf.layers.batch_normalization(x,
                                                     fused=False,
                                                     training=training)
        else:
            bn = tf.identity
        with tf.variable_scope('generator', reuse=tf.AUTO_REUSE) as scope:
            n_filt = 512
            use_bias = False
            with tf.variable_scope('initial.{0}-{1}'.format(
                    self.n_noise, n_filt)):
                tensor = tf.nn.relu(
                    bn(tf.layers.dense(tf.reshape(tensor, [-1, self.n_noise]),
                                       units=4 * 4 * n_filt,
                                       kernel_initializer=init_normal(0.02),
                                       use_bias=True,
                                       name='dense'),
                       name='bn'))
                tensor = tf.reshape(tensor, shape=[-1, 4, 4, n_filt])

            # upscaling layers
            for _ in range(3):
                with tf.variable_scope('pyramid.{0}-{1}'.format(
                        n_filt, n_filt // 2)):
                    tensor = tf.nn.relu(
                        bn(tfcl.conv2d_transpose(
                            tensor,
                            n_filt // 2, [5, 5], [2, 2],
                            'SAME',
                            activation_fn=None,
                            weights_initializer=init_normal(0.02),
                            biases_initializer=init_normal(0.01)),
                           name='bn'))
                n_filt //= 2
            # final layer
            with tf.variable_scope('final.{0}-{1}'.format(
                    n_filt, self.n_channel)):
                tensor = tfcl.conv2d_transpose(
                    tensor,
                    self.n_channel, [5, 5], [2, 2],
                    'SAME',
                    activation_fn=tf.tanh,
                    weights_initializer=init_normal(0.02),
                    biases_initializer=init_normal(0.01))
            return tensor
Example #14
    def __call__(self, i):
        with tf.variable_scope(self.name):
            if self.reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse is False
                self.reuse = True
            g = tcl.fully_connected(i,
                                    self.size * self.size * 1024,
                                    activation_fn=tf.nn.relu,
                                    normalizer_fn=tcl.batch_norm)
            g = tf.reshape(g, (-1, self.size, self.size, 1024))  # size
            g = tcl.conv2d_transpose(
                g,
                512,
                3,
                stride=2,  # size*2
                activation_fn=tf.nn.relu,
                normalizer_fn=tcl.batch_norm,
                padding='SAME',
                weights_initializer=tf.random_normal_initializer(0, 0.02))
            g = tcl.conv2d_transpose(
                g,
                256,
                3,
                stride=2,  # size*4
                activation_fn=tf.nn.relu,
                normalizer_fn=tcl.batch_norm,
                padding='SAME',
                weights_initializer=tf.random_normal_initializer(0, 0.02))
            g = tcl.conv2d_transpose(
                g,
                128,
                3,
                stride=2,  # size*8
                activation_fn=tf.nn.relu,
                normalizer_fn=tcl.batch_norm,
                padding='SAME',
                weights_initializer=tf.random_normal_initializer(0, 0.02))

            g = tcl.conv2d_transpose(
                g,
                self.channel,
                3,
                stride=2,  # size*16
                activation_fn=tf.nn.sigmoid,
                padding='SAME',
                weights_initializer=tf.random_normal_initializer(0, 0.02))
            return g

Example #15
 def generator(self, z):
     with tf.variable_scope('generator') as scope:
         img = layer.fully_connected(self.z, num_outputs=4*4*512, 
                                     activation_fn=leak_relu, normalizer_fn=layer.batch_norm, weights_initializer=tf.random_normal_initializer(0, 0.02))
         img = tf.reshape(img, [-1, 4, 4, 512])
         img = layer.conv2d_transpose(img, num_outputs=256, kernel_size=3, stride=2, padding='SAME',
                                      activation_fn=tf.nn.relu, normalizer_fn=layer.batch_norm, weights_initializer=tf.random_normal_initializer(0, 0.02))
         img = layer.conv2d_transpose(img, num_outputs=128, kernel_size=3, stride=2, padding='SAME', 
                                      activation_fn=tf.nn.relu, normalizer_fn=layer.batch_norm, weights_initializer=tf.random_normal_initializer(0, 0.02))
         img = layer.conv2d_transpose(img, num_outputs=64, kernel_size=3, stride=2, padding='SAME', 
                                      activation_fn=tf.nn.relu, normalizer_fn=layer.batch_norm, weights_initializer=tf.random_normal_initializer(0, 0.02))
         img = layer.conv2d_transpose(img, num_outputs=self.channel, kernel_size=3, stride=1, padding='SAME', 
                                      activation_fn=tf.nn.tanh, weights_initializer=tf.random_normal_initializer(0, 0.02))
     return img
Example #16
    def decoder_conv(self,latent,reuse=False):
        with tf.variable_scope('reconstructor') as scope:
            if reuse:
                scope.reuse_variables()

            latent = ly.fully_connected(latent, 7*7*256, activation_fn =tf.nn.leaky_relu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            latent = self.bo_batch_norm(latent, self.is_train)
            latent = tf.reshape(latent,shape=[ -1, 7, 7, 256])
            dim = 32
            upsample1 = ly.conv2d_transpose(latent, dim*4, kernel_size=3, stride=2, padding='SAME', activation_fn=tf.nn.leaky_relu)
            upsample2 = ly.conv2d_transpose(upsample1, dim*2, kernel_size=3, stride=2, padding='SAME', activation_fn=tf.nn.leaky_relu)
            upsample3 = ly.conv2d_transpose(upsample2, dim*1, kernel_size=3, stride=2, padding='SAME', activation_fn=tf.nn.leaky_relu)
            upsample4 = ly.conv2d_transpose(upsample3, 3, kernel_size=3, stride=2, padding='SAME', activation_fn=tf.nn.tanh, weights_initializer=tf.random_normal_initializer(0, 0.02))
        return upsample4
Example #17
def FastStyleNet(n_in):
    # w = math.sqrt(2)
    c1 = tf.nn.leaky_relu(layers.conv2d(n_in, 32, kernel_size=9, stride=1, rate=1, padding='SAME', normalizer_fn=layers.layer_norm, weights_initializer=layers.xavier_initializer(uniform=False)))
    c2 = tf.nn.leaky_relu(layers.conv2d(c1, 64, kernel_size=4, stride=2, rate=1, padding='SAME', normalizer_fn=layers.layer_norm, weights_initializer=layers.xavier_initializer(uniform=False)))
    c3 = tf.nn.leaky_relu(layers.conv2d(c2, 128, kernel_size=4, stride=2, rate=1, padding='SAME', normalizer_fn=layers.layer_norm, weights_initializer=layers.xavier_initializer(uniform=False)))
    r1 = ResidualBlock(c3, 128)
    r2 = ResidualBlock(r1, 128)
    r3 = ResidualBlock(r2, 128)
    r4 = ResidualBlock(r3, 128)
    r5 = ResidualBlock(r4, 128)
    d1 = tf.nn.leaky_relu(layers.conv2d_transpose(r5, 64, kernel_size=4, stride=2, padding='SAME', normalizer_fn=layers.layer_norm, weights_initializer=layers.xavier_initializer(uniform=False)))
    d2 = tf.nn.leaky_relu(layers.conv2d_transpose(d1, 32, kernel_size=4, stride=2, padding='SAME', normalizer_fn=layers.layer_norm, weights_initializer=layers.xavier_initializer(uniform=False)))
    d3 = tf.tanh(layers.conv2d_transpose(d2, 3, kernel_size=9, stride=1, padding='SAME', normalizer_fn=layers.layer_norm, weights_initializer=layers.xavier_initializer(uniform=False)))
    return (d3 + 1) * 127.5
Example #18
def decoder(inputs):
    # 64 => 2000
    h1 = Layers.fully_connected(inputs, 2000)
    # 2000 => 256
    h2 = Layers.fully_connected(h1, 256)
    # 256 => 4x4x16
    h2 = tf.reshape(h2, [-1, 4, 4, 16])
    # 4x4x16 => 8x8x16
    h3 = Layers.conv2d_transpose(h2, 16, 5, 2)
    # 8x8x16 => 16x16x8
    h4 = Layers.conv2d_transpose(h3, 8, 5, 2)
    # 16x16x8 => 32x32x3
    h5 = Layers.conv2d_transpose(h4, 3, 5, 2, activation_fn=tf.sigmoid)
    return h5
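
A quick shape check against the comments above (a sketch, assuming TensorFlow 1.x and tf.contrib.layers bound to the name Layers as in the snippet):

import tensorflow as tf

codes = tf.placeholder(tf.float32, [None, 64])   # 64-dimensional latent codes
recon = decoder(codes)
print(recon.get_shape())                         # (?, 32, 32, 3)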
Example #19
def energyDecoder(encoded, reuse=False):
    sc = tf.get_variable_scope()
    with tf.variable_scope(sc, reuse=reuse):

        conv5 = layers.conv2d_transpose(encoded,
                                        256,
                                        4,
                                        stride=2,
                                        normalizer_fn=layers.batch_norm,
                                        activation_fn=None,
                                        scope='d_conv5')
        conv5 = lrelu(conv5)

        conv6 = layers.conv2d_transpose(conv5,
                                        128,
                                        4,
                                        stride=2,
                                        normalizer_fn=layers.batch_norm,
                                        activation_fn=None,
                                        scope='d_conv6')
        conv6 = lrelu(conv6)

        conv7 = layers.conv2d_transpose(conv6,
                                        64,
                                        4,
                                        stride=2,
                                        normalizer_fn=layers.batch_norm,
                                        activation_fn=None,
                                        scope='d_conv7')
        conv7 = lrelu(conv7)

        conv8 = layers.conv2d_transpose(conv7,
                                        2,
                                        4,
                                        stride=2,
                                        activation_fn=tf.nn.tanh,
                                        scope='d_conv8')

        print('encoded:', encoded)
        print('conv5:', conv5)
        print('conv6:', conv6)
        print('conv7:', conv7)
        print('conv8:', conv8)

        print('END D\n')
        tf.add_to_collection('vars', conv5)
        tf.add_to_collection('vars', conv6)
        tf.add_to_collection('vars', conv7)
        tf.add_to_collection('vars', conv8)
        return conv8
Example #20
	def __call__(self, z):
		with tf.variable_scope(self.name) as scope:
			g = tcl.fully_connected(z, self.size * self.size * 1024, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			g = tf.reshape(g, (-1, self.size, self.size, 1024))  # size
			g = tcl.conv2d_transpose(g, 512, 3, stride=2, # size*2
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			g = tcl.conv2d_transpose(g, 256, 3, stride=2, # size*4
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			g = tcl.conv2d_transpose(g, 128, 3, stride=2, # size*8
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			
			g = tcl.conv2d_transpose(g, self.channel, 3, stride=2, # size*16
										activation_fn=tf.nn.sigmoid, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			return g
Example #21
File: nets.py  Project: 1202kbs/GAN
	def __call__(self, z):
		with tf.variable_scope(self.name) as scope:
			g = tcl.fully_connected(z, self.size * self.size * 1024, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			g = tf.reshape(g, (-1, self.size, self.size, 1024))  # size
			g = tcl.conv2d_transpose(g, 512, 3, stride=2, # size*2
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			g = tcl.conv2d_transpose(g, 256, 3, stride=2, # size*4
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			g = tcl.conv2d_transpose(g, 128, 3, stride=2, # size*8
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			
			g = tcl.conv2d_transpose(g, self.channel, 3, stride=2, # size*16
										activation_fn=tf.nn.sigmoid, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			return g
Example #22
    def _generator(self, z, batch_size=None, is_training=True, reuse=False):
        with tf.variable_scope('Generator'):
            if reuse:
                tf.get_variable_scope().reuse_variables()

            s = int(self.s / 2**self.lvls)
            c = int(self.gf_dim * 2**(min(self.lvls - 1, 3)))
            normalizer_params = {
                'is_training': is_training,
                'decay': 0.9,
                'scale': True
            } if self.g_bn_fn else None
            h0 = layers.fully_connected(
                z,
                s * s * c,
                activation_fn=self.activation_fn,
                normalizer_fn=self.g_bn_fn,
                normalizer_params=normalizer_params,
                # weights_initializer=self.w_init,
                scope="h0")
            print(h0.name, h0.get_shape())
            x_hat = tf.reshape(h0, shape=[-1, s, s, c], name='h1')
            print(x_hat.name, x_hat.get_shape())
            for l in range(self.lvls - 1, 0, -1):
                c = int(self.gf_dim * 2**(min(l - 1, 3)))
                x_hat = layers.conv2d_transpose(
                    x_hat,
                    c,
                    kernel_size=self.kernel_size,
                    stride=2,
                    activation_fn=self.activation_fn,
                    normalizer_fn=self.g_bn_fn,
                    normalizer_params=normalizer_params,
                    # weights_initializer=self.w_init,
                    scope="g{}".format(l))
                print(x_hat.name, x_hat.get_shape())
            x_hat = layers.conv2d_transpose(
                x_hat,
                self.c,
                kernel_size=self.kernel_size,
                stride=2,
                activation_fn=None,
                # weights_initializer=self.w_init,
                scope="g0")
            print(x_hat.name, x_hat.get_shape())

            # x_hat = self.out_activation_fn(x_hat + self.mean_img)
            x_hat = self.out_activation_fn(x_hat)
        return x_hat
Example #23
def Generator(n_samples, conditions, noise=None):
    if noise is None:
        noise = tf.random_normal([n_samples, 128])
    # conditional input: label of digit in image
    labels = tf.one_hot(tf.cast(conditions, tf.uint8), 10)  #[n_samples, 10]
    # for now just concat the inputs: label appended to noise
    output = tf.concat([noise, labels], 1)  # to: (BATCH_SIZE,128+10)
    output = lays.fully_connected(
        output,
        4 * 4 * 4 * DIM,  # expand noise input
        weights_initializer=tf.initializers.glorot_uniform(),
        reuse=tf.AUTO_REUSE,
        scope='Gen.Input')
    output = tf.reshape(output, [-1, 4 * DIM, 4, 4])
    tf.summary.image("Generator-after-input-layer-image", output)
    output = tf.transpose(output, [0, 2, 3, 1], name='NCHW_to_NHWC')
    output = lays.conv2d_transpose(
        output,
        2 * DIM,
        kernel_size=5,
        stride=2,
        weights_initializer=tf.initializers.he_uniform(),
        reuse=tf.AUTO_REUSE,
        scope='Gen.1')
    tf.summary.image(
        "Generator-after-first-deconv-layer-image", output
    )  # TODO : trasnpose necc..?; only first of batch or for loop for whole batch? tf.summary.image("fake_imgs",img_tensor,max_outputs=1)
    output = output[:, :7, :7, :]  # because output needs to be 28x28
    output = lays.conv2d_transpose(
        output,
        DIM,
        kernel_size=5,
        stride=2,
        weights_initializer=tf.initializers.he_uniform(),
        reuse=tf.AUTO_REUSE,
        scope='Gen.2')
    tf.summary.image("Generator-after-second-deconv-layer-image", output)
    output = lays.conv2d_transpose(
        output,
        1,
        kernel_size=5,
        stride=2,
        activation_fn=tf.nn.sigmoid,  #tf.tanh,  
        weights_initializer=tf.initializers.he_uniform(),
        reuse=tf.AUTO_REUSE,
        scope='Gen.3')
    tf.summary.image("Generator-after-third-deconv-layer-image", output)
    output = tf.transpose(output, [0, 3, 1, 2], name='NHWC_to_NCHW')
    return tf.layers.Flatten()(output)  #tf.reshape(output, [-1, OUTPUT_DIM])
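
A hedged usage sketch for this conditional generator (illustrative; assumes TensorFlow 1.x, tf.contrib.layers imported as lays, and module-level constants such as DIM, say DIM = 64, defined elsewhere in the project):

import tensorflow as tf

digit_labels = tf.constant([3, 1, 4, 1, 5, 9, 2, 6])   # digit classes to condition on
fakes = Generator(8, digit_labels)                      # -> [8, 784] flattened 28x28 images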
Example #24
def generator(z,
              n_features_first=N_FEATURES_FIRST,
              n_features_reduction_factor=2,
              fix_first_layers=False,
              fix_last_layer=False,
              fix_2last_layer=False,
              architecture='WGANGP'):

    first_layers_trainable = not fix_first_layers
    last_layer_trainable = not fix_last_layer
    last2_layer_trainable = not fix_2last_layer

    if architecture == 'DCGAN':
        normalizer = layers.batch_norm
    else:  # WGAN-GP
        normalizer = None

    # the layers use relu activations (default)
    with tf.variable_scope('generator'):
        z = layers.fully_connected(z,
                                   num_outputs=4 * 4 * n_features_first,
                                   trainable=first_layers_trainable,
                                   normalizer_fn=normalizer)
        z = tf.reshape(z, [-1, 4, 4, n_features_first
                           ])  # we use the dimensions as NHWC resp. BHWC

        z = layers.conv2d_transpose(
            z,
            num_outputs=int(n_features_first / n_features_reduction_factor),
            kernel_size=5,
            stride=2,
            trainable=first_layers_trainable,
            normalizer_fn=normalizer)
        z = layers.conv2d_transpose(
            z,
            num_outputs=int(n_features_first /
                            (n_features_reduction_factor**2)),
            kernel_size=5,
            stride=2,
            trainable=last2_layer_trainable,
            normalizer_fn=normalizer)
        z = layers.conv2d_transpose(z,
                                    num_outputs=1,
                                    kernel_size=5,
                                    stride=2,
                                    activation_fn=tf.nn.sigmoid,
                                    trainable=last_layer_trainable)
        # FK: crop 2 pixels from each border of the 32x32 image to get 28x28
        return z[:, 2:-2, 2:-2, :]
Example #25
def generator(z , reuse = True):
    train = ly.fully_connected(
        z, 4 * 4 * 512, activation_fn=lrelu, normalizer_fn=ly.batch_norm)
    train = tf.reshape(train, (-1, 4, 4, 512))
    #input ?*4*4*512 output 8*8*256(1)>23*23*128(2)>69*69*64(3)>69*69*1(4)
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='same', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 128, 9, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='valid', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 64, 3, stride=3,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, channel, 3, stride=1,
                                activation_fn=tf.nn.tanh, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    print(train.name)
    return train
Example #26
def v9():
    l0 = tf.placeholder(tf.float32, (None, 368, 368, 3))

    # feature extraction
    l1 = c4c(l0, (64, 64, 64, 64, 64), 'module_1')

    l2 = c4c(l1, (128, 128, 128, 128, 128), 'module_2')

    l3 = c4c(l2, (256, 256, 256, 256, 256), 'module_3')

    fmap = c4(l3, (512, 512, 256, 256), 'module_4')

    l5_1 = c4(fmap, (128, 128, 128, 512), 'stage_1_1')
    dmap_1 = layers.conv2d(l5_1, 52, 1, activation_fn=None)  # 26*2 limbs

    concat_1 = tf.concat((dmap_1, fmap), axis=3)

    dmap_2 = c5(concat_1, 52, 'stage_2')
    concat_2 = tf.concat((dmap_2, fmap), axis=3)

    dmap_3 = c5(concat_2, 52, 'stage_3')
    concat_3 = tf.concat((dmap_3, fmap), axis=3)

    dmap_4 = c5(concat_3, 52, 'stage_4')
    concat_4 = tf.concat((dmap_4, fmap), axis=3)

    dmap_5 = c5(concat_4, 52, 'stage_5')
    concat_5 = tf.concat((dmap_5, fmap), axis=3)

    dmap_6 = c5(concat_5, 52, 'stage_6')
    concat_6 = tf.concat((dmap_6, fmap), axis=3)

    dmap_tiny = layers.conv2d_transpose(concat_6, 52, 9, 2)
    fmap_tiny = layers.conv2d_transpose(fmap, 256, 3, 2)
    concat_tiny = tf.concat((dmap_tiny, fmap_tiny), axis=3)

    dmap_mid = layers.conv2d_transpose(concat_tiny, 52, 9, 2)
    fmap_mid = layers.conv2d_transpose(fmap_tiny, 256, 3, 2)
    concat_mid = tf.concat((dmap_mid, fmap_mid), axis=3)

    dmap_large = layers.conv2d_transpose(concat_mid, 52, 9, 2)

    dmaps = [
        dmap_1, dmap_2, dmap_3, dmap_4, dmap_5, dmap_6, dmap_tiny, dmap_mid,
        dmap_large
    ]

    return l0, dmaps
Example #27
def generator(z,
              initial_shape,
              target_shape,
              final_activate,
              init_fn=tf.random_normal_initializer(stddev=0.02),
              kernel_size=5,
              for_train=True):
    h = L.fully_connected(z,
                          initial_shape[0] * initial_shape[1] *
                          initial_shape[2],
                          activation_fn=tf.nn.relu,
                          normalizer_fn=L.batch_norm,
                          normalizer_params={
                              'is_training': for_train,
                              'center': True,
                              'scale': True
                          },
                          weights_initializer=init_fn)

    h = tf.reshape(h,
                   [-1, initial_shape[0], initial_shape[1], initial_shape[2]])
    k = math.log(target_shape[0] / initial_shape[0], 2)
    next_channel = int(initial_shape[2] / 2)
    for i in range(int(k)):
        h = L.conv2d_transpose(
            inputs=h,
            num_outputs=int(next_channel),
            kernel_size=kernel_size,
            stride=2,
            padding='same',
            activation_fn=tf.nn.relu,
            normalizer_fn=L.batch_norm,
            normalizer_params={
                'is_training': for_train,
                'center': True,
                'scale': True
            },
            # use_bias=True,
            weights_initializer=init_fn)
        next_channel /= 2

    out = L.conv2d_transpose(h,
                             num_outputs=target_shape[2],
                             kernel_size=kernel_size,
                             stride=1,
                             activation_fn=final_activate,
                             weights_initializer=init_fn)
    return out
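
One plausible call (a sketch, not from the original project; assumes TensorFlow 1.x with tf.contrib.layers imported as L, and that the standard math module used above is imported). Since k = log2(32 / 4) = 3, the loop builds three stride-2 upsampling layers before the final stride-1 output layer:

import tensorflow as tf

z = tf.random_normal([4, 100])
imgs = generator(z,
                 initial_shape=[4, 4, 256],
                 target_shape=[32, 32, 3],
                 final_activate=tf.nn.tanh)
print(imgs.get_shape())   # (4, 32, 32, 3)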
Example #28
def autoencoder(inputs):
    # encoder
    # 32 x 32 x 1   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  8 x 8 x 16
    # 8 x 8 x 16    ->  2 x 2 x 8
    net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME')
    # decoder
    # 2 x 2 x 8    ->  8 x 8 x 16
    # 8 x 8 x 16   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  32 x 32 x 1
    net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME')
    net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d_transpose(net, 1, [5, 5], stride=2, padding='SAME', activation_fn=tf.nn.tanh)
    return net
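
A minimal end-to-end check of the shapes listed in the comments (a sketch, assuming TensorFlow 1.x and tf.contrib.layers imported as lays):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 1])   # e.g. 32x32 grayscale inputs
x_hat = autoencoder(x)
print(x_hat.get_shape())                            # (?, 32, 32, 1), matching the input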
Example #29
def deconv2d(input,
             deconv_info,
             is_train,
             name="deconv2d",
             info=False,
             stddev=0.01,
             activation_fn=tf.nn.relu,
             batch_norm=True):
    with tf.variable_scope(name):
        output_shape = deconv_info[0]
        k = deconv_info[1]
        s = deconv_info[2]
        _ = layers.conv2d_transpose(
            input,
            num_outputs=output_shape,
            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
            biases_initializer=tf.zeros_initializer(),
            kernel_size=[k, k],
            stride=[s, s],
            padding='SAME')
        _ = bn_act(_,
                   is_train,
                   batch_norm=batch_norm,
                   activation_fn=activation_fn)
        if info: log.info('{} {}'.format(name, _))
    return _
Example #30
    def create_one_step_pred(self, x, config, reuse):
        with tf.variable_scope(config.scope, reuse=reuse):
            # extract features
            conv1 = layers.conv2d(x, 32, 8, stride=2)
            conv2 = layers.conv2d(conv1, 32, 3)
            conv3 = layers.conv2d(conv2, 32, 3, activation_fn=None)
            conv3 = tf.nn.relu(conv3 + conv1)
            conv4 = layers.conv2d(conv3, 32, 3)
            conv5 = layers.conv2d(conv2, 32, 3, activation_fn=None)
            encoded = tf.nn.relu(conv3 + conv5)

            # predict state
            pred_frame = layers.conv2d_transpose(encoded,
                                                 1,
                                                 8,
                                                 stride=2,
                                                 activation_fn=None)

            # predict reward
            rconv1 = layers.conv2d(encoded, 32, 3)
            rpool1 = layers.max_pool2d(rconv1, 2)
            rconv2 = layers.conv2d(rpool1, 32, 3)
            rpool2 = layers.max_pool2d(rconv1, 2, padding="same")
            rpool2 = layers.flatten(rpool2)
            pred_reward = layers.fully_connected(rpool2, 1, activation_fn=None)

            return pred_frame, pred_reward
Example #31
    def build_graph(self):
        with tf.variable_scope('worker', reuse=self.config.reuse):
            # extract features
            conv1 = layers.conv2d(self.x, 32, 8, stride=2)
            conv2 = layers.conv2d(conv1, 32, 3)
            conv3 = layers.conv2d(conv2, 32, 3, activation_fn=None)
            conv3 = tf.nn.relu(conv3 + conv1)
            conv4 = layers.conv2d(conv3, 32, 3)
            conv5 = layers.conv2d(conv2, 32, 3, activation_fn=None)
            encoded = tf.nn.relu(conv3 + conv5)

            # predict state
            self.pred_state = layers.conv2d_transpose(encoded,
                                                      1,
                                                      8,
                                                      stride=2,
                                                      activation_fn=None)

            # predict reward
            rconv1 = layers.conv2d(encoded, 32, 3)
            rpool1 = layers.max_pool2d(rconv1, 2)
            rconv2 = layers.conv2d(rpool1, 32, 3)
            rpool2 = layers.max_pool2d(rconv1, 2, padding="same")
            rpool2 = layers.flatten(rpool2)
            self.pred_reward = layers.fully_connected(rpool2,
                                                      1,
                                                      activation_fn=None)
Example #32
def deconv2d(input,
             deconv_info,
             is_train,
             name="deconv2d",
             stddev=0.02,
             activation_fn=None):
    with tf.variable_scope(name):
        output_shape = deconv_info[0]
        k = deconv_info[1]
        s = deconv_info[2]
        deconv = layers.conv2d_transpose(
            input,
            num_outputs=output_shape,
            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
            biases_initializer=tf.zeros_initializer(),
            kernel_size=[k, k],
            stride=[s, s],
            padding='VALID')
        if not activation_fn:
            deconv = tf.nn.relu(deconv)
            deconv = tf.contrib.layers.batch_norm(deconv,
                                                  center=True,
                                                  scale=True,
                                                  decay=0.9,
                                                  is_training=is_train,
                                                  updates_collections=None)
        else:
            deconv = activation_fn(deconv)
        return deconv
Example #33
def Upsample(x, num, scope, training_nn, padding='SAME', act=True, norm=True):
    with arg_scope(
        [layers.conv2d_transpose],
            kernel_size=3,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            biases_initializer=tf.constant_initializer(0.0),
            activation_fn=None,
            normalizer_fn=None,
            trainable=training_nn,
            padding=padding,
            reuse=tf.AUTO_REUSE,
            stride=2):
        x = layers.conv2d_transpose(x, num_outputs=num, scope=scope)
        if norm:
            x = layers.batch_norm(x,
                                  decay=0.99,
                                  scale=True,
                                  epsilon=1e-5,
                                  is_training=training_nn,
                                  updates_collections=None,
                                  reuse=tf.AUTO_REUSE,
                                  scope=scope + '/BN')
        if act:
            x = PRelu(x, scope)
    return x
Example #34
    def get_shortcut(self, stride, scope='shortcut'):
        """Reshape and repeat to get the shortcut of input,
           upsampling if stride = 2

        Reference
        =========
        [1] TensorFlow 实战
        """
        def upsample(inputs, stride, scope, odd_flag=False):
            with tf.name_scope(scope):
                if stride == 1:
                    return inputs
                else:
                    # upsampling by transposed convolution
                    input_shape = self.inputs.get_shape().as_list()
                    k = tf.ones(
                        [2, 2, int(input_shape[3]), int(input_shape[3])])
                    output_shape = [tf.shape(self.inputs)[0],
                                    input_shape[1] * stride,
                                    input_shape[2] * stride,
                                    input_shape[3]]
                    up = tf.nn.conv2d_transpose(
                        value=inputs,
                        filter=k,
                        output_shape=output_shape,
                        strides=[1, stride, stride, 1],
                        padding='SAME',
                        name='upsample')
                    if odd_flag:
                        up = up[:, 0:-1, 0:-1, :]
                    return up


        if self.depth_in == self.depth_out:
            self.shortcut = upsample(self.inputs, stride, scope, self.odd_flag)
        else:
            self.shortcut = conv2d_transpose(
                inputs=self.inputs,
                num_outputs=self.depth_out,
                kernel_size=[1,1],
                stride=stride,
                padding='SAME',
                normalizer_fn=None,
                activation_fn=None,
                scope=scope)
            if self.odd_flag:
                self.shortcut = self.shortcut[:,0:-1,0:-1,:]
Example #35
def conv2d_transpose(inputs,
                     activation_fn=lrelu,
                     normalizer_fn=instance_norm,
                     scope='conv2d_transpose',
                     **kwargs):
    """Summary

    Parameters
    ----------
    inputs : TYPE
        Description
    activation_fn : TYPE, optional
        Description
    normalizer_fn : TYPE, optional
        Description
    scope : str, optional
        Description
    **kwargs
        Description

    Returns
    -------
    TYPE
        Description
    """
    with tf.variable_scope(scope or 'conv2d_transpose'):
        h = tfl.conv2d_transpose(
            inputs=inputs,
            activation_fn=None,
            normalizer_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
            biases_initializer=None,
            **kwargs)
        if normalizer_fn:
            h = normalizer_fn(h)
        if activation_fn:
            h = activation_fn(h)
        return h
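
A short usage sketch for this wrapper (illustrative; assumes TensorFlow 1.x, tf.contrib.layers imported as tfl, and that the project's lrelu and instance_norm helpers are in scope, since they are the default arguments). Extra keyword arguments are forwarded to tfl.conv2d_transpose:

import tensorflow as tf

x = tf.random_normal([1, 16, 16, 64])
y = conv2d_transpose(x,
                     num_outputs=32,
                     kernel_size=4,
                     stride=2,
                     scope='up1')   # -> [1, 32, 32, 32], instance-normalized then lrelu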
Example #36
    def get_bottlenet(self):
        """Form the network"""
        # get collections
        bottlelayer = namedtuple("bottlelayer",
                              ['kernel_shape','stride','bn_flag','padding','act_fn'])
        with tf.name_scope(self.scope):
            input_now = self.inputs
            for i, kernel in enumerate(self.bottle_params):
                with tf.name_scope('bottle_sub_'+str(i)):
                    kernel = bottlelayer._make(kernel)
                    with tf.name_scope('conv2d_transpose'):
                        residual = conv2d_transpose(
                            inputs=input_now,
                            num_outputs=kernel.kernel_shape[-1],
                            kernel_size=kernel.kernel_shape[0:2],
                            padding=kernel.padding,
                            stride=kernel.stride)
                        if kernel.stride == 2 and self.odd_flag == True:
                            residual = residual[:,0:-1,0:-1,:]
                    if kernel.bn_flag:
                        residual = utils.get_batch_norm(residual,
                                                        self.is_training,
                                                        scope=self.scope+'batch_norm')
                    if kernel.act_fn is not None:
                        with tf.name_scope('activate'):
                            residual = kernel.act_fn(residual)
                    input_now = residual
                    print(i, " ", residual.get_shape())
        # add shortcut
        self.get_shortcut(self.stride,scope=self.scope+'_shortcut')
        # print("shortcut ", self.shortcut.get_shape())
        residual = residual + self.shortcut
        if self.summary_flag:
            tf.summary.histogram('bottle_residual', residual)

        return residual
Example #37
    def __call__(self, x, is_training = True):
        with tf.variable_scope(self.name) as scope:
            with arg_scope([tcl.batch_norm], is_training=is_training, scale=True):
                with arg_scope([tcl.conv2d, tcl.conv2d_transpose], activation_fn=tf.nn.relu, 
                                     normalizer_fn=tcl.batch_norm, 
                                     biases_initializer=None, 
                                     padding='SAME',
                                     weights_regularizer=tcl.l2_regularizer(0.0002)):
                    size = 16  
                    # x: s x s x 3
                    se = tcl.conv2d(x, num_outputs=size, kernel_size=4, stride=1) # 256 x 256 x 16
                    se = resBlock(se, num_outputs=size * 2, kernel_size=4, stride=2) # 128 x 128 x 32
                    se = resBlock(se, num_outputs=size * 2, kernel_size=4, stride=1) # 128 x 128 x 32
                    se = resBlock(se, num_outputs=size * 4, kernel_size=4, stride=2) # 64 x 64 x 64
                    se = resBlock(se, num_outputs=size * 4, kernel_size=4, stride=1) # 64 x 64 x 64
                    se = resBlock(se, num_outputs=size * 8, kernel_size=4, stride=2) # 32 x 32 x 128
                    se = resBlock(se, num_outputs=size * 8, kernel_size=4, stride=1) # 32 x 32 x 128
                    se = resBlock(se, num_outputs=size * 16, kernel_size=4, stride=2) # 16 x 16 x 256
                    se = resBlock(se, num_outputs=size * 16, kernel_size=4, stride=1) # 16 x 16 x 256
                    se = resBlock(se, num_outputs=size * 32, kernel_size=4, stride=2) # 8 x 8 x 512
                    se = resBlock(se, num_outputs=size * 32, kernel_size=4, stride=1) # 8 x 8 x 512

                    pd = tcl.conv2d_transpose(se, size * 32, 4, stride=1) # 8 x 8 x 512 
                    pd = tcl.conv2d_transpose(pd, size * 16, 4, stride=2) # 16 x 16 x 256 
                    pd = tcl.conv2d_transpose(pd, size * 16, 4, stride=1) # 16 x 16 x 256 
                    pd = tcl.conv2d_transpose(pd, size * 16, 4, stride=1) # 16 x 16 x 256 
                    pd = tcl.conv2d_transpose(pd, size * 8, 4, stride=2) # 32 x 32 x 128 
                    pd = tcl.conv2d_transpose(pd, size * 8, 4, stride=1) # 32 x 32 x 128 
                    pd = tcl.conv2d_transpose(pd, size * 8, 4, stride=1) # 32 x 32 x 128 
                    pd = tcl.conv2d_transpose(pd, size * 4, 4, stride=2) # 64 x 64 x 64 
                    pd = tcl.conv2d_transpose(pd, size * 4, 4, stride=1) # 64 x 64 x 64 
                    pd = tcl.conv2d_transpose(pd, size * 4, 4, stride=1) # 64 x 64 x 64 
                    
                    pd = tcl.conv2d_transpose(pd, size * 2, 4, stride=2) # 128 x 128 x 32
                    pd = tcl.conv2d_transpose(pd, size * 2, 4, stride=1) # 128 x 128 x 32
                    pd = tcl.conv2d_transpose(pd, size, 4, stride=2) # 256 x 256 x 16
                    pd = tcl.conv2d_transpose(pd, size, 4, stride=1) # 256 x 256 x 16

                    pd = tcl.conv2d_transpose(pd, 3, 4, stride=1) # 256 x 256 x 3
                    pd = tcl.conv2d_transpose(pd, 3, 4, stride=1) # 256 x 256 x 3
                    pos = tcl.conv2d_transpose(pd, 3, 4, stride=1, activation_fn = tf.nn.sigmoid)#, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
                                
                    return pos