def generator(hparams, z, train, reuse):
    with tf.variable_scope("generator") as scope:
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s_h, s_w = hparams.output_height, hparams.output_width
        s_h2, s_h4, s_h8, s_h16 = int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16)
        s_w2, s_w4, s_w8, s_w16 = int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16)

        g_bn0 = ops.batch_norm(name='g_bn0')
        g_bn1 = ops.batch_norm(name='g_bn1')
        g_bn2 = ops.batch_norm(name='g_bn2')
        g_bn3 = ops.batch_norm(name='g_bn3')

        # project `z` and reshape
        h0 = tf.reshape(ops.linear(z, hparams.gf_dim*8*s_h16*s_w16, 'g_h0_lin'), [-1, s_h16, s_w16, hparams.gf_dim * 8])
        h0 = tf.nn.relu(g_bn0(h0, train=train))

        h1 = ops.deconv2d(h0, [hparams.batch_size, s_h8, s_w8, hparams.gf_dim*4], name='g_h1')
        h1 = tf.nn.relu(g_bn1(h1, train=train))

        h2 = ops.deconv2d(h1, [hparams.batch_size, s_h4, s_w4, hparams.gf_dim*2], name='g_h2')
        h2 = tf.nn.relu(g_bn2(h2, train=train))

        h3 = ops.deconv2d(h2, [hparams.batch_size, s_h2, s_w2, hparams.gf_dim*1], name='g_h3')
        h3 = tf.nn.relu(g_bn3(h3, train=train))

        h4 = ops.deconv2d(h3, [hparams.batch_size, s_h, s_w, hparams.c_dim], name='g_h4')
        x_gen = tf.nn.tanh(h4)

        return x_gen
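Most of these examples assume a small DCGAN-style `ops` module. A minimal sketch of the helpers they call (TF1 API; the names and defaults here are assumptions, not any single repo's exact code):

import tensorflow as tf

def linear(x, output_size, scope='linear', stddev=0.02):
    # Fully connected layer: x @ W + b.
    shape = x.get_shape().as_list()
    with tf.variable_scope(scope):
        w = tf.get_variable('w', [shape[1], output_size], tf.float32,
                            tf.random_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_size],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b

def deconv2d(x, output_shape, k_h=5, k_w=5, d_h=2, d_w=2,
             stddev=0.02, name='deconv2d'):
    # Transposed convolution; filter shape is [k_h, k_w, out_ch, in_ch].
    with tf.variable_scope(name):
        w = tf.get_variable(
            'w', [k_h, k_w, output_shape[-1], x.get_shape()[-1]],
            initializer=tf.random_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_shape[-1]],
                            initializer=tf.constant_initializer(0.0))
        y = tf.nn.conv2d_transpose(x, w, output_shape=output_shape,
                                   strides=[1, d_h, d_w, 1])
        return tf.nn.bias_add(y, b)

class batch_norm(object):
    # Thin wrapper so a batch-norm layer can be created by name and
    # called later with a train flag, as the examples do.
    def __init__(self, epsilon=1e-5, momentum=0.9, name='batch_norm'):
        self.epsilon, self.momentum, self.name = epsilon, momentum, name

    def __call__(self, x, train=True):
        return tf.contrib.layers.batch_norm(
            x, decay=self.momentum, epsilon=self.epsilon, scale=True,
            is_training=train, scope=self.name)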
Example #2
def generator(z, is_training):
    # First, project the input vector and reshape it into a batch of
    # 4x4 feature maps.
    z_ = ops.linear(z, GENERATOR_DENSE_SIZE * 4 * 4, 'g_h0_lin')
    h_in = tf.reshape(z_, [-1, 4, 4, GENERATOR_DENSE_SIZE])
    g_batch_norm_in = ops.batch_norm(name='g_batch_norm_in')
    h_in_bn = g_batch_norm_in(h_in, is_training)
    h_in_z = ops.lrelu(x=h_in_bn, name='g_lr_1')

    h_1 = ops.deconv2d(h_in_z, output_shape=[BATCH_SIZE, 8, 8, 512],
                       k_h=5, k_w=5, d_h=2, d_w=2, name='g_deconv_1')
    g_batch_norm_1 = ops.batch_norm(name='g_batch_norm_1')
    h_1_bn = g_batch_norm_1(h_1, is_training)
    h_1_z = ops.lrelu(x=h_1_bn, name='g_lr_2')
    # Note: in TF1, the second argument of tf.nn.dropout is keep_prob,
    # so 0.3 keeps 30% of the activations.
    h_1_z_dr = tf.nn.dropout(h_1_z, 0.3)

    h_2 = ops.deconv2d(h_1_z_dr, output_shape=[BATCH_SIZE, 16, 16, 256],
                       k_h=5, k_w=5, d_h=2, d_w=2, name='g_deconv_2')
    g_batch_norm_2 = ops.batch_norm(name='g_batch_norm_2')
    h_2_bn = g_batch_norm_2(h_2, is_training)
    h_2_z = ops.lrelu(x=h_2_bn, name='g_lr_3')
    h_2_z_dr = tf.nn.dropout(h_2_z, 0.3)

    h_3 = ops.deconv2d(h_2_z_dr, output_shape=[BATCH_SIZE, 32, 32, 128],
                       k_h=5, k_w=5, d_h=2, d_w=2, name='g_deconv_3')
    g_batch_norm_3 = ops.batch_norm(name='g_batch_norm_3')
    h_3_bn = g_batch_norm_3(h_3, is_training)
    h_3_z = ops.lrelu(x=h_3_bn, name='g_lr_4')

    h_out = ops.deconv2d(h_3_z, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, N_CHANNELS],
                         name='g_out')

    return tf.nn.tanh(h_out)
Example #3
    def generator(self, z):
        # project `z` and reshape
        self.h0, self.h0_w, self.h0_b = ops.deconv2d(
            z, [self.batch_size, 32, 32, self.gf_dim],
            k_h=1,
            k_w=1,
            d_h=1,
            d_w=1,
            name='g_h0',
            with_w=True)
        h0 = ops.lrelu(self.h0)

        self.h1, self.h1_w, self.h1_b = ops.deconv2d(
            h0, [self.batch_size, 32, 32, self.gf_dim],
            name='g_h1',
            d_h=1,
            d_w=1,
            with_w=True)
        h1 = ops.lrelu(self.h1)

        h2, self.h2_w, self.h2_b = ops.deconv2d(
            h1, [self.batch_size, 32, 32, 3 * 16],
            d_h=1,
            d_w=1,
            name='g_h2',
            with_w=True)
        h2 = PS(h2, r=4, color=True)

        return tf.tanh(h2)  # [-1 1]
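The `PS` call above is the periodic-shuffling (sub-pixel) upscaling op. A compact stand-in built on tf.depth_to_space (a sketch; the channel interleaving may differ from the original phase-shift implementation):

import tensorflow as tf

def PS(x, r, color=False):
    # Rearrange channels into space: [B, H, W, C*r*r] -> [B, H*r, W*r, C].
    if color:
        # Shuffle each of the 3 color groups independently.
        parts = tf.split(x, 3, axis=3)
        return tf.concat([tf.depth_to_space(p, r) for p in parts], axis=3)
    return tf.depth_to_space(x, r)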
Example #4
    def generator(self, t_z, t_text_embedding):

        s = self.options['image_size']
        s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)
        reduced_text_embedding = ops.lrelu(
            ops.linear(t_text_embedding, self.options['t_dim'], 'g_embedding'))
        z_concat = tf.concat([t_z, reduced_text_embedding], 1)
        z_ = ops.linear(z_concat, self.options['gf_dim'] * 8 * s16 * s16,
                        'g_h0_lin')
        h0 = tf.reshape(z_, [-1, s16, s16, self.options['gf_dim'] * 8])
        h0 = tf.nn.relu(self.g_bn0(h0))

        h1 = ops.deconv2d(
            h0,
            [self.options['batch_size'], s8, s8, self.options['gf_dim'] * 4],
            name='g_h1')
        h1 = tf.nn.relu(self.g_bn1(h1))

        h2 = ops.deconv2d(
            h1,
            [self.options['batch_size'], s4, s4, self.options['gf_dim'] * 2],
            name='g_h2')
        h2 = tf.nn.relu(self.g_bn2(h2))

        h3 = ops.deconv2d(
            h2,
            [self.options['batch_size'], s2, s2, self.options['gf_dim'] * 1],
            name='g_h3')
        h3 = tf.nn.relu(self.g_bn3(h3))

        h4 = ops.deconv2d(h3, [self.options['batch_size'], s, s, 3],
                          name='g_h4')

        return (tf.tanh(h4) / 2. + 0.5)
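The return line rescales tanh's [-1, 1] output into [0, 1], so the generated pixels match images normalized to [0, 1]. A quick numeric check:

import numpy as np

x = np.linspace(-5.0, 5.0, 101)
out = np.tanh(x) / 2.0 + 0.5
assert 0.0 < out.min() and out.max() < 1.0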
Example #5
    def generator_pix2pix(self, image, reuse=False):
        output_size = self.patch_size
        s = math.ceil(output_size/16.0)*16
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)
        # gf_dim = 16 # Dimension of gen filters in first conv layer.
        with tf.variable_scope("generator") as scope:

            # image is 128 x 128 x (input_c_dim + output_c_dim)
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse is False
            # Possible input rescaling (left disabled):
            # image = image / 255.0
            # Trick (liuas, 2018-05-09): use lrelu in the encoder instead of relu.

            ngf = 16 # number of generator filters in first conv layer
            # encoder_1: [batch, 16, 16, 3] => [batch, 8, 8, ngf]
            conv1 = conv2d(image, ngf, k_h=4, k_w=4, name='adv_g_enc1')
            conv2 = layer_norm(conv2d(lrelu(conv1, 0.2), ngf*2, k_h=4, k_w=4, name='adv_g_enc2'), name='adv_g_enc2ln')
            conv3 = layer_norm(conv2d(lrelu(conv2, 0.2), ngf*4, k_h=4, k_w=4, name='adv_g_enc3'), name='adv_g_enc3ln')
            conv4 = layer_norm(conv2d(lrelu(conv3, 0.2), ngf*8, k_h=4, k_w=4, name='adv_g_enc4'), name='adv_g_enc4ln')
            deconv1, _, _ = deconv2d(tf.nn.relu(conv4), [self.batch_size, s8, s8, ngf*4], k_h=4, k_w=4, name='adv_g_dec1', with_w=True)
            deconv1 = layer_norm(deconv1, name="adv_g_dec1ln")
            input = tf.concat([deconv1, conv3], axis=3)
            deconv2, _, _ = deconv2d(tf.nn.relu(input), [self.batch_size, s4, s4, ngf*2], k_h=4, k_w=4, name='adv_g_dec2', with_w=True)
            deconv2 = layer_norm(deconv2, name="adv_g_dec2ln")
            input = tf.concat([deconv2, conv2], axis=3)
            deconv3, _, _ = deconv2d(tf.nn.relu(input), [self.batch_size, s2, s2, ngf], k_h=4, k_w=4, name='adv_g_dec3', with_w=True)
            deconv3 = layer_norm(deconv3, name="adv_g_dec3ln")
            input = tf.concat([deconv3, conv1], axis=3)
            deconv4, _, _ = deconv2d(tf.nn.relu(input), [self.batch_size, output_size, output_size, 3], k_h=4, k_w=4, name='adv_g_dec4', with_w=True)

            return tf.tanh(deconv4)
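The encoder half above calls an `lrelu` helper; the usual DCGAN-style definition (a sketch, with the slope default assumed):

import tensorflow as tf

def lrelu(x, leak=0.2, name='lrelu'):
    # Leaky ReLU: identity for x > 0, small slope `leak` otherwise.
    return tf.maximum(x, leak * x, name=name)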
Example #6
def generator(input_z,
              t_txt=None,
              is_train=True,
              reuse=False,
              batch_size=batch_size):

    g_bn0 = ops.batch_norm(name='g_bn0')
    g_bn1 = ops.batch_norm(name='g_bn1')
    g_bn2 = ops.batch_norm(name='g_bn2')
    g_bn3 = ops.batch_norm(name='g_bn3')

    s = image_size  # output image size [64]
    s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)
    gf_dim = 128

    with tf.variable_scope("generator", reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        z_concat = tf.concat([input_z, t_txt], 1)
        z_ = ops.linear(z_concat, gf_dim * 8 * s16 * s16, 'g_h0_lin')
        h0 = tf.reshape(z_, [-1, s16, s16, gf_dim * 8])
        h0 = tf.nn.relu(g_bn0(h0))
        h1 = ops.deconv2d(h0, [batch_size, s8, s8, gf_dim * 4], name='g_h1')
        h1 = tf.nn.relu(g_bn1(h1))

        h2 = ops.deconv2d(h1, [batch_size, s4, s4, gf_dim * 2], name='g_h2')
        h2 = tf.nn.relu(g_bn2(h2))

        h3 = ops.deconv2d(h2, [batch_size, s2, s2, gf_dim * 1], name='g_h3')
        h3 = tf.nn.relu(g_bn3(h3))

        h4 = ops.deconv2d(h3, [batch_size, s, s, 3], name='g_h4')

    return h4, tf.tanh(h4)
Example #7
    def __init__(self, z_size, channel, resnet=False, output_size=32):
        super(Generator, self).__init__()
        s = 4
        self.output_size = output_size
        if self.output_size == 32:
            s = 4
        if self.output_size == 48:
            s = 6
        self.s = s
        self.z_size = z_size
        self.resnet = resnet
        self.fully_connect = nn.Linear(z_size, s * s * 256)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

        self.deconv1 = deconv2d(256, 256, padding=0)
        self.bn1 = nn.BatchNorm2d(256)
        self.deconv2 = deconv2d(256, 128, padding=0)
        self.bn2 = nn.BatchNorm2d(128)
        self.deconv3 = deconv2d(128, 64, padding=0)
        self.bn3 = nn.BatchNorm2d(64)
        self.conv4 = conv2d(64, channel, padding=1, kernel_size=3, stride=1)
        self.conv_res4 = conv2d(256,
                                channel,
                                padding=1,
                                kernel_size=3,
                                stride=1)

        self.re1 = Residual_G(256, 256, up_sampling=True)
        self.re2 = Residual_G(256, 256, up_sampling=True)
        self.re3 = Residual_G(256, 256, up_sampling=True)
        self.bn = nn.BatchNorm2d(256)
Example #8
def dcgan_decoder(opts, noise, is_training=False, reuse=False):
    output_shape = datashapes[opts['dataset']]
    num_units = opts['g_num_filters']
    batch_size = tf.shape(noise)[0]
    num_layers = opts['g_num_layers']
    height = output_shape[0] // 2 ** (num_layers - 1)
    width = output_shape[1] // 2 ** (num_layers - 1)

    h0 = ops.linear(
        opts, noise, num_units * height * width, scope='h0_lin')
    h0 = tf.reshape(h0, [-1, height, width, num_units])
    h0 = tf.nn.relu(h0)
    layer_x = h0
    for i in range(num_layers - 1):
        scale = 2 ** (i + 1)
        _out_shape = [batch_size, height * scale,
                      width * scale, num_units // scale]
        layer_x = ops.deconv2d(opts, layer_x, _out_shape,
                               scope='h%d_deconv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x,
                                     is_training, reuse, scope='h%d_bn' % i)
        layer_x = tf.nn.relu(layer_x)
    _out_shape = [batch_size] + list(output_shape)

    last_h = ops.deconv2d(
        opts, layer_x, _out_shape, d_h=1, d_w=1, scope='hfinal_deconv')
    return tf.nn.sigmoid(last_h), last_h
Example #9
    def generator(self, z):
        with tf.variable_scope('generator') as scope:
            # Derive the intermediate sizes from the output size
            o_h0, o_w0 = self.cfg.output_height, self.cfg.output_width
            o_h1, o_w1 = get_conved_size(o_h0, 2), get_conved_size(o_w0, 2)
            o_h2, o_w2 = get_conved_size(o_h1, 2), get_conved_size(o_w1, 2)
            o_h3, o_w3 = get_conved_size(o_h2, 2), get_conved_size(o_w2, 2)
            o_h4, o_w4 = get_conved_size(o_h3, 2), get_conved_size(o_w3, 2)

            # The relu is kept separate because the deconv2d step needs weight sharing
            z_ = linear(z, self.cfg.gf_dim * 8 * o_h4 * o_w4, scope='g_h0_lin')
            h0 = tf.reshape(z_, [-1, o_h4, o_w4, self.cfg.gf_dim * 8])
            h0 = tf.nn.relu(self.bn_g0(h0, train=True))
            h1 = deconv2d(
                h0, [self.cfg.batch_size, o_h3, o_w3, self.cfg.gf_dim * 4],
                scope='g_h1')
            h1 = tf.nn.relu(self.bn_g1(h1, train=True))
            h2 = deconv2d(
                h1, [self.cfg.batch_size, o_h2, o_w2, self.cfg.gf_dim * 2],
                scope='g_h2')
            h2 = tf.nn.relu(self.bn_g2(h2, train=True))
            h3 = deconv2d(
                h2, [self.cfg.batch_size, o_h1, o_w1, self.cfg.gf_dim * 1],
                scope='g_h3')
            h3 = tf.nn.relu(self.bn_g3(h3, train=True))
            h4 = deconv2d(h3, [self.cfg.batch_size, o_h0, o_w0, self.c_dim],
                          scope='g_h4')

            return tf.nn.tanh(h4)
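`get_conved_size` is not shown here; given how it halves the output size four times, it presumably matches the standard ceil-division helper (the `conv_out_size_same` seen in later examples computes the same thing):

import math

def get_conved_size(size, stride):
    # Output size of a SAME-padded convolution with this stride.
    return int(math.ceil(float(size) / float(stride)))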
Example #10
    def patch_generator(self, index, z, y=None):
        """
        patch generator
        :param z: 
        :param y: 
        :param scope: 
        :return: 
        """
        with tf.variable_scope("generator%d" % index) as scope:
            s_h = int(self.output_height / np.sqrt(self.num_patches))  # 16
            s_w = int(self.output_width / np.sqrt(self.num_patches))  # 16
            s_h2, s_w2 = int(s_h / 2), int(s_w / 2)  # 8, 8
            s_h4, s_w4 = int(s_h2 / 2), int(s_w2 / 2)  # 4, 4

            yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim + 2])
            z = tf.concat([z, y], 1)

            h0 = tf.nn.relu(self.g_bns_all[index][0](linear(
                z, self.gfc_dim, "g_h0_lin")))
            h0 = tf.concat([h0, y], 1)

            h1 = tf.nn.relu(self.g_bns_all[index][1](linear(
                h0, self.gf_dim * 4 * s_h4 * s_w4, "g_h1_lin")))
            h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 4])
            h1 = conv_cond_concat(h1, yb)

            h2 = deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2],
                          name="g_h2")
            h2 = self.g_bns_all[index][2](h2)
            h2 = tf.nn.relu(h2)
            h2 = conv_cond_concat(h2, yb)

            return tf.nn.sigmoid(
                deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim],
                         name="g_h3"))
Example #11
    def generator(self, z_set, y_set):
        """
        Fully generator
        :param z_set: 
        :param y_set: 
        :return: 
        """
        z = tf.concat([v for v in z_set], axis=1)
        y = y_set[0]
        with tf.variable_scope("generator_f") as scope:
            s_h, s_w = self.output_height, self.output_width  # 32, 32
            s_h2, s_w2 = int(s_h / 2), int(s_w / 2)  # 16, 16
            s_h4, s_w4 = int(s_h2 / 2), int(s_w2 / 2)  # 8, 8

            yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim + 2])
            z = tf.concat([z, y], 1)

            h0 = tf.nn.relu(self.g_bn_f0(linear(z, self.gfc_dim, "g_h0_lin")))
            h0 = tf.concat([h0, y], 1)

            h1 = tf.nn.relu(
                self.g_bn_f1(
                    linear(h0, self.gf_dim * 4 * s_h4 * s_w4, "g_h1_lin")))
            h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 4])
            h1 = conv_cond_concat(h1, yb)

            h2 = deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2],
                          name="g_h2")
            h2 = self.g_bn_f2(h2)
            h2 = tf.nn.relu(h2)
            h2 = conv_cond_concat(h2, yb)

            return tf.nn.sigmoid(
                deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim],
                         name="g_h3"))
Example #12
    def generator(self, z, train=True):
        with tf.variable_scope("generator") as scope:
            # train is True when training, False when sampling
            batch_size = self.batch_size
            if not train: # When sampling
                scope.reuse_variables()
                batch_size = self.sample_num

            # Convolution parameter sizes
            s_h, s_w = self.image_shape
            s_h2, s_w2 = self.conv_out_size_same(s_h, 2), self.conv_out_size_same(s_w, 2)
            s_h4, s_w4 = self.conv_out_size_same(s_h2, 2), self.conv_out_size_same(s_w2, 2)
            s_h8, s_w8 = self.conv_out_size_same(s_h4, 2), self.conv_out_size_same(s_w4, 2)
            s_h16, s_w16 = self.conv_out_size_same(s_h8, 2), self.conv_out_size_same(s_w8, 2)

            # Project z, reshape and go through 4 convolution blocks (deconv, batch norm, relu)
            self.z_ = linear(z, self.gf_dim * 8 * s_h16 * s_w16, 'g_h0_lin')
            self.h0 = tf.reshape(self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
            h0 = tf.nn.relu(self.g_bn0(self.h0, train=train))
            h1 = tf.nn.relu(self.g_bn1(deconv2d(h0, [batch_size, s_h8, s_w8, self.gf_dim * 4], name='g_h1'), train=train))
            h2 = tf.nn.relu(self.g_bn2(deconv2d(h1, [batch_size, s_h4, s_w4, self.gf_dim * 2], name='g_h2'), train=train))
            h3 = tf.nn.relu(self.g_bn3(deconv2d(h2, [batch_size, s_h2, s_w2, self.gf_dim * 1], name='g_h3'), train=train))
            h4 = tf.nn.tanh(deconv2d(h3, [batch_size, s_h, s_w, self.c_dim], name='g_h4'))

            return h4
Example #13
def generator(images, options, reuse=False, name='gen'):
    # reuse or not
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
            
        # down sampling
        x = relu(instance_norm(conv2d(images, options.nf, ks=7, s=1, name='gen_ds_conv1'), 'in1_1'))
        x = relu(instance_norm(conv2d(x, 2*options.nf, ks=4, s=2, name='gen_ds_conv2'), 'in1_2'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=4, s=2, name='gen_ds_conv3'), 'in1_3'))
        
        # bottleneck
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv1'), 'in2_1'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv2'), 'in2_2'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv3'), 'in2_3'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv4'), 'in2_4'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv5'), 'in2_5'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv6'), 'in2_6'))
        
        # up sampling
        x = relu(instance_norm(deconv2d(x, 2*options.nf, ks=4, s=2, name='gen_us_deconv1'), 'in3_1'))
        x = relu(instance_norm(deconv2d(x, options.nf, ks=4, s=2, name='gen_us_deconv2'), 'in3_2'))
        x = tanh(deconv2d(x, 3, ks=7, s=1, name='gen_us_dwconv3'))
        
        return x
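The CycleGAN-style generator above normalizes with `instance_norm`; a typical TF1 definition (a sketch; the initializer choices are assumptions):

import tensorflow as tf

def instance_norm(x, name='instance_norm'):
    # Normalize each sample and channel over the spatial dimensions.
    with tf.variable_scope(name):
        depth = x.get_shape()[3]
        scale = tf.get_variable(
            'scale', [depth],
            initializer=tf.random_normal_initializer(1.0, 0.02))
        offset = tf.get_variable(
            'offset', [depth], initializer=tf.constant_initializer(0.0))
        mean, variance = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
        return scale * (x - mean) * tf.rsqrt(variance + 1e-5) + offset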
Example #14
def generator(hparams, z, scope_name, train, reuse):

    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()

        output_size = 64
        s = output_size
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)

        g_bn0 = ops.batch_norm(name='g_bn0')
        g_bn1 = ops.batch_norm(name='g_bn1')
        g_bn2 = ops.batch_norm(name='g_bn2')
        g_bn3 = ops.batch_norm(name='g_bn3')

        # project `z` and reshape
        h0 = tf.reshape(ops.linear(z, hparams.gf_dim*8*s16*s16, 'g_h0_lin'), [-1, s16, s16, hparams.gf_dim * 8])
        h0 = tf.nn.relu(g_bn0(h0, train=train))

        h1 = ops.deconv2d(h0, [hparams.batch_size, s8, s8, hparams.gf_dim*4], name='g_h1')
        h1 = tf.nn.relu(g_bn1(h1, train=train))

        h2 = ops.deconv2d(h1, [hparams.batch_size, s4, s4, hparams.gf_dim*2], name='g_h2')
        h2 = tf.nn.relu(g_bn2(h2, train=train))

        h3 = ops.deconv2d(h2, [hparams.batch_size, s2, s2, hparams.gf_dim*1], name='g_h3')
        h3 = tf.nn.relu(g_bn3(h3, train=train))

        h4 = ops.deconv2d(h3, [hparams.batch_size, s, s, hparams.c_dim], name='g_h4')
        x_gen = tf.nn.tanh(h4)

    return x_gen
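Because the function takes explicit `train` and `reuse` flags, the same weights can serve both a training pass and a sampling pass. A hypothetical usage sketch (the 100-dim z is an assumption; `hparams` carries batch_size, gf_dim and c_dim as above):

import tensorflow as tf

z = tf.random_uniform([hparams.batch_size, 100], minval=-1.0, maxval=1.0)
x_train = generator(hparams, z, 'gen', train=True, reuse=False)   # creates vars
x_sample = generator(hparams, z, 'gen', train=False, reuse=True)  # shares them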
Example #15
 def G(z, scope='Generator'):
     """[using prior distribution z to generator fake image]
     
     Arguments:
         z {np.array} -- [numpy array for describe prior distribution]
     
     Keyword Arguments:
         scope {str} -- [scope of generator] (default: {'Generator'})
     
     Returns:
         [Tensor] -- [fake image]
     """
     with tf.variable_scope(scope) as scope:
         log.warn(scope.name)
         z = tf.reshape(z, [self.batch_size, 1, 1, -1])
         g_1 = deconv2d(z, deconv_info[0], is_train, name='g_1_deconv')
         log.info('{} {}'.format(scope.name, g_1))
         g_2 = deconv2d(g_1, deconv_info[1], is_train, name='g_2_deconv')
         log.info('{} {}'.format(scope.name, g_2))
         g_3 = deconv2d(g_2, deconv_info[2], is_train, name='g_3_deconv')
         log.info('{} {}'.format(scope.name, g_3))
         g_4 = deconv2d(g_3, deconv_info[3], is_train, name='g_4_deconv', activation_fn=tf.tanh)
         log.info('{} {}'.format(scope.name, g_4))
         output = g_4
         assert output.get_shape().as_list() == self.image.get_shape().as_list(), output.get_shape().as_list()
     return output
Example #16
def mdl_resize_net_64(net, mdl_feats):
    # Assume input feature map dimensions of 224x224
    net.mdl_resize_net = {}
    with tf.variable_scope('MDLResize_Net'):
        # Output (bs, 112, 112)
        # conv1 = conv2d('conv1', mdl_feats, 3, 128, stride=2, padding="SAME", norm=net.norm, mode=net.mode)
        # net.mdl_resize_net['conv1'] = conv1
        # Output (bs, 56, 56)
        pool1 = tf.layers.max_pooling2d(mdl_feats,
                                        4,
                                        4,
                                        padding='same',
                                        name='pool1')
        net.mdl_resize_net['pool1'] = pool1
        # Output (bs, 60, 60)
        deconv1 = deconv2d('deconv1',
                           pool1,
                           5,
                           64,
                           stride=1,
                           padding="VALID",
                           norm=net.norm,
                           mode=net.mode)
        net.mdl_resize_net['deconv1'] = deconv1
        # Output (bs, 64, 64)
        deconv2 = deconv2d('deconv2',
                           deconv1,
                           5,
                           32,
                           stride=1,
                           padding="VALID",
                           norm=net.norm,
                           mode=net.mode)
        net.mdl_resize_net['deconv2'] = deconv2
    return deconv2
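With padding="VALID" and stride 1, each transposed convolution above grows the feature map by k - 1 pixels, which is exactly how 56x56 becomes 60x60 and then 64x64 with the 5x5 kernels:

def deconv_valid_out(size, k, stride=1):
    # Output size of a VALID, stride-s transposed convolution.
    return (size - 1) * stride + k

assert deconv_valid_out(56, 5) == 60
assert deconv_valid_out(60, 5) == 64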
Example #17
    def _build_model(self):
        with tf.variable_scope('generator', reuse=self.reuse):
            s = self.hparas['IMAGE_SIZE'][0]
            s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)

            gf_dim = self.gf_dim

            reduced_text_embedding = ops.lrelu(ops.linear(
                self.text, self.hparas['TEXT_DIM'], 'g_embedding'))
            z_concat = tf.concat([self.z, reduced_text_embedding], 1)
            z_ = ops.linear(z_concat, gf_dim * 8 * s16 * s16, 'g_h0_lin')
            h0 = tf.reshape(z_, [-1, s16, s16, gf_dim * 8])
            h0 = tf.nn.relu(ops.batch_norm(h0))

            h1 = ops.deconv2d(h0, [self.hparas['BATCH_SIZE'],
                               s8, s8, gf_dim * 4], name='g_h1')
            h1 = tf.nn.relu(ops.batch_norm(h1))

            h2 = ops.deconv2d(h1, [self.hparas['BATCH_SIZE'],
                               s4, s4, gf_dim * 2], name='g_h2')
            h2 = tf.nn.relu(ops.batch_norm(h2))

            h3 = ops.deconv2d(h2, [self.hparas['BATCH_SIZE'],
                               s2, s2, gf_dim * 1], name='g_h3')
            h3 = tf.nn.relu(ops.batch_norm(h3))

            h4 = ops.deconv2d(h3, [self.hparas['BATCH_SIZE'], s, s, 3], name='g_h4')

            self.generator_net = tf.tanh(h4) / 2.0 + 0.5
            self.outputs = self.generator_net
Example #19
    def sampler(self, z):
        # Differs from the generator in that: 1) variables are reused 2) batch_norm runs with train=False
        with tf.variable_scope('generator') as scope:
            scope.reuse_variables()
            o_h0, o_w0 = self.cfg.output_height, self.cfg.output_width
            o_h1, o_w1 = get_conved_size(o_h0, 2), get_conved_size(o_w0, 2)
            o_h2, o_w2 = get_conved_size(o_h1, 2), get_conved_size(o_w1, 2)
            o_h3, o_w3 = get_conved_size(o_h2, 2), get_conved_size(o_w2, 2)
            o_h4, o_w4 = get_conved_size(o_h3, 2), get_conved_size(o_w3, 2)

            z_ = linear(z, self.cfg.gf_dim * 8 * o_h4 * o_w4, scope='g_h0_lin')
            h0 = tf.reshape(z_, [-1, o_h4, o_w4, self.cfg.gf_dim * 8])
            h0 = tf.nn.relu(self.bn_g0(h0, train=False))
            h1 = deconv2d(
                h0, [self.cfg.batch_size, o_h3, o_w3, self.cfg.gf_dim * 4],
                scope='g_h1')
            h1 = tf.nn.relu(self.bn_g1(h1, train=False))
            h2 = deconv2d(
                h1, [self.cfg.batch_size, o_h2, o_w2, self.cfg.gf_dim * 2],
                scope='g_h2')
            h2 = tf.nn.relu(self.bn_g2(h2, train=False))
            h3 = deconv2d(
                h2, [self.cfg.batch_size, o_h1, o_w1, self.cfg.gf_dim * 1],
                scope='g_h3')
            h3 = tf.nn.relu(self.bn_g3(h3, train=False))
            h4 = deconv2d(h3, [self.cfg.batch_size, o_h0, o_w0, self.c_dim],
                          scope='g_h4')

            return tf.nn.tanh(h4)
Example #20
 def G(z, scope='Generator'):
     with tf.variable_scope(scope) as scope:
         log.warn(scope.name)
         z = tf.reshape(z, [self.batch_size, 1, 1, -1])
         g_1 = deconv2d(z, deconv_info[0], is_train, name='g_1_deconv')
         log.info('{} {}'.format(scope.name, g_1))
         g_2 = deconv2d(g_1,
                        deconv_info[1],
                        is_train,
                        name='g_2_deconv')
         log.info('{} {}'.format(scope.name, g_2))
         g_3 = deconv2d(g_2,
                        deconv_info[2],
                        is_train,
                        name='g_3_deconv')
         log.info('{} {}'.format(scope.name, g_3))
         g_4 = deconv2d(g_3,
                        deconv_info[3],
                        is_train,
                        name='g_4_deconv',
                        activation_fn=tf.tanh)
         log.info('{} {}'.format(scope.name, g_4))
         output = g_4
         #print(output.get_shape().as_list(), self.image.get_shape().as_list(), output.get_shape().as_list())
         assert output.get_shape().as_list() == self.image.get_shape(
         ).as_list(), output.get_shape().as_list()
     return output
Example #21
def discriminator_ae(x, reuse=False):
    #x_small = resize(x, (56, 56))

    with tf.variable_scope('discriminator', reuse=reuse):
        net = conv2d(x, 3, 32, d=2, scope='conv_1', dropout=False)
        net = conv2d(net, 32, 64, d=2, scope='conv_2', dropout=False)
        net = tf.reshape(net, [-1, 56 * 56 * 64])
        code = lrelu(fc(net, 1000, scope='fc_3'))
        net = lrelu(fc(code, 56 * 56 * 64, scope='fc_4'))
        net = tf.reshape(net, [-1, 56, 56, 64])
        net = resize(net, (112, 112))
        net = deconv2d(net, 64, 32, 112, scope='conv_5', dropout=False)
        net = resize(net, (224, 224))
        #net = deconv2d(net, 32, 16, 224, scope='conv_6')

        dimension = 3

        if dimension == 2:
            sc_1, x = tf.split(x, [1, 2], axis=3)
            net = deconv2d(net, 32, 2, 224, scope='deconv_7', bn=False)
            #loss = tf.sqrt(2 * tf.nn.l2_loss(x - net)) / (224*224)
            loss = tf.norm(x - net, ord=1) / (224 * 224)
            net = tf.concat([sc_1, net], axis=3)
        else:
            net = deconv2d(net, 32, 3, 224, scope='deconv_10', bn=False)
            loss = tf.sqrt(2 * tf.nn.l2_loss(x - net)) / (224 * 224)

        return net, loss
Example #22
    def __call__(self, input):
        if self._deconv_type == 'bilinear':
            from ops import bilinear_deconv2d as deconv2d
        elif self._deconv_type == 'nn':
            from ops import nn_deconv2d as deconv2d
        elif self._deconv_type == 'transpose':
            from ops import deconv2d
        else:
            raise NotImplementedError
        with tf.variable_scope(self.name, reuse=self._reuse):
            if not self._reuse:
                log.warn(self.name)
            _ = fc(input,
                   self.start_dim_x * self.start_dim_y * self.start_dim_ch,
                   self._is_train,
                   info=not self._reuse,
                   norm='none',
                   name='fc')
            _ = tf.reshape(_, [
                _.shape.as_list()[0], self.start_dim_y, self.start_dim_x,
                self.start_dim_ch
            ])
            if not self._reuse:
                log.info('reshape {} '.format(_.shape.as_list()))
            num_deconv_layer = int(
                np.ceil(
                    np.log2(
                        max(float(self._h / self.start_dim_y),
                            float(self._w / self.start_dim_x)))))
            for i in range(num_deconv_layer):
                _ = deconv2d(_,
                             max(self._c,
                                 int(_.get_shape().as_list()[-1] / 2)),
                             self._is_train,
                             info=not self._reuse,
                             norm=self._norm_type,
                             name='deconv{}'.format(i + 1))
                if num_deconv_layer - i <= self._num_res_block:
                    _ = conv2d_res(
                        _,
                        self._is_train,
                        info=not self._reuse,
                        name='res_block{}'.format(self._num_res_block -
                                                  num_deconv_layer + i + 1))
            _ = deconv2d(_,
                         self._c,
                         self._is_train,
                         k=1,
                         s=1,
                         info=not self._reuse,
                         activation_fn=tf.tanh,
                         norm='none',
                         name='deconv{}'.format(i + 2))
            _ = tf.image.resize_bilinear(_, [self._h, self._w])

            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              self.name)
            return _
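The num_deconv_layer expression above counts how many stride-2 upsamplings are needed to reach the target resolution. For example, growing a 4x4 start to 64x64 takes ceil(log2(64/4)) = 4 layers:

import numpy as np

start_y, start_x, h, w = 4, 4, 64, 64
num_deconv_layer = int(np.ceil(np.log2(max(h / float(start_y),
                                           w / float(start_x)))))
assert num_deconv_layer == 4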
Example #23
def GeneratorCNN(z, config, reuse=None):
    '''
    Maps z to 64x64 images with values in [-1, 1].
    Uses batch normalization internally.
    '''

    # Read the batch size dynamically so the graph is batch-size-agnostic:
    batch_size = tf.shape(z)[0]
    # Alternative: batch_size = tf.placeholder_with_default(64, [], 'bs')

    with tf.variable_scope("generator", reuse=reuse) as vs:
        g_bn0 = batch_norm(name='g_bn0')
        g_bn1 = batch_norm(name='g_bn1')
        g_bn2 = batch_norm(name='g_bn2')
        g_bn3 = batch_norm(name='g_bn3')

        s_h, s_w = config.gf_dim, config.gf_dim  #64,64
        s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
        s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
        s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
        s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)

        # project `z` and reshape
        z_, self_h0_w, self_h0_b = linear(z,
                                          config.gf_dim * 8 * s_h16 * s_w16,
                                          'g_h0_lin',
                                          with_w=True)

        self_h0 = tf.reshape(z_, [-1, s_h16, s_w16, config.gf_dim * 8])
        h0 = tf.nn.relu(g_bn0(self_h0))

        h1, h1_w, h1_b = deconv2d(h0,
                                  [batch_size, s_h8, s_w8, config.gf_dim * 4],
                                  name='g_h1',
                                  with_w=True)
        h1 = tf.nn.relu(g_bn1(h1))

        h2, h2_w, h2_b = deconv2d(h1,
                                  [batch_size, s_h4, s_w4, config.gf_dim * 2],
                                  name='g_h2',
                                  with_w=True)
        h2 = tf.nn.relu(g_bn2(h2))

        h3, h3_w, h3_b = deconv2d(h2,
                                  [batch_size, s_h2, s_w2, config.gf_dim * 1],
                                  name='g_h3',
                                  with_w=True)
        h3 = tf.nn.relu(g_bn3(h3))

        h4, h4_w, h4_b = deconv2d(h3, [batch_size, s_h, s_w, config.c_dim],
                                  name='g_h4',
                                  with_w=True)
        out = tf.nn.tanh(h4)

    variables = tf.contrib.framework.get_variables(vs)
    return out, variables
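Reading batch_size from tf.shape(z) makes the graph batch-size-agnostic: one build can generate any number of samples. A hypothetical usage sketch (z_dim and config are assumed):

import numpy as np
import tensorflow as tf

z_dim = 100  # assumed latent size
z_ph = tf.placeholder(tf.float32, [None, z_dim])
out, g_vars = GeneratorCNN(z_ph, config)  # `config` as in the example

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # The same graph handles any batch size at run time:
    imgs = sess.run(out, {z_ph: np.random.uniform(-1, 1, (7, z_dim))})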
Example #24
 def generator(self, z, reuse=None):
     with tf.variable_scope('generator', reuse=reuse):
         # project `z` and reshape
         h0 = tf.reshape(linear(z, self.gf_dim * 8 * 4 * 4, 'g_h0_lin'), [-1, 4, 4, self.gf_dim * 8])
         h0 = tf.nn.relu(self.g_bn0(h0))
         h1 = deconv2d(h0, [self.batch_size, 8, 8, self.gf_dim * 4], name='g_h1')
         h1 = tf.nn.relu(self.g_bn1(h1))
         h2 = deconv2d(h1, [self.batch_size, 16, 16, self.gf_dim * 2], name='g_h2')
         h2 = tf.nn.relu(self.g_bn2(h2))
         h3 = deconv2d(h2, [self.batch_size, 32, 32, 3], name='g_h3')
         return tf.nn.tanh(h3)
Example #25
    def _create_generator(self, z, train=True, reuse=False, name="generator"):
        out_size = [(conv_out_size_same(self.img_size[0], 2),
                     conv_out_size_same(self.img_size[1],
                                        2), self.num_gen_feature_maps)]
        for i in range(self.g_num_conv_layers - 1):
            out_size = [(conv_out_size_same(
                out_size[0][0], 2), conv_out_size_same(
                    out_size[0][1], 2), out_size[0][2] * 2)] + out_size

        print(out_size)
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()

            z_split = tf.split(z, self.num_gens, axis=0)
            h0 = []
            for i, var in enumerate(z_split):
                h0.append(
                    tf.nn.relu(linear(var,
                                      out_size[0][0] * out_size[0][1] *
                                      out_size[0][2],
                                      scope='g_h0_linear{}'.format(i),
                                      stddev=0.02),
                               name="g_h0_relu{}".format(i)))

            g_out = []
            for k, var in enumerate(h0):
                var = tf.reshape(var, [
                    self.g_batch_size, out_size[0][0], out_size[0][1],
                    out_size[0][2]
                ])
                for i in range(1, self.g_num_conv_layers):
                    var = tf.nn.relu(deconv2d(var, [
                        self.g_batch_size, out_size[i][0], out_size[i][1],
                        out_size[i][2]
                    ],
                                              stddev=0.02,
                                              name="g{}_h{}_deconv".format(
                                                  k, i)),
                                     name="g{}_h{}_relu".format(k, i))

                g_out.append(
                    tf.nn.tanh(deconv2d(var, [
                        self.g_batch_size, self.img_size[0], self.img_size[1],
                        self.img_size[2]
                    ],
                                        stddev=0.02,
                                        name="g{}_out_deconv".format(k, i)),
                               name="g{}_out_tanh".format(k, i)))

            g_out = tf.concat(g_out, axis=0, name="g_out")

            return g_out
Example #26
def im_unet(net, ims):
    net.im_net = {}
    bs, h, w, ch = ims.get_shape().as_list()
    with tf.variable_scope('ImNet_UNet'):
        conv1 = conv2d('conv1',
                       ims,
                       5,
                       32,
                       act=None,
                       norm=net.norm,
                       mode=net.mode)
        net.im_net['conv1'] = conv1
        conv2 = conv2d('conv2', conv1, 3, 64, norm=net.norm, mode=net.mode)
        net.im_net['conv2'] = conv2
        conv3 = conv2d('conv3', conv2, 3, 128, norm=net.norm, mode=net.mode)
        net.im_net['conv3'] = conv3
        conv4 = conv2d('conv4', conv3, 3, 256, norm=net.norm, mode=net.mode)
        net.im_net['conv4'] = conv4
        _, fh, fw, ch = conv4.get_shape().as_list()
        deconv1 = deconv2d('deconv1',
                           conv4,
                           3,
                           128,
                           norm=net.norm,
                           mode=net.mode)
        net.im_net['deconv1'] = deconv1
        deconv1 = tf.concat([deconv1, conv3], axis=3)
        deconv2 = deconv2d('deconv2',
                           deconv1,
                           3,
                           64,
                           norm=net.norm,
                           mode=net.mode)
        net.im_net['deconv2'] = deconv2
        deconv2 = tf.concat([deconv2, conv2], axis=3)
        deconv3 = deconv2d('deconv3',
                           deconv2,
                           3,
                           32,
                           norm=net.norm,
                           mode=net.mode)
        net.im_net['deconv3'] = deconv3
        deconv3 = tf.concat([deconv3, conv1], axis=3)
        im_feats = deconv2d('deconv4',
                            deconv3,
                            5,
                            32,
                            stride=1,
                            norm=None,
                            mode=net.mode)
        net.im_net['out'] = im_feats

    return im_feats
Example #27
def deconv_gen(z, batch_size):
    """
    Transpose-convolutional Generator.
    """
    z_expand1 = dense(z, 7 * 7 * 64, "expand1")  # TODO : add a relu here ?
    z_matrix = tf.nn.relu(tf.reshape(z_expand1, [batch_size, 7, 7, 64]))
    deconv1 = tf.nn.relu(
        deconv2d(z_matrix, 5, [batch_size, 14, 14, 32], "deconv1"))
    deconv2 = deconv2d(deconv1, 5, [batch_size, 28, 28, 1], "deconv2")
    gen_image = tf.nn.sigmoid(deconv2)

    return gen_image
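A hypothetical usage sketch for this 28x28 (MNIST-style) generator; the latent size of 100 is an assumption:

import tensorflow as tf

batch_size = 64
z = tf.random_normal([batch_size, 100])
fake_images = deconv_gen(z, batch_size)  # -> [64, 28, 28, 1], values in (0, 1)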
Example #28
def dcgan_decoder(opts, inputs, archi, num_layers, num_units, output_shape,
                  batch_norm, reuse, is_training):
    batch_size = tf.shape(inputs)[0]
    if archi == 'dcgan':
        height = output_shape[0] / 2**num_layers
        width = output_shape[1] / 2**num_layers
    elif archi == 'dcgan_mod':
        height = output_shape[0] / 2**(num_layers - 1)
        width = output_shape[1] / 2**(num_layers - 1)

    h0 = ops.linear(opts,
                    inputs,
                    num_units * ceil(height) * ceil(width),
                    scope='hid0/lin')
    h0 = tf.reshape(h0, [-1, ceil(height), ceil(width), num_units])
    h0 = tf.nn.relu(h0)
    layer_x = h0
    for i in range(num_layers - 1):
        scale = 2**(i + 1)
        _out_shape = [
            batch_size,
            ceil(height * scale),
            ceil(width * scale),
            int(num_units / scale)
        ]
        layer_x = ops.deconv2d(opts,
                               layer_x,
                               _out_shape,
                               scope='hid%d/deconv' % i)
        if batch_norm:
            layer_x = ops.batch_norm(opts,
                                     layer_x,
                                     is_training,
                                     reuse,
                                     scope='hid%d/bn' % i)
        layer_x = tf.nn.relu(layer_x)
    _out_shape = [batch_size] + list(output_shape)
    if archi == 'dcgan':
        last_h = ops.deconv2d(opts,
                              layer_x,
                              _out_shape,
                              scope='hid_final/deconv')
    elif archi == 'dcgan_mod':
        last_h = ops.deconv2d(opts,
                              layer_x,
                              _out_shape,
                              d_h=1,
                              d_w=1,
                              scope='hid_final/deconv')
    if opts['input_normalize_sym']:
        return tf.nn.tanh(last_h), last_h
    else:
        return tf.nn.sigmoid(last_h), last_h
Example #29
def dcgan_decoder(opts, noise, is_training=False, reuse=False):
    output_shape = datashapes[opts['dataset']]
    num_units = opts['k_g_num_filters']
    batch_size = tf.shape(noise)[0]
    num_layers = opts['k_g_num_layers']
    if opts['k_g_arch'] == 'dcgan':
        height = output_shape[0] / 2**num_layers
        width = output_shape[1] / 2**num_layers
    elif opts['k_g_arch'] == 'dcgan_mod':
        height = output_shape[0] / 2**(num_layers - 1)
        width = output_shape[1] / 2**(num_layers - 1)

    h0 = ops.linear(opts,
                    noise,
                    num_units * ceil(height) * ceil(width),
                    scope='h0_lin')
    h0 = tf.reshape(h0, [-1, ceil(height), ceil(width), num_units])
    h0 = tf.nn.relu(h0)
    layer_x = h0
    for i in range(num_layers - 1):
        scale = 2**(i + 1)
        _out_shape = [
            batch_size,
            ceil(height * scale),
            ceil(width * scale),
            int(num_units / scale)
        ]
        layer_x = ops.deconv2d(opts,
                               layer_x,
                               _out_shape,
                               scope='h%d_deconv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts,
                                     layer_x,
                                     is_training,
                                     reuse,
                                     scope='h%d_bn' % i)
        layer_x = tf.nn.relu(layer_x)
    _out_shape = [batch_size] + list(output_shape)
    if opts['k_g_arch'] == 'dcgan':
        last_h = ops.deconv2d(opts, layer_x, _out_shape, scope='hfinal_deconv')
    elif opts['k_g_arch'] == 'dcgan_mod':
        last_h = ops.deconv2d(opts,
                              layer_x,
                              _out_shape,
                              d_h=1,
                              d_w=1,
                              scope='hfinal_deconv')
    if opts['input_normalize_sym']:
        return tf.nn.tanh(last_h), last_h
    else:
        return tf.nn.sigmoid(last_h), last_h
Example #30
    def get_generator_net(self, name):
        batch_size = self.get_batch_size()
        with tf.variable_scope('generator') as scope:

            z = tf.concat([self.noise, self.labels], 1)
            net = ops.linear(
                z,
                output_size=1024,
                scope='gen_fully1',
                weights_initializer=self.get_weights_initializer())
            net = ops.batch_norm(net, self.is_training, scope='gen_bn1')
            net = ops.lrelu(net)

            net = tf.concat([net, self.labels], 1)
            # Note: the floor divisions below assume the image size is divisible
            # by 4; otherwise the deconv output sizes will not match self.images.shape.
            h, w = self.data.shape[0] // 4, self.data.shape[1] // 4
            net = ops.linear(
                net,
                output_size=h * w * 2 * batch_size,
                scope='gen_fully2',
                weights_initializer=self.get_weights_initializer())
            net = ops.batch_norm(net, self.is_training, scope='gen_bn2')
            net = ops.lrelu(net)

            net = tf.reshape(net, [batch_size, h, w, 2 * batch_size])
            yb = tf.reshape(self.labels, shape=[batch_size, 1, 1, -1])
            net = ops.conv_cond_concat(net, yb)

            h, w = self.data.shape[0] // 2, self.data.shape[1] // 2
            net = ops.deconv2d(
                net, [batch_size, h, w, 2 * batch_size],
                4,
                4,
                2,
                2,
                scope='gen_deconv1',
                weights_initializer=self.get_weights_initializer())
            net = ops.batch_norm(net, self.is_training, scope='gen_bn3')
            net = ops.lrelu(net)

            net = ops.conv_cond_concat(net, yb)
            out = ops.deconv2d(net,
                               self.images.shape,
                               4,
                               4,
                               2,
                               2,
                               scope='gen_deconv2',
                               weights_initializer=xavier_initializer())

            return tf.nn.sigmoid(out, name=name), out
Example #31
 def generator(self, z, reuse=None):
     with tf.variable_scope('generator', reuse=reuse):
         # project `z` and reshape
         h0 = tf.reshape(linear(z, self.gf_dim * 8 * 4 * 4, 'g_h0_lin'),
                         [-1, 4, 4, self.gf_dim * 8])
         h0 = tf.nn.relu(self.g_bn0(h0))
         h1 = deconv2d(h0, [self.batch_size, 8, 8, self.gf_dim * 4],
                       name='g_h1')
         h1 = tf.nn.relu(self.g_bn1(h1))
         h2 = deconv2d(h1, [self.batch_size, 16, 16, self.gf_dim * 2],
                       name='g_h2')
         h2 = tf.nn.relu(self.g_bn2(h2))
         h3 = deconv2d(h2, [self.batch_size, 32, 32, 3], name='g_h3')
         return tf.nn.tanh(h3)
Example #32
    def __call__(self, input, reuse=False):
        with tf.variable_scope(self.name):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse is False

            act = tf.nn.relu
            _, h, w, c = input.shape.as_list()

            # Justin Johnson's model from https://github.com/jcjohnson/fast-neural-style/
            # The network with 9 blocks consists of: c7s1-32, d64, d128, R128, R128, R128,
            # R128, R128, R128, R128, R128, R128, u64, u32, c7s1-3
            # input shape == (32 x 32 x 128)
            c1 = act(
                instance_norm(conv2d(input, c, 7, 1, name='g_e1_c'),
                              'g_e1_bn'))
            # c1 shape == (32 x 32 x 128)
            c2 = act(
                instance_norm(conv2d(c1, c * 2, 3, 2, name='g_e2_c'),
                              'g_e2_bn'))
            # c2 shape == (16 x 16 x 256)
            c3 = act(
                instance_norm(conv2d(c2, c * 4, 3, 2, name='g_e3_c'),
                              'g_e3_bn'))
            # c3 shape == (8 x 8 x 512)

            # define G network with 9 resnet blocks
            r1 = residule_block(c3, c * 4, name='g_r1')
            r2 = residule_block(r1, c * 4, name='g_r2')
            r3 = residule_block(r2, c * 4, name='g_r3')
            r4 = residule_block(r3, c * 4, name='g_r4')
            r5 = residule_block(r4, c * 4, name='g_r5')
            r6 = residule_block(r5, c * 4, name='g_r6')
            r7 = residule_block(r6, c * 4, name='g_r7')
            r8 = residule_block(r7, c * 4, name='g_r8')
            r9 = residule_block(r8, c * 4, name='g_r9')

            d1 = act(
                instance_norm(deconv2d(r9, c * 2, 3, 2, name='g_d1_dc'),
                              'g_d1_bn'))
            # d1 shape == (16 x 16 x 256)
            d2 = act(
                instance_norm(deconv2d(d1, c, 3, 2, name='g_d2_dc'),
                              'g_d2_bn'))
            # d2 shape == (32 x 32 x 128)
            output = tf.nn.tanh(deconv2d(d2, c, 7, 1, name='g_output_dc'))
            # output shape == (32, 32, 128)

            return tf.identity(output, name=self.name + '_output')
Example #33
    def __call__(self, is_ref):
        """
        Builds the graph propagating from z to x.
        On the first pass, should make variables.
        All variables with names beginning with "g_" will be used for the
        generator network.
        """
        dcgan = self.dcgan
        assert isinstance(dcgan, DCGAN)

        def make_z(shape, minval, maxval, name, dtype):
            assert dtype is tf.float32
            if is_ref:
                with tf.variable_scope(name) as scope:
                    z = tf.get_variable("z", shape,
                                initializer=tf.random_uniform_initializer(minval, maxval),
                                trainable=False)
                    if z.device != "/device:GPU:0":
                        print "z.device is " + str(z.device)
                        assert False
            else:
                z = tf.random_uniform(shape,
                                   minval=minval, maxval=maxval,
                                   name=name, dtype=tf.float32)
            return z


        z = make_z([dcgan.batch_size, dcgan.z_dim],
                                   minval=-1., maxval=1.,
                                   name='z', dtype=tf.float32)
        zs = [z]

        if hasattr(dcgan, 'generator_built'):
            tf.get_variable_scope().reuse_variables()
            make_vars = False
        else:
            make_vars = True


        def reuse_wrapper(packed, *args):
            """
            A wrapper that processes the output of TensorFlow calls differently
            based on whether we are reusing Variables or not.

            Parameters
            ----------
            packed: The output of the TensorFlow call
            args: List of names

            If make_vars is True, then `packed` will contain all the new Variables,
            and we need to assign them to dcgan.foo fields.
            If make_vars is False, then `packed` is just the output tensor, and we
            just return that.
            """
            if make_vars:
                assert len(packed) == len(args) + 1, len(packed)
                out = packed[0]
            else:
                out = packed
            return out

        assert not dcgan.y_dim
        # project `z` and reshape
        z_ = reuse_wrapper(linear(z, dcgan.gf_dim*8*4*4, 'g_h0_lin', with_w=make_vars), 'h0_w', 'h0_b')

        h0 = tf.reshape(z_, [-1, 4, 4, dcgan.gf_dim * 8])
        h0 = tf.nn.relu(dcgan.vbn(h0, "g_vbn_0"))
        h0z = make_z([dcgan.batch_size, 4, 4, dcgan.gf_dim],
                                   minval=-1., maxval=1.,
                                   name='h0z', dtype=tf.float32)
        zs.append(h0z)
        h0 = tf.concat([h0, h0z], 3)

        h1 = reuse_wrapper(deconv2d(h0,
            [dcgan.batch_size, 8, 8, dcgan.gf_dim*4], name='g_h1', with_w=make_vars),
            'h1_w', 'h1_b')
        h1 = tf.nn.relu(dcgan.vbn(h1, "g_vbn_1"))
        h1z = make_z([dcgan.batch_size, 8, 8, dcgan.gf_dim],
                                   minval=-1., maxval=1.,
                                   name='h1z', dtype=tf.float32)
        zs.append(h1z)
        h1 = tf.concat([h1, h1z], 3)


        h2 = reuse_wrapper(deconv2d(h1,
            [dcgan.batch_size, 16, 16, dcgan.gf_dim*2], name='g_h2', with_w=make_vars),
            'h2_w', 'h2_b')
        h2 = tf.nn.relu(dcgan.vbn(h2, "g_vbn_2"))
        half = dcgan.gf_dim // 2
        if half == 0:
            half = 1
        h2z = make_z([dcgan.batch_size, 16, 16, half],
                                   minval=-1., maxval=1.,
                                   name='h2z', dtype=tf.float32)
        zs.append(h2z)
        h2 = tf.concat([h2, h2z], 3)


        h3 = reuse_wrapper(deconv2d(h2,
            [dcgan.batch_size, 32, 32, dcgan.gf_dim*1], name='g_h3', with_w=make_vars),
            'h3_w', 'h3_b')
        if make_vars:
            h3_name = "h3_relu_first"
        else:
            h3_name = "h3_relu_reuse"
        h3 = tf.nn.relu(dcgan.vbn(h3, "g_vbn_3"), name=h3_name)
        print "h3 shape: ", h3.get_shape()

        quarter = dcgan.gf_dim // 4
        if quarter == 0:
            quarter = 1
        h3z = make_z([dcgan.batch_size, 32, 32, quarter],
                                   minval=-1., maxval=1.,
                                   name='h3z', dtype=tf.float32)
        zs.append(h3z)
        h3 = tf.concat([h3, h3z], 3)

        assert dcgan.image_shape[0] == 128

        h4 = reuse_wrapper(deconv2d(h3,
                [dcgan.batch_size, 64, 64, dcgan.gf_dim*1],
                name='g_h4', with_w=make_vars),
            'h4_w', 'h4_b')
        h4 = tf.nn.relu(dcgan.vbn(h4, "g_vbn_4"))
        print "h4 shape: ", h4.get_shape()

        eighth = dcgan.gf_dim // 8
        if eighth == 0:
            eighth = 1
        h4z = make_z([dcgan.batch_size, 64, 64, eighth],
                                   minval=-1., maxval=1.,
                                   name='h4z', dtype=tf.float32)
        zs.append(h4z)
        h4 = tf.concat([h4, h4z], 3)

        h5 = reuse_wrapper(deconv2d(h4,
                [dcgan.batch_size, 128, 128, dcgan.gf_dim * 1],
                name='g_h5', with_w=make_vars),
            'h5_w', 'h5_b')
        h5 = tf.nn.relu(dcgan.vbn(h5, "g_vbn_5"))
        print "h5 shape: ", h5.get_shape()

        sixteenth = dcgan.gf_dim // 16
        if sixteenth == 0:
            sixteenth = 1
        h5z = make_z([dcgan.batch_size, 128, 128, sixteenth],
                                   minval=-1., maxval=1.,
                                   name='h5z', dtype=tf.float32)
        zs.append(h5z)
        h5 = tf.concat([h5, h5z], 3)

        h6 = reuse_wrapper(deconv2d(h5,
                [dcgan.batch_size, 128, 128, 3],
                d_w = 1, d_h = 1,
                name='g_h6', with_w=make_vars,
                init_bias=dcgan.out_init_b,
                stddev=dcgan.out_stddev),
            'h6_w', 'h6_b')
        print('h6 shape:', h6.get_shape())

        out = tf.nn.tanh(h6)

        dcgan.generator_built = True
        return out, zs