Example #1
 def discriminator(self, images, image_size, reuse=False):
     image_size //= 64  # six stride-2 convolutions shrink each spatial dim by 2**6 = 64
     with tf.variable_scope('discriminator', reuse=reuse):
         gd_h0 = lrelu(conv2d(images, 64, name="d_gd_h0_conv"))
         gd_h1 = lrelu(conv2d(gd_h0, 128, name='d_gd_h1_conv'))
         gd_h2 = lrelu(conv2d(gd_h1, 256, name='d_gd_h2_conv'))
         gd_h3 = lrelu(conv2d(gd_h2, 512, name='d_gd_h3_conv'))
         gd_h4 = lrelu(conv2d(gd_h3, 512, name='d_gd_h4_conv'))
         gd_h5 = lrelu(conv2d(gd_h4, 512, name='d_gd_h5_conv'))
         gd_h = linear(tf.reshape(
             gd_h5, [self.batch_size, int(512 * image_size * image_size)]), 64 * image_size * image_size, 'd_gd_linear')
         return linear(gd_h, 1, 'd_linear')
Example #2
 def generator(self, images):
     with tf.variable_scope("generator"):
         g_h0 = tf.nn.relu(conv2d(images, 16, name='g_encode_0'))
         g_h1 = tf.nn.relu(conv2d(g_h0, 32, name='g_encode_1'))
         g_h2 = tf.nn.relu(conv2d(g_h1, 64, name='g_encode_2'))
         g_flat = tf.reshape(g_h2, [self.batch_size, -1])
         g_encode = linear(g_flat, 128, 'g_encode')
         g_decode = linear(g_encode, 512 * 4 * 4, 'g_h0')
         g_h3 = tf.nn.relu(tf.reshape(g_decode, [self.batch_size, 4, 4, 512]))
         g_h4 = tf.nn.relu(conv2d_transpose(g_h3, [self.batch_size, 8, 8, 256], name='g_h1'))
         g_h5 = tf.nn.relu(conv2d_transpose(g_h4, [self.batch_size, 16, 16, 128], name='g_h2'))
         g_h6 = tf.nn.relu(conv2d_transpose(g_h5, [self.batch_size, 32, 32, 64], name='g_h3'))
         g_h7 = conv2d_transpose(g_h6, [self.batch_size, 64, 64, 3], name='g_h4')
         return tf.nn.tanh(g_h7)
Example #3
def Gen_Adap_Weights(aspect_ratio, shape, name="adap_conv_layer"):
    with tf.variable_scope(name):
        # Number of units in the final layer (for convolution)
        n_units = shape[0] * shape[1] * shape[2] * shape[3] + shape[3]

        adap_weights = linear(aspect_ratio, 16, 'l_1')
        adap_weights = linear(adap_weights, 32, 'l_2')
        adap_weights = linear(adap_weights, n_units, 'l_3')

        bias = adap_weights[:, -shape[3]:]
        bias = tf.reshape(bias, [-1])

        weights = tf.reshape(adap_weights[:, :-shape[3]], shape)

        return weights, bias
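
# Usage sketch (not from the original source): Gen_Adap_Weights only returns a
# kernel and a bias for an "adaptive" convolution, so a minimal application
# looks like this. The input names and kernel shape are assumptions; note the
# bias is flattened across the batch above, so this wiring assumes batch size 1.
aspect_ratio = tf.placeholder(tf.float32, [1, 1])
feature_map = tf.placeholder(tf.float32, [1, 32, 32, 64])
adap_w, adap_b = Gen_Adap_Weights(aspect_ratio, [3, 3, 64, 128])
out = tf.nn.bias_add(
    tf.nn.conv2d(feature_map, adap_w, strides=[1, 1, 1, 1], padding='SAME'),
    adap_b)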
Example #4
def began_encoder(num_units,
                  num_layers,
                  output_dim,
                  inputs,
                  opts,
                  is_training=False,
                  reuse=False):
    layer_x = inputs
    layer_x = ops.conv2d(opts, layer_x, num_units, scope='hfirst_conv')
    for i in range(num_layers):
        if i % 3 < 2:
            ii = i - i // 3  # index among conv (non-pool) layers
            if i != num_layers - 2:
                scale = ii + 1 - ii // 2
            else:
                scale = ii - (ii - 1) // 2
            layer_x = ops.conv2d(opts,
                                 layer_x,
                                 num_units * scale,
                                 d_h=1,
                                 d_w=1,
                                 scope='_h{}_conv'.format(i))
            layer_x = tf.nn.relu(layer_x)
        else:
            if i != num_layers - 1:
                layer_x = ops.downsample(layer_x,
                                         scope='h{}_maxpool'.format(i),
                                         reuse=reuse)
    # Tensor should be [N, 8, 8, filters] at this point
    layer_x = ops.linear(opts, layer_x, output_dim, scope='out_lin')

    return layer_x
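
# Trace of the scale schedule above (a sketch, assuming num_layers=6): it
# re-evaluates the same expressions to show each conv layer's width multiplier.
num_layers = 6
for i in range(num_layers):
    if i % 3 < 2:          # conv layer; every third layer downsamples instead
        ii = i - i // 3
        if i != num_layers - 2:
            scale = ii + 1 - ii // 2
        else:
            scale = ii - (ii - 1) // 2
        print('layer %d: num_units * %d' % (i, scale))
# prints multipliers 1, 2, 2, 2 for layers 0, 1, 3, 4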
Example #5
    def eve_model(self, data_input=None, reuse=False):

        ####### Eve's network #######

        with tf.variable_scope("eve") as scope:
            if reuse:
                scope.reuse_variables()

            self.eve_input = data_input

            print(self.eve_input)

            self.eve0 = lrelu(
                conv2d(self.eve_input, self.output_size, name='eve_h0_conv'))
            self.eve_bn1 = batch_norm(name='eve_bn1')
            self.eve1 = lrelu(
                self.eve_bn1(
                    conv2d(self.eve0, self.output_size * 2,
                           name='eve_h1_conv')))
            self.eve_bn2 = batch_norm(name='eve_bn2')
            self.eve2 = lrelu(
                self.eve_bn2(
                    conv2d(self.eve1, self.output_size * 4,
                           name='eve_h2_conv')))
            self.eve_bn3 = batch_norm(name='eve_bn3')
            self.eve3 = lrelu(
                self.eve_bn3(
                    conv2d(self.eve2, self.output_size * 8,
                           name='eve_h3_conv')))
            # flatten the deepest conv block (eve3) for the final linear layer
            self.eve4 = linear(tf.reshape(self.eve3, [self.batch_size, -1]), 1,
                               'eve_h3_lin')
            return self.eve4, self.eve4
Example #6
    def bob_model(self, data_input_image=None):

        ####### Bob's network #######

        # bob's input
        self.bob_input = data_input_image
        print(self.bob_input)

        self.bob0 = lrelu(
            conv2d(self.bob_input, self.output_size, name='bob_h0_conv'))
        self.bob_bn1 = batch_norm(name='bob_bn1')
        self.bob1 = lrelu(
            self.bob_bn1(
                conv2d(self.bob0, self.output_size * 2, name='bob_h1_conv')))
        self.bob_bn2 = batch_norm(name='bob_bn2')
        self.bob2 = lrelu(
            self.bob_bn2(
                conv2d(self.bob1, self.output_size * 4, name='bob_h2_conv')))
        self.bob_bn3 = batch_norm(name='bob_bn3')
        self.bob3 = lrelu(
            self.bob_bn3(
                conv2d(self.bob2, self.output_size * 8, name='bob_h3_conv')))
        self.bob4 = linear(tf.reshape(self.bob3, [self.batch_size, -1]),
                           self.msg_len, 'bob_h3_lin')
        return tf.nn.tanh(self.bob4)
Example #7
    def tower(bn, suffix):
        assert not self.y_dim
        print("\ttower " + suffix)
        h0 = lrelu(bn(conv2d(noisy_image, self.df_dim, name='d_h0_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_0" + suffix))
        print("\th0 ", h0.get_shape())
        h1 = lrelu(bn(conv2d(h0, self.df_dim * 2, name='d_h1_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_1" + suffix))
        print("\th1 ", h1.get_shape())
        h2 = lrelu(bn(conv2d(h1, self.df_dim * 4, name='d_h2_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_2" + suffix))
        print("\th2 ", h2.get_shape())

        h3 = lrelu(bn(conv2d(h2, self.df_dim * 4, name='d_h3_conv' + suffix, d_h=1, d_w=1,
            k_w=3, k_h=3), "d_bn_3" + suffix))
        print("\th3 ", h3.get_shape())
        h4 = lrelu(bn(conv2d(h3, self.df_dim * 4, name='d_h4_conv' + suffix, d_h=1, d_w=1,
            k_w=3, k_h=3), "d_bn_4" + suffix))
        print("\th4 ", h4.get_shape())
        h5 = lrelu(bn(conv2d(h4, self.df_dim * 8, name='d_h5_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_5" + suffix))
        print("\th5 ", h5.get_shape())

        h6 = lrelu(bn(conv2d(h5, self.df_dim * 8, name='d_h6_conv' + suffix,
            k_w=3, k_h=3), "d_bn_6" + suffix))
        print("\th6 ", h6.get_shape())
        # return tf.reduce_mean(h6, [1, 2])
        h6_reshaped = tf.reshape(h6, [batch_size, -1])
        print('\th6_reshaped: ', h6_reshaped.get_shape())

        h7 = lrelu(bn(linear(h6_reshaped, self.df_dim * 40, scope="d_h7" + suffix), "d_bn_7" + suffix))

        return h7
Example #8
def generator(input_z,
              t_txt=None,
              is_train=True,
              reuse=False,
              batch_size=batch_size):

    g_bn0 = ops.batch_norm(name='g_bn0')
    g_bn1 = ops.batch_norm(name='g_bn1')
    g_bn2 = ops.batch_norm(name='g_bn2')
    g_bn3 = ops.batch_norm(name='g_bn3')

    s = image_size  # output image size [64]
    s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)
    gf_dim = 128

    with tf.variable_scope("generator", reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        z_concat = tf.concat([input_z, t_txt], 1)
        z_ = ops.linear(z_concat, gf_dim * 8 * s16 * s16, 'g_h0_lin')
        h0 = tf.reshape(z_, [-1, s16, s16, gf_dim * 8])
        h0 = tf.nn.relu(g_bn0(h0))
        h1 = ops.deconv2d(h0, [batch_size, s8, s8, gf_dim * 4], name='g_h1')
        h1 = tf.nn.relu(g_bn1(h1))

        h2 = ops.deconv2d(h1, [batch_size, s4, s4, gf_dim * 2], name='g_h2')
        h2 = tf.nn.relu(g_bn2(h2))

        h3 = ops.deconv2d(h2, [batch_size, s2, s2, gf_dim * 1], name='g_h3')
        h3 = tf.nn.relu(g_bn3(h3))

        h4 = ops.deconv2d(h3, [batch_size, s, s, 3], name='g_h4')

    return h4, tf.tanh(h4)
Example #9
def resnet_generator(z, labels):
    with tf.variable_scope('generator'):
        embedding_map = tf.get_variable(
            name='embedding_map',
            shape=[1000, 100],
            initializer=tf.contrib.layers.xavier_initializer())
        label_embedding = tf.nn.embedding_lookup(embedding_map, labels)
        noise_plus_labels = tf.concat([z, label_embedding], 1)
        linear = ops.linear(noise_plus_labels, G_DIM * 8 * 4 * 4, use_sn=True)
        linear = tf.reshape(linear, [-1, G_DIM * 8, 4, 4])

        res1 = resnet_blocks.class_conditional_generator_block(
            linear, labels, G_DIM * 8, 1000, True, "res1")  # 8x8
        res2 = resnet_blocks.class_conditional_generator_block(
            res1, labels, G_DIM * 4, 1000, True, "res2")  # 16x16
        nl = non_local.sn_non_local_block_sim(res2, None, name='nl')
        res3 = resnet_blocks.class_conditional_generator_block(
            nl, labels, G_DIM * 2, 1000, True, "res3")  # 32x32
        res4 = resnet_blocks.class_conditional_generator_block(
            res3, labels, G_DIM, 1000, True, "res4")  # 64x64
        res4 = tf.layers.batch_normalization(res4, training=True)
        res4 = tf.nn.relu(res4)

        conv = ops.conv2d(res4, 3, 3, 3, 1, 1, name="conv", use_sn=True)
        conv = tf.nn.tanh(conv)

        return conv
Example #10
def relation_network(state, mask, seed=123):
    # Placeholder layer sizes
    d_e = [64, 64, 64]
    d_o = [128, 128]

    # Build graph:
    initial_elems = state

    # Embedding Part
    for i, layer in enumerate(d_e):
        el = initial_elems
        el, _ = relation_layer(layer, el, mask, name='l' + str(i))

    c = mask_and_pool(el, mask)  # pool to get context for next block

    # Fully connected part
    fc = c
    for i, layer in enumerate(d_o):
        fc, _, _ = linear(fc, layer, name='lO_' + str(i))

    # Output
    embedding = fc

    # Returns the network output and parameters
    return embedding, []
Example #11
    def generator(self, z):
        with tf.variable_scope('generator') as scope:
            # Derive the size at each stage from the output size
            o_h0, o_w0 = self.cfg.output_height, self.cfg.output_width
            o_h1, o_w1 = get_conved_size(o_h0, 2), get_conved_size(o_w0, 2)
            o_h2, o_w2 = get_conved_size(o_h1, 2), get_conved_size(o_w1, 2)
            o_h3, o_w3 = get_conved_size(o_h2, 2), get_conved_size(o_w2, 2)
            o_h4, o_w4 = get_conved_size(o_h3, 2), get_conved_size(o_w3, 2)

            # The ReLU step is kept separate because the deconv2d step requires weight sharing
            z_ = linear(z, self.cfg.gf_dim * 8 * o_h4 * o_w4, scope='g_h0_lin')
            h0 = tf.reshape(z_, [-1, o_h4, o_w4, self.cfg.gf_dim * 8])
            h0 = tf.nn.relu(self.bn_g0(h0, train=True))
            h1 = deconv2d(
                h0, [self.cfg.batch_size, o_h3, o_w3, self.cfg.gf_dim * 4],
                scope='g_h1')
            h1 = tf.nn.relu(self.bn_g1(h1, train=True))
            h2 = deconv2d(
                h1, [self.cfg.batch_size, o_h2, o_w2, self.cfg.gf_dim * 2],
                scope='g_h2')
            h2 = tf.nn.relu(self.bn_g2(h2, train=True))
            h3 = deconv2d(
                h2, [self.cfg.batch_size, o_h1, o_w1, self.cfg.gf_dim * 1],
                scope='g_h3')
            h3 = tf.nn.relu(self.bn_g3(h3, train=True))
            h4 = deconv2d(h3, [self.cfg.batch_size, o_h0, o_w0, self.c_dim],
                          scope='g_h4')

            return tf.nn.tanh(h4)
Example #12
    def sampler(self, z):
        # Differs from the generator in that: 1) variable reuse is enabled 2) batch_norm is run with train=False
        with tf.variable_scope('generator') as scope:
            scope.reuse_variables()
            o_h0, o_w0 = self.cfg.output_height, self.cfg.output_width
            o_h1, o_w1 = get_conved_size(o_h0, 2), get_conved_size(o_w0, 2)
            o_h2, o_w2 = get_conved_size(o_h1, 2), get_conved_size(o_w1, 2)
            o_h3, o_w3 = get_conved_size(o_h2, 2), get_conved_size(o_w2, 2)
            o_h4, o_w4 = get_conved_size(o_h3, 2), get_conved_size(o_w3, 2)

            z_ = linear(z, self.cfg.gf_dim * 8 * o_h4 * o_w4, scope='g_h0_lin')
            h0 = tf.reshape(z_, [-1, o_h4, o_w4, self.cfg.gf_dim * 8])
            h0 = tf.nn.relu(self.bn_g0(h0, train=False))
            h1 = deconv2d(
                h0, [self.cfg.batch_size, o_h3, o_w3, self.cfg.gf_dim * 4],
                scope='g_h1')
            h1 = tf.nn.relu(self.bn_g1(h1, train=False))
            h2 = deconv2d(
                h1, [self.cfg.batch_size, o_h2, o_w2, self.cfg.gf_dim * 2],
                scope='g_h2')
            h2 = tf.nn.relu(self.bn_g2(h2, train=False))
            h3 = deconv2d(
                h2, [self.cfg.batch_size, o_h1, o_w1, self.cfg.gf_dim * 1],
                scope='g_h3')
            h3 = tf.nn.relu(self.bn_g3(h3, train=False))
            h4 = deconv2d(h3, [self.cfg.batch_size, o_h0, o_w0, self.c_dim],
                          scope='g_h4')

            return tf.nn.tanh(h4)
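
# Wiring sketch (assumed names, not original code): generator() builds the
# variables during training; sampler() reuses them with batch norm frozen.
z = tf.placeholder(tf.float32, [None, z_dim])  # z_dim is an assumed hyperparameter
G_train = model.generator(z)   # creates 'generator/*' variables, train=True
G_sample = model.sampler(z)    # reuses them via scope.reuse_variables(), train=False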
Example #13
def generator(z, trainable=True, reuse=tf.AUTO_REUSE):
    # z = tf.reshape(z, (-1, 1, 1, n_dim))
    with tf.variable_scope("generator", reuse=reuse):
        ch = 1024
        z = ops.linear(z, ch * 4 * 4, scope='g_h0')
        z = tf.reshape(z, (-1, 4, 4, ch))  # 4*4*1024
        print(z)

        z = risidual_up_block(z, ch, trainable, scope='deconv0')  # 8*8*1024
        print(z)

        z = risidual_up_block(z, ch // 2, trainable, scope='deconv1')  # 16*16*512
        print(z)

        z = risidual_up_block(z, ch // 4, trainable, scope='deconv2')  # 32*32*256
        print(z)

        z = risidual_up_block(z, ch // 8, trainable, scope='deconv3')  # 64*64*128
        z = attention(z, z.shape[-1])
        print(z)

        z = risidual_up_block(z, ch // 16, trainable, scope='deconv4')  # 128*128*64
        print(z)

        z = risidual_up_block(z, ch // 32, trainable, scope='deconv5')  # 256*256*32
        print(z)
        z = tf.layers.batch_normalization(z, training=trainable)
        z = tf.nn.relu(z)
        z = ops.snconv2d(z, channel, 3, 3, 1, 1, name='last_layer')
        z = tf.nn.tanh(z)
        print(z)
    return z
Example #14
    def generator(self, z_enc, train):
        with tf.variable_scope('gan'):
            base_filters = self.d_size
            h0 = ops.linear(z_enc[:, 0:(self.z_size - 1)],
                            self.z_size - 1,
                            4 * 4 * 4 * base_filters,
                            scope='g_f0')
            h0 = tf.reshape(h0, [self.batch_size, 4, 4, 4, base_filters])
            h0 = tf.nn.relu(self.g_bn0(h0, train))
            h1 = ops.deconv3d(h0, [self.batch_size, 8, 8, 8, base_filters // 2],
                              name='g_h1')
            h1 = tf.nn.relu(self.g_bn1(h1, train))
            h2 = ops.deconv3d(h1,
                              [self.batch_size, 16, 16, 16, base_filters // 4],
                              name='g_h2')
            h2 = tf.nn.relu(self.g_bn2(h2, train))
            h3 = ops.deconv3d(h2, [self.batch_size, 32, 32, 32, 1],
                              name='g_h3')
            h3 = tf.nn.sigmoid(h3)
            self.voxels = tf.reshape(h3, [self.batch_size, 32, 32, 32])
            v = z_enc[:, self.z_size - 1]

            rendered_imgs = []
            for i in range(self.batch_size):
                img = ops.project(
                    ops.transform_volume(self.voxels[i], ops.rot_matrix(v[i])),
                    self.tau)
                rendered_imgs.append(img)

            self.final_imgs = tf.reshape(tf.stack(rendered_imgs),
                                         [self.batch_size, 32, 32, 1])
        return self.final_imgs
Example #15
    def discriminator(self, image, is_training):

        # h0 = lrelu(conv2d(image))
        h0 = lrelu(conv2d(image, self.df_dim, self.data_format,
                          name='h0_conv'))

        chain = h0
        for h in range(1, self.nd_layers):
            # h1 = lrelu(BN(conv2d(h0)))
            chain = conv2d(chain,
                           self.df_dim * (2**h),
                           self.data_format,
                           name='h%i_conv' % h)
            chain = tf.contrib.layers.batch_norm(chain,
                                                 is_training=is_training,
                                                 scope='bn%i' % h,
                                                 **self.batchnorm_kwargs)
            chain = lrelu(chain)

        # h1 = linear(reshape(h0))
        hn = linear(tf.reshape(chain, [self.batch_size, -1]),
                    1,
                    'h%i_lin' % self.nd_layers,
                    transpose_b=self.transpose_b)

        return tf.nn.sigmoid(hn), hn
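
# Hedged usage sketch (not from the original source): the (sigmoid, logits)
# pair lets the loss use the raw logits for numerical stability. `model` and
# `real_images` are assumed names.
d_real, d_real_logits = model.discriminator(real_images, is_training=True)
d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_real_logits), logits=d_real_logits))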
Example #16
    def discriminator(self, image, reuse=False):
        with tf.variable_scope("discriminator") as scope:
            # image is 256 x 256 x (input_c_dim + output_c_dim)
            if reuse:
                scope.reuse_variables()
            else:
                assert not tf.get_variable_scope().reuse

            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            # h0 is (128 x 128 x self.df_dim)
            h1 = lrelu(
                layer_norm(conv2d(h0, self.df_dim * 2, name='d_h1_conv')))
            # h1 is (64 x 64 x self.df_dim*2)
            h2 = lrelu(
                layer_norm(conv2d(h1, self.df_dim * 4, name='d_h2_conv')))
            # h2 is (32 x 32 x self.df_dim*4)
            h3 = lrelu(
                layer_norm(
                    conv2d(h2, self.df_dim * 8, d_h=1, d_w=1,
                           name='d_h3_conv')))
            # h3 is (16 x 16 x self.df_dim*8)
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return tf.nn.sigmoid(h4), h4
Example #17
    def __call__(self, x, is_reuse=False, is_train=True):
        with tf.variable_scope('generator') as scope:
            if is_reuse:
                scope.reuse_variables()

            unit_size = self.img_size[0] // (2 ** self.layer_n)
            unit_n = self.smallest_hidden_unit_n * (2 ** (self.layer_n - 1))
            batch_size = int(x.shape[0])

            with tf.variable_scope('pre'):
                x = linear(x, unit_size * unit_size * unit_n)
                x = tf.reshape(x, (batch_size, unit_size, unit_size, unit_n))
                if self.is_bn:
                    x = batch_norm(x, is_train)
                x = tf.nn.relu(x)

            for i in range(self.layer_n):
                with tf.variable_scope('layer{}'.format(i)):
                    if i == self.layer_n - 1:
                        unit_n = self.img_dim
                    else:
                        unit_n = self.smallest_hidden_unit_n * (2 ** (self.layer_n - i - 2))
                    x_shape = x.get_shape().as_list()
                    x = tf.image.resize_bilinear(x, (x_shape[1] * 2, x_shape[2] * 2))
                    x = conv2d(x, unit_n, self.k_size, 1, 'SAME')
                    if i != self.layer_n - 1:
                        if self.is_bn:
                            x = batch_norm(x, is_train)
                        x = tf.nn.relu(x)
            x = tf.nn.tanh(x)

            return x
Example #18
def dcgan_decoder(opts, noise, is_training=False, reuse=False):
    output_shape = datashapes[opts['dataset']]
    num_units = opts['g_num_filters']
    batch_size = tf.shape(noise)[0]
    num_layers = opts['g_num_layers']
    height = output_shape[0] // 2 ** (num_layers - 1)
    width = output_shape[1] // 2 ** (num_layers - 1)

    h0 = ops.linear(
        opts, noise, num_units * height * width, scope='h0_lin')
    h0 = tf.reshape(h0, [-1, height, width, num_units])
    h0 = tf.nn.relu(h0)
    layer_x = h0
    for i in range(num_layers - 1):
        scale = 2 ** (i + 1)
        _out_shape = [batch_size, height * scale,
                      width * scale, num_units // scale]
        layer_x = ops.deconv2d(opts, layer_x, _out_shape,
                               scope='h%d_deconv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x,
                                     is_training, reuse, scope='h%d_bn' % i)
        layer_x = tf.nn.relu(layer_x)
    _out_shape = [batch_size] + list(output_shape)

    last_h = ops.deconv2d(
        opts, layer_x, _out_shape, d_h=1, d_w=1, scope='hfinal_deconv')
    return tf.nn.sigmoid(last_h), last_h
Example #19
    def __init__(self, config):
        self.config = config
        # TODO: separate out into functions
        with tf.variable_scope(config.name):
            config = self.config
            self.X = tf.placeholder(
                tf.float32, [None, config.timesteps, config.input_size])
            X = tf.unstack(self.X, config.timesteps, 1)
            self.y = tf.placeholder(tf.int32, [None])

            lstm_cell = rnn.BasicLSTMCell(config.hidden_size, forget_bias=1.0)
            rnn_outputs, states = rnn.static_rnn(lstm_cell,
                                                 X,
                                                 dtype=tf.float32)
            output = linear(rnn_outputs[-1], config.output_size)
            # prediction
            self._prediction = tf.nn.softmax(output)
            # loss: the cross-entropy op expects logits, so pass `output`
            # rather than the already-softmaxed prediction
            self._loss = tf.reduce_mean(
                tf.losses.sparse_softmax_cross_entropy(self.y, output))
            # optimizer
            # TODO: the optimizer could be made configurable
            optimizer = tf.train.MomentumOptimizer(
                learning_rate=config.learning_rate,
                momentum=config.momentum,
                use_nesterov=True)
            self._optimize = optimizer.minimize(self._loss)
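
# Training-step sketch (the class name and batch tensors are assumptions):
model = RNNClassifier(config)  # hypothetical name for the class above
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, loss = sess.run([model._optimize, model._loss],
                       feed_dict={model.X: batch_x, model.y: batch_y})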
Example #20
    def generator(self, z):
        with tf.variable_scope("generator") as scope:
            self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim*8*4*4, 'g_h0_lin', with_w=True)

            # TODO: Nicer iteration pattern here. #readability
            hs = [None]
            hs[0] = tf.reshape(self.z_, [-1, 4, 4, self.gf_dim * 8])
            hs[0] = tf.nn.relu(self.g_bns[0](hs[0], self.training_bool))

            i = 1 # Iteration number.
            depth_mul = 8  # Depth decreases as spatial component increases.
            size = 8  # Size increases as depth decreases.

            # Four transposed-convolution layers, e.g. for a 64x64 output image
            while size < self.image_size:
                hs.append(None)
                name = 'g_h{}'.format(i)
                hs[i], _, _ = conv2d_transpose(hs[i-1],
                    [self.batch_size, size, size, self.gf_dim*depth_mul], name=name, with_w=True)
                hs[i] = tf.nn.relu(self.g_bns[i](hs[i], self.training_bool))

                i += 1
                depth_mul //= 2
                size *= 2

            hs.append(None)
            name = 'g_h{}'.format(i)
            hs[i], _, _ = conv2d_transpose(hs[i - 1],
                [self.batch_size, size, size, 3], name=name, with_w=True)

            return tf.nn.tanh(hs[i])
Example #21
def discriminator(hparams, x, train, reuse):

    if reuse:
        tf.get_variable_scope().reuse_variables()

    d_bn1 = ops.batch_norm(name='d_bn1')
    d_bn2 = ops.batch_norm(name='d_bn2')
    d_bn3 = ops.batch_norm(name='d_bn3')

    h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))

    h1 = ops.conv2d(h0, hparams.df_dim * 2, name='d_h1_conv')
    h1 = ops.lrelu(d_bn1(h1, train=train))

    h2 = ops.conv2d(h1, hparams.df_dim * 4, name='d_h2_conv')
    h2 = ops.lrelu(d_bn2(h2, train=train))

    h3 = ops.conv2d(h2, hparams.df_dim * 8, name='d_h3_conv')
    h3 = ops.lrelu(d_bn3(h3, train=train))

    h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')

    d_logit = h4
    d = tf.nn.sigmoid(d_logit)

    return d, d_logit
Example #22
from functools import reduce  # reduce is not a builtin on Python 3

def deepmind_CNN(state, output_size=128, seed=123):
    w = {}
    #initializer = tf.contrib.layers.xavier_initializer()
    initializer = tf.truncated_normal_initializer(0, 0.1, seed=seed)
    activation_fn = tf.nn.relu
    
    state = tf.transpose(state, perm=[0, 2, 3, 1])

    state = tf.truediv(state, 255.0)  # scale raw pixel values into [0, 1]
    l1, w['l1_w'], w['l1_b'] = conv2d(state,
      32, [8, 8], [4, 4], initializer, activation_fn, 'NHWC', name='l1')
    l2, w['l2_w'], w['l2_b'] = conv2d(l1,
      64, [4, 4], [2, 2], initializer, activation_fn, 'NHWC', name='l2')
    l3, w['l3_w'], w['l3_b'] = conv2d(l2, 
      64, [3, 3], [1, 1], initializer, activation_fn, 'NHWC', name='l3')

    shape = l3.get_shape().as_list()
    l3_flat = tf.reshape(l3, [-1, reduce(lambda x, y: x * y, shape[1:])])

    #l1, w['l1_w'], w['l1_b'] = conv2d(state/255.,
    #    16, [8, 8], [4, 4], initializer, activation_fn, 'NHWC', name='l1')
    #l2, w['l2_w'], w['l2_b'] = conv2d(l1,
    #    32, [4, 4], [2, 2], initializer, activation_fn, 'NHWC', name='l2')

    #shape = l2.get_shape().as_list()
    #l2_flat = tf.reshape(l2, [-1, reduce(lambda x, y: x * y, shape[1:])])
      
    embedding, w['l4_w'], w['l4_b'] = linear(l3_flat, 128,
      activation_fn=tf.identity, name='value_hid')

    # Returns the network output and parameters
    return embedding, list(w.values())  # list() so it stays a sequence on Python 3
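
# Usage sketch (not from the original source): the transpose above implies an
# NCHW input, and the 8/4/3 kernel sizes match the DQN Atari architecture, so
# a plausible input is a batch of stacked 84x84 frames. Shapes are assumptions.
state = tf.placeholder(tf.float32, [None, 4, 84, 84])
embedding, params = deepmind_CNN(state, output_size=128)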
Example #23
    def discriminator(self, image, y=None, reuse=False):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            h0 = ops.lrelu(
                ops.conv2d(image, self.args.df_dim, name='d_h0_conv'))
            h1 = ops.lrelu(
                ops.bn_layer(ops.conv2d(h0,
                                        self.args.df_dim * 2,
                                        name='d_h1_conv'),
                             name="d_bn1"))
            h2 = ops.lrelu(
                ops.bn_layer(ops.conv2d(h1,
                                        self.args.df_dim * 4,
                                        name='d_h2_conv'),
                             name="d_bn2"))
            h3 = ops.lrelu(
                ops.bn_layer(ops.conv2d(h2,
                                        self.args.df_dim * 8,
                                        name='d_h3_conv'),
                             name="d_bn3"))
            h4 = ops.linear(tf.reshape(h3, [self.args.batch_size, -1]), 1,
                            'd_h4_lin')

            return tf.nn.sigmoid(h4), h4
Example #24
 def new_gate(gate_name):
     return linear([output_list[-1], o_prev],
                   output_size=self.ctl_hidden_size,
                   bias=True,
                   stddev=self.init_range,
                   scope="%s_gate_%s" %
                   (gate_name, layer_idx))
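
 # Sketch of how such gate helpers typically combine in an LSTM-style
 # controller update. The state names (m_prev below) are assumptions,
 # not part of the original code.
 i_g = tf.sigmoid(new_gate('input'))
 f_g = tf.sigmoid(new_gate('forget'))
 o_g = tf.sigmoid(new_gate('output'))
 update = tf.tanh(new_gate('update'))
 m = f_g * m_prev + i_g * update  # new memory state
 h = o_g * tf.tanh(m)             # new hidden output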
Example #25
 def discriminator(self, images, image_size, reuse=False):
     image_size //= 64  # six stride-2 convolutions shrink each spatial dim by 2**6 = 64
     with tf.variable_scope('discriminator', reuse=reuse):
         gd_h0 = lrelu(conv2d(images, 64, name="d_gd_h0_conv"))
         gd_h1 = lrelu(conv2d(gd_h0, 128, name='d_gd_h1_conv'))
         gd_h2 = lrelu(conv2d(gd_h1, 256, name='d_gd_h2_conv'))
         gd_h3 = lrelu(conv2d(gd_h2, 512, name='d_gd_h3_conv'))
         gd_h4 = lrelu(conv2d(gd_h3, 512, name='d_gd_h4_conv'))
         gd_h5 = lrelu(conv2d(gd_h4, 512, name='d_gd_h5_conv'))
         gd_h = linear(
             tf.reshape(
                 gd_h5,
                 [self.batch_size,
                  int(512 * image_size * image_size)]),
             64 * image_size * image_size, 'd_gd_linear')
         return linear(gd_h, 1, 'd_linear')
Example #26
def generator3(z, dim=64, reuse=True, training=True):
    bn = partial(batch_norm, is_training=training)
    dconv_bn_relu = partial(dconv,
                            normalizer_fn=bn,
                            activation_fn=relu,
                            biases_initializer=None)
    fc_bn_relu = partial(fc,
                         normalizer_fn=bn,
                         activation_fn=relu,
                         biases_initializer=None)

    with tf.variable_scope('generator', reuse=reuse):
        y = ops.linear(z, 2 * 2 * dim * 8, 'gl1')  #with bias
        y = tf.reshape(y, [-1, 2, 2, dim * 8])
        y = tf.nn.relu(
            tf.layers.batch_normalization(y,
                                          training=training,
                                          momentum=0.9,
                                          epsilon=1e-5,
                                          scale=True))
        y = dconv_bn_relu(y, dim * 4, 5, 2)  #without bias
        y = dconv_bn_relu(y, dim * 2, 5, 2)
        y = dconv_bn_relu(y, dim * 1, 5, 2)
        img = tf.tanh(dconv(y, 1, 5, 2))

        return img
Example #27
def dcgan_encoder(opts,
                  inputs,
                  num_layers,
                  num_units,
                  output_dim,
                  num_mixtures,
                  batch_norm=False,
                  reuse=False,
                  is_training=False):
    outputs = []
    for k in range(num_mixtures):
        layer_x = inputs
        for i in range(num_layers):
            scale = 2**(num_layers - i - 1)
            layer_x = ops.conv2d(opts,
                                 layer_x,
                                 int(num_units / scale),
                                 scope='mix{}/hid{}/conv'.format(k, i))
            if batch_norm:
                layer_x = ops.batch_norm(opts,
                                         layer_x,
                                         is_training,
                                         reuse,
                                         scope='mix{}/hid{}/bn'.format(k, i))
            layer_x = tf.nn.relu(layer_x)
        output = ops.linear(opts,
                            layer_x,
                            output_dim,
                            scope='mix{}/hid_final'.format(k))
        outputs.append(output)
    return outputs
Example #28
def began_decoder(opts, noise, is_training=False, reuse=False):

    output_shape = datashapes[opts['dataset']]
    num_units = opts['g_num_filters']
    num_layers = opts['g_num_layers']
    batch_size = tf.shape(noise)[0]

    h0 = ops.linear(opts, noise, num_units * 8 * 8, scope='h0_lin')
    h0 = tf.reshape(h0, [-1, 8, 8, num_units])
    layer_x = h0
    for i in range(num_layers):
        if i % 3 < 2:
            # Don't change resolution
            layer_x = ops.conv2d(opts, layer_x, num_units,
                                 d_h=1, d_w=1, scope='h%d_conv' % i)
            layer_x = tf.nn.elu(layer_x)
        else:
            if i != num_layers - 1:
                # Upsampling by factor of 2 with NN
                scale = 2 ** (i // 3 + 1)  # integer division keeps the exponent an int
                layer_x = ops.upsample_nn(layer_x, [scale * 8, scale * 8],
                                          scope='h%d_upsample' % i, reuse=reuse)
                # Skip connection
                append = ops.upsample_nn(h0, [scale * 8, scale * 8],
                                          scope='h%d_skipup' % i, reuse=reuse)
                layer_x = tf.concat([layer_x, append], axis=3)

    last_h = ops.conv2d(opts, layer_x, output_shape[-1],
                        d_h=1, d_w=1, scope='hfinal_conv')
    if opts['input_normalize_sym']:
        return tf.nn.tanh(last_h), last_h
    else:
        return tf.nn.sigmoid(last_h), last_h
Example #29
def run_minibatch(inpt, num_kernels=5, kernel_dim=3):
    """
    * Take the output of some intermediate layer of the discriminator.
    * Multiply it by a 3D tensor to produce a matrix (of size num_kernels x 
    kernel_dim in the code below).
    * Compute the L1-distance between rows in this matrix across all samples 
    in a batch, and then apply a negative exponential.
    * The minibatch features for a sample are then the sum of these 
    exponentiated distances.
    * Concatenate the original input to the minibatch layer (the output of 
    the previous discriminator layer) with the newly created minibatch 
    features, and pass this as input to the next layer of the discriminator.

    :param inpt: 
    :param num_kernels: 
    :param kernel_dim: 
    :return: 
    """
    x = ops.linear(inpt, num_kernels * kernel_dim, scope='minibatch',
                   stddev=0.02)
    activation = tf.reshape(x, (-1, num_kernels, kernel_dim))
    diffs = tf.expand_dims(activation, axis=3) - \
        tf.expand_dims(tf.transpose(activation, [1, 2, 0]), axis=0)
    abs_diffs = tf.reduce_sum(tf.abs(diffs), axis=2)
    minibatch_features = tf.reduce_sum(tf.exp(-abs_diffs), axis=2)
    return tf.concat(values=[inpt, minibatch_features], axis=1)
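
# Usage sketch (assumed names): the minibatch features slot in between the
# discriminator's flattened conv features and its output layer.
h_flat = tf.reshape(h3, [batch_size, -1])            # assumed conv features
h_mb = run_minibatch(h_flat, num_kernels=5, kernel_dim=3)
d_logits = ops.linear(h_mb, 1, scope='d_out')        # assumed output layer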
Example #30
File: models.py Project: nhandam/p3_wgan
    def _create_critic(self, x, reuse=False, train=True, name="critic"):
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()

            normalizer = partial(batch_norm, is_training=train)

            # residual blocks
            resamples = ["down", "down", None, None]
            h = x
            for i in range(4):
                h = residual_block(h,
                                   k=3,
                                   s=2,
                                   stddev=0.02,
                                   atv_input=i > 0,
                                   bn_input=i > 0,
                                   resample=resamples[i],
                                   output_dim=self.num_cri_feature_maps,
                                   bn=normalizer,
                                   activation_fn=tf.nn.relu,
                                   name="c_block{}".format(i))

            # mean pool layer
            h = normalizer(h, scope="c_mean_pool.bn")
            h = tf.nn.relu(h, name="c_mean_pool.relu")
            h = tf.reduce_mean(h, axis=[1, 2], name="c_mean_pool")

            # output layer
            c_out = linear(h, 1, scope='c_out.lin')
            c_out = self.critic_atv(c_out, name="c_out.atv")
            return c_out
Example #31
def generator(z, is_training):
    # First reshape the input vector into a 3-d tensor.
    z_ = ops.linear(z, GENERATOR_DENSE_SIZE * 4 * 4, 'g_h0_lin')
    h_in = tf.reshape(z_, [-1, 4, 4, GENERATOR_DENSE_SIZE])
    g_batch_norm_in = ops.batch_norm(name='g_batch_norm_in')
    h_in_bn = g_batch_norm_in(h_in, is_training)
    h_in_z = ops.lrelu(x=h_in_bn, name='g_lr_1')

    h_1 = ops.deconv2d(h_in_z, output_shape=[BATCH_SIZE, 8, 8, 512],
                       k_h=5, k_w=5, d_h=2, d_w=2, name="g_deconv_1")
    g_batch_norm_1 = ops.batch_norm(name='g_batch_norm_1')
    h_1_bn = g_batch_norm_1(h_1, is_training)
    h_1_z = ops.lrelu(x=h_1_bn, name='g_lr_2')
    h_1_z_dr = tf.nn.dropout(h_1_z, 0.3)

    h_2 = ops.deconv2d(h_1_z_dr, output_shape=[BATCH_SIZE, 16, 16, 256],
                       k_h=5, k_w=5, d_h=2, d_w=2, name="g_deconv_2")
    g_batch_norm_2 = ops.batch_norm(name='g_batch_norm_2')
    h_2_bn = g_batch_norm_2(h_2, is_training)
    h_2_z = ops.lrelu(x=h_2_bn, name='g_lr_3')
    h_2_z_dr = tf.nn.dropout(h_2_z, 0.3)

    h_3 = ops.deconv2d(h_2_z_dr, output_shape=[BATCH_SIZE, 32, 32, 128],
                       k_h=5, k_w=5, d_h=2, d_w=2, name="g_deconv_3")
    g_batch_norm_3 = ops.batch_norm(name='g_batch_norm_3')
    h_3_bn = g_batch_norm_3(h_3, is_training)
    h_3_z = ops.lrelu(x=h_3_bn, name='g_lr_4')

    h_out = ops.deconv2d(h_3_z, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, N_CHANNELS],
                         name='g_out')

    return tf.nn.tanh(h_out)
Example #32
def resnet_discriminator(x, labels, reuse=False, use_sn=True):
    with tf.variable_scope('discriminator', reuse=reuse):
        res1 = resnet_blocks.discriminator_residual_block(
            x, D_DIM, True, "res1", use_sn=use_sn, reuse=reuse) # 32x32
        res2 = resnet_blocks.discriminator_residual_block(
            res1, D_DIM * 2, True, "res2", use_sn=use_sn, reuse=reuse) # 16x16
        nl = non_local.sn_non_local_block_sim(res2, None, name="nl")
        res3 = resnet_blocks.discriminator_residual_block(
            nl, D_DIM * 4, True, "res3", use_sn=use_sn, reuse=reuse) # 8x8
        res4 = resnet_blocks.discriminator_residual_block(
            res3, D_DIM * 8, True, "res4", use_sn=use_sn, reuse=reuse) # 4x4
        res5 = resnet_blocks.discriminator_residual_block(
            res4, D_DIM * 8, False, "res5", use_sn=use_sn, reuse=reuse) # 4x4

        res5 = tf.nn.relu(res5)
        res5_channels = tf.reduce_sum(res5, [2, 3])
        f1_logit = ops.linear(res5_channels, 1, scope="f1", use_sn=use_sn)

        embedding_map = tf.get_variable(
            name='embedding_map',
            shape=[1000, D_DIM * 8],
            initializer=tf.contrib.layers.xavier_initializer())

        label_embedding = tf.nn.embedding_lookup(embedding_map, labels)
        f1_logit += tf.reduce_sum(res5_channels * label_embedding, axis=1, keepdims=True)

        f1 = tf.nn.sigmoid(f1_logit)
        return f1, f1_logit, None
Example #33
def embedding_network(state, mask, seed=123):
    # Placeholder layer sizes
    d_e = [[64], [64, 128]]
    d_o = [128]

    # Build graph:
    initial_elems = state

    # Embedding Part
    for i, block in enumerate(d_e):
        el = initial_elems
        for j, layer in enumerate(block):
            context = c if j == 0 and not i == 0 else None
            el, _ = invariant_layer(el,
                                    layer,
                                    context=context,
                                    name='l' + str(i) + '_' + str(j),
                                    seed=seed + i + j)

        c = mask_and_pool(el, mask)  # pool to get context for next block

    # Fully connected part
    fc = c
    for i, layer in enumerate(d_o):
        fc, _, _ = linear(fc,
                          layer,
                          activation_fn=tf.nn.relu,
                          name='lO_' + str(i))

    # Output
    embedding = fc

    # Returns the network output and parameters
    return embedding, []
Example #34
File: model.py Project: dcfucheng/LAPGAN
 def discriminator(self, x, reuse=None):
     with tf.variable_scope('discriminator', reuse=reuse):
         h0 = lrelu(conv2d(x, self.df_dim, name='d_h0_conv'))
         h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv')))
         h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv')))
         h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv')))
         h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
         return tf.nn.sigmoid(h4)
Example #35
File: model.py Project: dcfucheng/LAPGAN
 def generator(self, z, reuse=None):
     with tf.variable_scope('generator', reuse=reuse):
         # project `z` and reshape
         h0 = tf.reshape(linear(z, self.gf_dim * 8 * 4 * 4, 'g_h0_lin'), [-1, 4, 4, self.gf_dim * 8])
         h0 = tf.nn.relu(self.g_bn0(h0))
         h1 = deconv2d(h0, [self.batch_size, 8, 8, self.gf_dim * 4], name='g_h1')
         h1 = tf.nn.relu(self.g_bn1(h1))
         h2 = deconv2d(h1, [self.batch_size, 16, 16, self.gf_dim * 2], name='g_h2')
         h2 = tf.nn.relu(self.g_bn2(h2))
         h3 = deconv2d(h2, [self.batch_size, 32, 32, 3], name='g_h3')
         return tf.nn.tanh(h3)
Example #36
def discriminator(image, reuse=False):
    d_bn1 = ops.batch_norm(FLAGS.batch_size, name='d_bn1')
    d_bn2 = ops.batch_norm(FLAGS.batch_size, name='d_bn2')
    d_bn3 = ops.batch_norm(FLAGS.batch_size, name='d_bn3')
    image = tf.reshape(image, [FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 1])
    if reuse: tf.get_variable_scope().reuse_variables()
    h0 = ops.lrelu(ops.conv2d(image, FLAGS.df_dim, name='d_h0_conv'))
    h1 = ops.lrelu(d_bn1(ops.conv2d(h0, FLAGS.df_dim * 2, name='d_h1_conv')))
    h2 = ops.lrelu(d_bn2(ops.conv2d(h1, FLAGS.df_dim * 4, name='d_h2_conv')))
    h3 = ops.lrelu(d_bn3(ops.conv2d(h2, FLAGS.df_dim * 8, name='d_h3_conv')))
    h4 = ops.linear(tf.reshape(h3, [FLAGS.batch_size, -1]), 1, 'd_h3_lin')
    return tf.nn.sigmoid(h4)
Example #37
    def discriminator(self, images, image_size, reuse=False):
        image_size //= 64  # six stride-2 convolutions shrink each spatial dim by 2**6 = 64
        with tf.variable_scope('discriminator', reuse=reuse):
            gd_h0 = lrelu(conv2d(images, 64, name="d_gd_h0_conv"))
            gd_h1 = lrelu(self.d_bns[0](conv2d(gd_h0, 128, name='d_gd_h1_conv')))
            gd_h2 = lrelu(self.d_bns[1](conv2d(gd_h1, 256, name='d_gd_h2_conv')))
            gd_h3 = lrelu(self.d_bns[2](conv2d(gd_h2, 512, name='d_gd_h3_conv')))
            gd_h4 = lrelu(self.d_bns[3](conv2d(gd_h3, 512, name='d_gd_h4_conv')))
            gd_h5 = lrelu(self.d_bns[4](conv2d(gd_h4, 512, name='d_gd_h5_conv')))
            gd_h = linear(tf.reshape(
                gd_h5, [self.batch_size, int(512 * image_size * image_size)]), 64 * image_size * image_size, 'd_gd_linear')

            #ld_h0 = lrelu(conv2d(masked_images, 64, name="d_ld_h0_conv"))
            #ld_h1 = lrelu(self.local_d_bns[0](conv2d(ld_h0, 128, name='d_ld_h1_conv')))
            #ld_h2 = lrelu(self.local_d_bns[1](conv2d(ld_h1, 256, name='d_ld_h2_conv')))
            #ld_h3 = lrelu(self.local_d_bns[2](conv2d(ld_h2, 512, name='d_ld_h3_conv')))
            #ld_h4 = lrelu(self.local_d_bns[3](conv2d(ld_h3, 512, name='d_ld_h4_conv')))
            #ld_h = linear(tf.reshape(
            #    ld_h4, [self.batch_size, int(512 * image_size * image_size)]), 64 * image_size * image_size, 'd_ld_linear')

            #h = linear(tf.concat([gd_h, ld_h], 1), 1, 'd_linear')
            h = linear(gd_h, 1, 'd_linear')
            return tf.nn.sigmoid(h), h
Example #38
File: model.py Project: liuaishan/AdvPGAN
    def naive_discriminator(self, image, y=None, reuse=False):
        with tf.variable_scope("discriminator") as scope:

            # image is 128 x 128 x (input_c_dim + output_c_dim)
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert not tf.get_variable_scope().reuse

            h0 = lrelu(conv2d(image, self.df_dim, name='adv_d_h0_conv'))
            # h0 is (128 x 128 x self.df_dim)
            h1 = lrelu(layer_norm(conv2d(h0, self.df_dim * 2, name='adv_d_h1_conv'), name="adv_d_ln1"))
            # h1 is (64 x 64 x self.df_dim*2)
            h2 = lrelu(layer_norm(conv2d(h1, self.df_dim * 4, name='adv_d_h2_conv'), name="adv_d_ln2"))
            # h2 is (32 x 32 x self.df_dim*4)
            h3 = lrelu(layer_norm(conv2d(h2, self.df_dim * 8, d_h=1, d_w=1, name='adv_d_h3_conv'), name="adv_d_ln3"))
            # h3 is (16 x 16 x self.df_dim*8)
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'adv_d_h3_lin')

            return tf.nn.sigmoid(h4), h4
Example #39
def discriminator(self, image, reuse=False, y=None, prefix=""):

    num_classes = 1001

    if reuse:
        tf.get_variable_scope().reuse_variables()

    batch_size = int(image.get_shape()[0])
    assert batch_size == 2 * self.batch_size

    """
    # L1 distance to average value of corresponding pixel in positive and negative batch
    # Included as a feature to prevent early mode collapse
    b, r, c, ch = [int(e) for e in image.get_shape()]
    pos = tf.slice(image, [0, 0, 0, 0], [self.batch_size, r, c, ch])
    neg = tf.slice(image, [self.batch_size, 0, 0, 0], [self.batch_size, r, c, ch])
    pos = tf.reshape(pos, [self.batch_size, -1])
    neg = tf.reshape(neg, [self.batch_size, -1])
    mean_pos = tf.reduce_mean(pos, 0, keep_dims=True)
    mean_neg = tf.reduce_mean(neg, 0, keep_dims=True)

    # difference from mean, with each example excluding itself from the mean
    pos_diff_pos = (1. + 1. / (self.batch_size - 1.)) * pos - mean_pos
    pos_diff_neg = pos - mean_neg
    neg_diff_pos = neg - mean_pos
    neg_diff_neg = (1. + 1. / (self.batch_size - 1.)) * neg - mean_neg

    diff_feat = tf.concat(0, [tf.concat(1, [pos_diff_pos, pos_diff_neg]),
                              tf.concat(1, [neg_diff_pos, neg_diff_neg])])

    with tf.variable_scope("d_diff_feat"):
        scale = tf.get_variable("d_untied_scale", [128 * 128 * 3 * 2], tf.float32,
                                 tf.random_normal_initializer(mean=1., stddev=0.1))

    diff_feat = diff_feat = tf.exp(- tf.abs(scale) * tf.abs(diff_feat))
    diff_feat = self.bnx(diff_feat, name="d_bnx_diff_feat")
    """

    noisy_image = image + tf.random_normal([batch_size, 512, 512, 3],
            mean=0.0,
            stddev=.1)

    print "Discriminator shapes"
    print "image: ", image.get_shape()
    def tower(bn, suffix):
        assert not self.y_dim
        print("\ttower " + suffix)
        h0 = lrelu(bn(conv2d(noisy_image, self.df_dim, name='d_h0_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_0" + suffix))
        print("\th0 ", h0.get_shape())
        h1 = lrelu(bn(conv2d(h0, self.df_dim * 2, name='d_h1_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_1" + suffix))
        print("\th1 ", h1.get_shape())
        h2 = lrelu(bn(conv2d(h1, self.df_dim * 4, name='d_h2_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_2" + suffix))
        print("\th2 ", h2.get_shape())

        h3 = lrelu(bn(conv2d(h2, self.df_dim * 4, name='d_h3_conv' + suffix, d_h=1, d_w=1,
            k_w=3, k_h=3), "d_bn_3" + suffix))
        print("\th3 ", h3.get_shape())
        h4 = lrelu(bn(conv2d(h3, self.df_dim * 4, name='d_h4_conv' + suffix, d_h=1, d_w=1,
            k_w=3, k_h=3), "d_bn_4" + suffix))
        print("\th4 ", h4.get_shape())
        h5 = lrelu(bn(conv2d(h4, self.df_dim * 8, name='d_h5_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_5" + suffix))
        print("\th5 ", h5.get_shape())

        h6 = lrelu(bn(conv2d(h5, self.df_dim * 8, name='d_h6_conv' + suffix,
            k_w=3, k_h=3), "d_bn_6" + suffix))
        print("\th6 ", h6.get_shape())
        # return tf.reduce_mean(h6, [1, 2])
        h6_reshaped = tf.reshape(h6, [batch_size, -1])
        print('\th6_reshaped: ', h6_reshaped.get_shape())

        h7 = lrelu(bn(linear(h6_reshaped, self.df_dim * 40, scope="d_h7" + suffix), "d_bn_7" + suffix))

        return h7

    h = tower(self.bnx, "")
    print "h: ", h.get_shape()

    n_kernels = 300
    dim_per_kernel = 50
    x = linear(h, n_kernels * dim_per_kernel, scope="d_h")
    activation = tf.reshape(x, (batch_size, n_kernels, dim_per_kernel))

    big = np.zeros((batch_size, batch_size), dtype='float32')
    big += np.eye(batch_size)
    big = tf.expand_dims(big, 1)

    abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2)
    mask = 1. - big
    masked = tf.exp(-abs_dif) * mask
    def half(tens, second):
        m, n, _ = tens.get_shape()
        m = int(m)
        n = int(n)
        return tf.slice(tens, [0, 0, second * self.batch_size], [m, n, self.batch_size])
    # TODO: speedup by allocating the denominator directly instead of constructing it by sum
    #       (current version makes it easier to play with the mask and not need to rederive
    #        the denominator)
    f1 = tf.reduce_sum(half(masked, 0), 2) / tf.reduce_sum(half(mask, 0))
    f2 = tf.reduce_sum(half(masked, 1), 2) / tf.reduce_sum(half(mask, 1))

    minibatch_features = [f1, f2]

    x = tf.concat([h] + minibatch_features, 1)  # TF 1.x argument order: values first
    print("x: ", x.get_shape())
    # x = tf.nn.dropout(x, .5)

    class_logits = linear(x, num_classes, 'd_indiv_logits')


    image_means = tf.reduce_mean(image, 0, keepdims=True)
    mean_sub_image = image - image_means
    image_vars = tf.reduce_mean(tf.square(mean_sub_image), 0)

    generated_class_logits = tf.squeeze(tf.slice(class_logits, [0, num_classes - 1], [batch_size, 1]))
    positive_class_logits = tf.slice(class_logits, [0, 0], [batch_size, num_classes - 1])

    """
    # make these a separate matmul with weights initialized to 0, attached only to generated_class_logits, or things explode
    generated_class_logits = tf.squeeze(generated_class_logits) + tf.squeeze(linear(diff_feat, 1, stddev=0., scope="d_indivi_logits_from_diff_feat"))
    assert len(generated_class_logits.get_shape()) == 1
    # re-assemble the logits after incrementing the generated class logits
    class_logits = tf.concat(1, [positive_class_logits, tf.expand_dims(generated_class_logits, 1)])
    """

    mx = tf.reduce_max(positive_class_logits, 1, keepdims=True)
    safe_pos_class_logits = positive_class_logits - mx

    gan_logits = tf.log(tf.reduce_sum(tf.exp(safe_pos_class_logits), 1)) + tf.squeeze(mx) - generated_class_logits
    assert len(gan_logits.get_shape()) == 1

    probs = tf.nn.sigmoid(gan_logits)

    return [tf.slice(class_logits, [0, 0], [self.batch_size, num_classes]),
            tf.slice(probs, [0], [self.batch_size]),
           tf.slice(gan_logits, [0], [self.batch_size]),
           tf.slice(probs, [self.batch_size], [self.batch_size]),
           tf.slice(gan_logits, [self.batch_size], [self.batch_size])]
Example #40
    def __call__(self, is_ref):
        """
        Builds the graph propagating from z to x.
        On the first pass, should make variables.
        All variables with names beginning with "g_" will be used for the
        generator network.
        """
        dcgan = self.dcgan
        assert isinstance(dcgan, DCGAN)

        def make_z(shape, minval, maxval, name, dtype):
            assert dtype is tf.float32
            if is_ref:
                with tf.variable_scope(name) as scope:
                    z = tf.get_variable("z", shape,
                                initializer=tf.random_uniform_initializer(minval, maxval),
                                trainable=False)
                    if z.device != "/device:GPU:0":
                        print "z.device is " + str(z.device)
                        assert False
            else:
                z = tf.random_uniform(shape,
                                   minval=minval, maxval=maxval,
                                   name=name, dtype=tf.float32)
            return z


        z = make_z([dcgan.batch_size, dcgan.z_dim],
                                   minval=-1., maxval=1.,
                                   name='z', dtype=tf.float32)
        zs = [z]

        if hasattr(dcgan, 'generator_built'):
            tf.get_variable_scope().reuse_variables()
            make_vars = False
        else:
            make_vars = True


        def reuse_wrapper(packed, *args):
            """
            A wrapper that processes the output of TensorFlow calls differently
            based on whether we are reusing Variables or not.

            Parameters
            ----------
            packed: The output of the TensorFlow call
            args: List of names

            If make_vars is True, then `packed` will contain all the new Variables,
            and we need to assign them to dcgan.foo fields.
            If make_vars is False, then `packed` is just the output tensor, and we
            just return that.
            """
            if make_vars:
                assert len(packed) == len(args) + 1, len(packed)
                out = packed[0]
            else:
                out = packed
            return out

        assert not dcgan.y_dim
        # project `z` and reshape
        z_ = reuse_wrapper(linear(z, dcgan.gf_dim*8*4*4, 'g_h0_lin', with_w=make_vars), 'h0_w', 'h0_b')

        h0 = tf.reshape(z_, [-1, 4, 4, dcgan.gf_dim * 8])
        h0 = tf.nn.relu(dcgan.vbn(h0, "g_vbn_0"))
        h0z = make_z([dcgan.batch_size, 4, 4, dcgan.gf_dim],
                                   minval=-1., maxval=1.,
                                   name='h0z', dtype=tf.float32)
        zs.append(h0z)
        h0 = tf.concat([h0, h0z], 3)

        h1 = reuse_wrapper(deconv2d(h0,
            [dcgan.batch_size, 8, 8, dcgan.gf_dim*4], name='g_h1', with_w=make_vars),
            'h1_w', 'h1_b')
        h1 = tf.nn.relu(dcgan.vbn(h1, "g_vbn_1"))
        h1z = make_z([dcgan.batch_size, 8, 8, dcgan.gf_dim],
                                   minval=-1., maxval=1.,
                                   name='h1z', dtype=tf.float32)
        zs.append(h1z)
        h1 = tf.concat([h1, h1z], 3)


        h2 = reuse_wrapper(deconv2d(h1,
            [dcgan.batch_size, 16, 16, dcgan.gf_dim*2], name='g_h2', with_w=make_vars),
            'h2_w', 'h2_b')
        h2 = tf.nn.relu(dcgan.vbn(h2, "g_vbn_2"))
        half = dcgan.gf_dim // 2
        if half == 0:
            half = 1
        h2z = make_z([dcgan.batch_size, 16, 16, half],
                                   minval=-1., maxval=1.,
                                   name='h2z', dtype=tf.float32)
        zs.append(h2z)
        h2 = tf.concat([h2, h2z], 3)


        h3 = reuse_wrapper(deconv2d(h2,
            [dcgan.batch_size, 32, 32, dcgan.gf_dim*1], name='g_h3', with_w=make_vars),
            'h3_w', 'h3_b')
        if make_vars:
            h3_name = "h3_relu_first"
        else:
            h3_name = "h3_relu_reuse"
        h3 = tf.nn.relu(dcgan.vbn(h3, "g_vbn_3"), name=h3_name)
        print "h3 shape: ", h3.get_shape()

        quarter = dcgan.gf_dim // 4
        if quarter == 0:
            quarter = 1
        h3z = make_z([dcgan.batch_size, 32, 32, quarter],
                                   minval=-1., maxval=1.,
                                   name='h3z', dtype=tf.float32)
        zs.append(h3z)
        h3 = tf.concat([h3, h3z], 3)

        assert dcgan.image_shape[0] == 128

        h4 = reuse_wrapper(deconv2d(h3,
                [dcgan.batch_size, 64, 64, dcgan.gf_dim*1],
                name='g_h4', with_w=make_vars),
            'h4_w', 'h4_b')
        h4 = tf.nn.relu(dcgan.vbn(h4, "g_vbn_4"))
        print "h4 shape: ", h4.get_shape()

        eighth = dcgan.gf_dim // 8
        if eighth == 0:
            eighth = 1
        h4z = make_z([dcgan.batch_size, 64, 64, eighth],
                                   minval=-1., maxval=1.,
                                   name='h4z', dtype=tf.float32)
        zs.append(h4z)
        h4 = tf.concat([h4, h4z], 3)

        h5 = reuse_wrapper(deconv2d(h4,
                [dcgan.batch_size, 128, 128, dcgan.gf_dim * 1],
                name='g_h5', with_w=make_vars),
            'h5_w', 'h5_b')
        h5 = tf.nn.relu(dcgan.vbn(h5, "g_vbn_5"))
        print "h5 shape: ", h5.get_shape()

        sixteenth = dcgan.gf_dim // 16
        if sixteenth == 0:
            sixteenth = 1
        h5z = make_z([dcgan.batch_size, 128, 128, sixteenth],
                                   minval=-1., maxval=1.,
                                   name='h5z', dtype=tf.float32)
        zs.append(h5z)
        h5 = tf.concat([h5, h5z], 3)

        h6 = reuse_wrapper(deconv2d(h5,
                [dcgan.batch_size, 128, 128, 3],
                d_w = 1, d_h = 1,
                name='g_h6', with_w=make_vars,
                init_bias=dcgan.out_init_b,
                stddev=dcgan.out_stddev),
            'h6_w', 'h6_b')
        print('h6 shape: ', h6.get_shape())

        out = tf.nn.tanh(h6)

        dcgan.generator_built = True
        return out, zs
Example #41
 def _vgg_fully_connected(self, x, n_in, n_out, scope):
   with tf.variable_scope(scope):
     fc = ops.linear(x, n_in, n_out)
   return fc