Example #1
 def generator(self, z):
     s_h, s_w = self.image_h, self.image_w
     s_h2, s_w2 = utils.compute_size(s_h, 2), utils.compute_size(s_w, 2)
     s_h4, s_w4 = utils.compute_size(s_h2, 2), utils.compute_size(s_w2, 2)
     s_h8, s_w8 = utils.compute_size(s_h4, 2), utils.compute_size(s_w4, 2)
     s_h16, s_w16 = utils.compute_size(s_h8, 2), utils.compute_size(s_w8, 2)
     fmap_dim = self.fmap_dim_g
     batch_size = self.batch_size
     with tf.variable_scope("generator") as scope:
         z_ = utils.fc(z, s_h16 * s_w16 * 8 * fmap_dim, name='g_l0_fc')
         gl0 = utils.lrelu(
             self.g_bn_l0(
                 tf.reshape(z_, [batch_size, s_h16, s_w16, fmap_dim * 8])))
         gl1 = utils.lrelu(
             self.g_bn_l1(
                 utils.deconv2d(gl0, [batch_size, s_h8, s_w8, fmap_dim * 4],
                                name='g_l1_deconv')))
         gl2 = utils.lrelu(
             self.g_bn_l2(
                 utils.deconv2d(gl1, [batch_size, s_h4, s_w4, fmap_dim * 2],
                                name='g_l2_deconv')))
         gl3 = utils.lrelu(
             self.g_bn_l3(
                 utils.deconv2d(gl2, [batch_size, s_h2, s_w2, fmap_dim * 1],
                                name='g_l3_deconv')))
         gl4 = utils.deconv2d(gl3, [batch_size, s_h, s_w, 3],
                              name='g_l4_deconv')
     return tf.nn.tanh(gl4)
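Every example on this page calls a leaky-ReLU helper (lrelu or utils.lrelu) that the snippets themselves do not define. A minimal sketch of such a helper, assuming the usual 0.2 negative slope, is:

import tensorflow as tf

def lrelu(x, leak=0.2, name='lrelu'):
    # Elementwise max(x, leak * x): identity for x >= 0, slope `leak` for x < 0.
    # Newer TF releases ship the same thing as tf.nn.leaky_relu(x, alpha=leak).
    return tf.maximum(x, leak * x, name=name)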
Example #2
    def discriminate(self, inputs, is_training, reuse=False):
        with tf.variable_scope(self.var_scope) as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = self.conv(inputs, 64, 'conv1')
            batch_norm_1 = self.batch_norm(conv1, 'batch_norm1')
            activation1 = lrelu(batch_norm_1, n='activation1')
            
            conv2 = self.conv(activation1, 128, 'conv2')
            batch_norm_2 = self.batch_norm(conv2, 'batch_norm2')
            activation2 = lrelu(batch_norm_2, n='activation2')

            conv3 = self.conv(activation2, 256, 'conv3')
            batch_norm_3 = self.batch_norm(conv3, 'batch_norm3')
            activation3 = lrelu(batch_norm_3, n='activation3')

            conv4 = self.conv(activation3, 512, 'conv4')
            batch_norm_4 = self.batch_norm(conv4, 'batch_norm4')
            activation4 = lrelu(batch_norm_4, n='activation4')

            feature_dimension = np.prod(activation4.get_shape()[1:])
            fully_connected_1 = tf.reshape(activation4, shape=[-1, feature_dimension], name='fully_connected_1')

            weights2 = tf.get_variable('weights2', shape=[fully_connected_1.shape[-1], 1], dtype=tf.float32,
                                        initializer=tf.truncated_normal_initializer(stddev=0.02))
            bias2 = tf.get_variable('bias2', shape=[1], dtype=tf.float32, initializer=tf.constant_initializer(0.0))

            logits = tf.add(tf.matmul(fully_connected_1, weights2), bias2, name='logits')

            return logits
Example #3
    def __call__(self, x, y=None, sn=False, is_training=True, reuse=False):
        with tf.variable_scope(self.name, reuse=reuse):
            batch_size = x.get_shape().as_list()[0]
            if y is not None:
                ydim = y.get_shape().as_list()[-1]
                y = tf.reshape(y, [batch_size, 1, 1, ydim])
                x = conv_cond_concat(x, y)  # [bz, 28, 28, 11]

            x = tf.reshape(x, (batch_size, -1))
            net = lrelu(dense(x, 512, sn=sn, name='d_fc1'), name='d_l1')
            net = lrelu(bn(dense(net, 256, sn=sn, name='d_fc2'),
                           is_training,
                           name='d_bn2'),
                        name='d_l2')
            net = lrelu(bn(dense(net, 128, sn=sn, name='d_fc3'),
                           is_training,
                           name='d_bn3'),
                        name='d_l3')
            yd = dense(net, 1, sn=sn, name="D_dense")

            if self.class_num:
                yc = dense(net, self.class_num, sn=sn, name='C_dense')
                return yd, net, yc
            else:
                return yd, net
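This conditional discriminator relies on a conv_cond_concat helper to broadcast the label y over the spatial grid before appending it as extra channels (hence the [bz, 28, 28, 11] comment). A minimal sketch, with the body assumed rather than taken from the original repo, is:

def conv_cond_concat(x, y):
    # x: [batch, h, w, c] feature map; y: [batch, 1, 1, y_dim] condition.
    # Tile y across the spatial dimensions and concatenate it channel-wise.
    x_shape = x.get_shape().as_list()
    y_shape = y.get_shape().as_list()
    y_tiled = y * tf.ones([x_shape[0], x_shape[1], x_shape[2], y_shape[3]])
    return tf.concat([x, y_tiled], axis=3)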
Example #4
    def add_prediction_op(self):
        fs = [5, 5] # filter sizes
        cs = [4, 40, 80] # cs[i] is output number of channels from layer i [where layer 0 is input layer]

        # First conv layer
        W_conv1 = utils.weight_variable([fs[0], cs[0], cs[1]])
        b_conv1 = utils.bias_variable([cs[1]])

        h_conv1 = utils.lrelu(utils.conv1d(self.x, W_conv1) + b_conv1)

        # Second conv layer
        W_conv2 = utils.weight_variable([fs[1], cs[1], cs[2]])
        b_conv2 = utils.bias_variable([cs[2]])

        h_conv2 = utils.lrelu(utils.conv1d(h_conv1, W_conv2) + b_conv2)

        # First fully connected layer. Reshape the convolution output to 1D vector
        W_fc1 = utils.weight_variable([self.config.strlen * cs[2], 1024])
        b_fc1 = utils.bias_variable([1024])

        h_conv2_flat = tf.reshape(h_conv2, [-1, self.config.strlen * cs[2]])
        h_fc1 = utils.lrelu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)

        # Dropout (should be added to earlier layers too...)
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # Final fully-connected layer
        W_fc2 = utils.weight_variable([1024, 1])
        b_fc2 = utils.bias_variable([1])

        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        y_out = tf.sigmoid(y_conv)
        is_zero = tf.clip_by_value(tf.reduce_sum(self.x), 0, 1) # basically will be 1 iff at least one entry of x is nonzero
        y_out = tf.multiply(y_out, is_zero)
        return y_out
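The add_prediction_op examples assume a few small utils helpers that are not shown. Plausible minimal definitions, under the usual TF1 conventions (truncated-normal weights, constant bias, 'SAME'-padded 1-D convolution so the sequence length survives the later reshape), might be:

def weight_variable(shape, stddev=0.1):
    # Trainable weights drawn from a truncated normal.
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))

def bias_variable(shape):
    # Trainable bias initialized to a small positive constant.
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv1d(x, W, stride=1):
    # x: [batch, length, in_channels]; W: [filter_width, in_channels, out_channels].
    return tf.nn.conv1d(x, W, stride=stride, padding='SAME')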
Example #5
    def create_discriminator(discrim_inputs, discrim_targets):
        n_layers = 3
        layers = []

        # 2x [batch, in_channels, height, width] => [batch, in_channels * 2, height, width]
        input = fluid.layers.concat(input=[discrim_inputs, discrim_targets], axis=1)

        # layer_1: [batch, in_channels * 2, 256, 256] => [batch, ndf, 128, 128]
        convolved = discrim_conv(input, a.ndf, stride=2)
        rectified = utils.lrelu(convolved, 0.2)
        layers.append(rectified)

        # layer_2: [batch, ndf, 128, 128] => [batch, ndf * 2, 64, 64]
        # layer_3: [batch, ndf * 2, 64, 64] => [batch, ndf * 4, 32, 32]
        # layer_4: [batch, ndf * 4, 32, 32] => [batch, ndf * 8, 31, 31]
        for i in range(n_layers):
            out_channels = a.ndf * min(2 ** (i + 1), 8)
            stride = 1 if i == n_layers - 1 else 2  # last layer here has stride 1
            convolved = discrim_conv(layers[-1], out_channels, stride=stride)
            normalized = utils.batchnorm(convolved)
            rectified = utils.lrelu(normalized, 0.2)
            layers.append(rectified)

        # layer_5: [batch, ndf * 8, 31, 31] => [batch, 1, 30, 30]
        convolved = discrim_conv(rectified, out_channels=1, stride=1)
        output = fluid.layers.sigmoid(convolved)
        layers.append(output)

        return layers[-1]
Example #6
def generator(x, y, isTrain=True, reuse=False):
    with tf.variable_scope('generator', reuse=reuse):
        #        print x.shape
        # 1st hidden layer
        y1 = tf.expand_dims(tf.expand_dims(y, axis=1), axis=1)
        x = tf.concat((x, y1), axis=3)

        conv1 = tf.layers.conv2d_transpose(x, 1024, [4, 4], strides=(1, 1), padding='valid')
        lrelu1 = utils.lrelu(tf.layers.batch_normalization(conv1, training=isTrain), 0.2)

        conv2 = tf.layers.conv2d_transpose(lrelu1, 512, [3, 3], strides=(1, 1), padding='valid')
        lrelu2 = utils.lrelu(tf.layers.batch_normalization(conv2, training=isTrain), 0.2)

        conv3 = tf.layers.conv2d_transpose(lrelu2, 256, [2, 2], strides=(1, 1), padding='valid')
        lrelu3 = utils.lrelu(tf.layers.batch_normalization(conv3, training=isTrain), 0.2)

        # 4th hidden layer
        conv4 = tf.layers.conv2d_transpose(lrelu3, 128, [4, 4], strides=(2, 2), padding='same')
        lrelu4 = utils.lrelu(tf.layers.batch_normalization(conv4, training=isTrain), 0.2)
        #
        # output layer
        conv5 = tf.layers.conv2d_transpose(lrelu4, 1, [4, 4], strides=(2, 2), padding='same')

        #        lrelu5 = lrelu(tf.layers.batch_normalization(conv5, training=isTrain), 0.2)
        #
        #        # output layer
        #        conv6 = tf.layers.conv2d_transpose(lrelu5, 1, [4, 4], strides=(2, 2), padding='same')
        #        print conv6.shape
        o = tf.nn.tanh(conv5)

        return o
Example #7
def discriminator(input, y, isTrain=True, reuse=False):
    epsilon = 1e-9
    #    if isTrain:
    with tf.variable_scope('discriminator', reuse=reuse) as scope:
        # reshape so it's batchx1x1xy_size
        y_dim = int(y.get_shape().as_list()[-1])
        y = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
        input_ = utils.conv_cond_concat(input, y)

        conv1 = tcl.conv2d(input_, 64, 5, 2, activation_fn=tf.identity,
                           weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv1')
        conv1 = utils.lrelu(conv1)

        conv2 = tcl.conv2d(conv1, 128, 5, 2, activation_fn=tf.identity,
                           weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv2')
        conv2 = utils.lrelu(conv2)

        conv3 = tcl.conv2d(conv2, 256, 5, 2, activation_fn=tf.identity,
                           weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv3')
        conv3 = utils.lrelu(conv3)

        conv4 = tcl.conv2d(conv3, 512, 5, 2, activation_fn=tf.identity,
                           weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv4')
        conv4 = utils.lrelu(conv4)

        conv5 = tcl.conv2d(conv4, 1, 4, 1, activation_fn=tf.identity,
                           weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv5')
        return conv5
Example #8
 def fusion_model(self, img):
   with tf.compat.v1.variable_scope('fusion_model'):
       with tf.compat.v1.variable_scope('layer1'):
           weights = tf.compat.v1.get_variable("w1", [5, 5, 2, 256], initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights = weights_spectral_norm(weights)
           bias = tf.compat.v1.get_variable("b1", [256], initializer=tf.constant_initializer(0.0))
           conv1_ir = tf.contrib.layers.batch_norm(tf.nn.conv2d(img, weights, strides=[1, 1, 1, 1], padding='VALID') + bias, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True)
           conv1_ir = lrelu(conv1_ir)
       with tf.compat.v1.variable_scope('layer2'):
           weights = tf.compat.v1.get_variable("w2", [5, 5, 256, 128], initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights = weights_spectral_norm(weights)
           bias = tf.compat.v1.get_variable("b2", [128], initializer=tf.constant_initializer(0.0))
           conv2_ir = tf.contrib.layers.batch_norm(tf.nn.conv2d(conv1_ir, weights, strides=[1, 1, 1, 1], padding='VALID') + bias, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True)
           conv2_ir = lrelu(conv2_ir)
       with tf.compat.v1.variable_scope('layer3'):
           weights = tf.compat.v1.get_variable("w3", [3, 3, 128, 64], initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights = weights_spectral_norm(weights)
           bias = tf.compat.v1.get_variable("b3", [64], initializer=tf.constant_initializer(0.0))
           conv3_ir = tf.contrib.layers.batch_norm(tf.nn.conv2d(conv2_ir, weights, strides=[1, 1, 1, 1], padding='VALID') + bias, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True)
           conv3_ir = lrelu(conv3_ir)
       with tf.compat.v1.variable_scope('layer4'):
           weights = tf.compat.v1.get_variable("w4", [3, 3, 64, 32], initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights = weights_spectral_norm(weights)
           bias = tf.compat.v1.get_variable("b4", [32], initializer=tf.constant_initializer(0.0))
           conv4_ir = tf.contrib.layers.batch_norm(tf.nn.conv2d(conv3_ir, weights, strides=[1, 1, 1, 1], padding='VALID') + bias, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True)
           conv4_ir = lrelu(conv4_ir)
       with tf.compat.v1.variable_scope('layer5'):
           weights = tf.compat.v1.get_variable("w5", [1, 1, 32, 1], initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights = weights_spectral_norm(weights)
           bias = tf.compat.v1.get_variable("b5", [1], initializer=tf.constant_initializer(0.0))
           conv5_ir = tf.nn.conv2d(conv4_ir, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
           conv5_ir = tf.nn.tanh(conv5_ir)
   return conv5_ir
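This example and Example #12 both pass their kernels through weights_spectral_norm before convolving. Helpers of that name typically estimate the kernel's largest singular value with one step of power iteration and divide it out; the sketch below is an assumed minimal version (the original repo's update_collection bookkeeping and the write-back of u are omitted):

def weights_spectral_norm(w, update_collection=None, name='sn'):
    # Flatten the kernel to a matrix [prod(other dims), out_channels].
    w_shape = w.get_shape().as_list()
    w_mat = tf.reshape(w, [-1, w_shape[-1]])

    # Persistent estimate of the leading singular vector.
    u = tf.get_variable(name + '_u', [1, w_shape[-1]],
                        initializer=tf.truncated_normal_initializer(),
                        trainable=False)

    # One power-iteration step.
    v = tf.nn.l2_normalize(tf.matmul(u, w_mat, transpose_b=True), axis=1)
    u_new = tf.nn.l2_normalize(tf.matmul(v, w_mat), axis=1)

    # sigma approximates the spectral norm of w; dividing by it keeps the
    # layer's Lipschitz constant near 1.
    sigma = tf.matmul(tf.matmul(v, w_mat), u_new, transpose_b=True)
    return w / sigma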
Example #9
    def add_prediction_op(self):
        fs = [5, 5]  # filter sizes
        cs = [
            4, 40, 80
        ]  # cs[i] is output number of channels from layer i [where layer 0 is input layer]

        # First conv layer
        W_conv1 = utils.weight_variable([fs[0], cs[0], cs[1]])
        b_conv1 = utils.bias_variable([cs[1]])

        h_conv1 = utils.lrelu(utils.conv1d(self.x, W_conv1) + b_conv1)

        # Second conv layer
        W_conv2 = utils.weight_variable([fs[1], cs[1], cs[2]])
        b_conv2 = utils.bias_variable([cs[2]])

        h_conv2 = utils.lrelu(utils.conv1d(h_conv1, W_conv2) + b_conv2)

        # First fully connected layer. Reshape the convolution output to 1D vector
        W_fc1 = utils.weight_variable([self.config.strlen * cs[2], 1024])
        b_fc1 = utils.bias_variable([1024])

        h_conv2_flat = tf.reshape(h_conv2, [-1, self.config.strlen * cs[2]])
        h_fc1 = utils.lrelu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)

        # Dropout (should be added to earlier layers too...)
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # Final fully-connected layer
        W_fc2 = utils.weight_variable([1024, 3])
        b_fc2 = utils.bias_variable([1])

        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

        return y_conv
Example #10
def generator(x, isTrain=True, reuse=False):
    with tf.variable_scope('generator', reuse=reuse):
        conv1 = tf.layers.conv2d_transpose(x,
                                           1024, [4, 4],
                                           strides=(1, 1),
                                           padding='valid')
        lrelu1 = utils.lrelu(
            tf.layers.batch_normalization(conv1, training=isTrain), 0.2)
        conv2 = tf.layers.conv2d_transpose(lrelu1,
                                           512, [3, 3],
                                           strides=(1, 1),
                                           padding='valid')
        lrelu2 = utils.lrelu(
            tf.layers.batch_normalization(conv2, training=isTrain), 0.2)
        conv3 = tf.layers.conv2d_transpose(lrelu2,
                                           256, [2, 2],
                                           strides=(1, 1),
                                           padding='valid')
        lrelu3 = utils.lrelu(
            tf.layers.batch_normalization(conv3, training=isTrain), 0.2)
        conv4 = tf.layers.conv2d_transpose(lrelu3,
                                           128, [4, 4],
                                           strides=(2, 2),
                                           padding='same')
        lrelu4 = utils.lrelu(
            tf.layers.batch_normalization(conv4, training=isTrain), 0.2)
        conv5 = tf.layers.conv2d_transpose(lrelu4,
                                           1, [4, 4],
                                           strides=(2, 2),
                                           padding='same')
        o = tf.nn.tanh(conv5)
        return o
Example #11
    def discriminative(self, images, reuse=False):
        with tf.variable_scope('discriminative') as scope:
            if reuse:
                scope.reuse_variables()

            if self.run_flags.run == 'train':
                is_training = True
            else:
                is_training = False

            conv1 = lrelu(batch_norm(conv2d(images, output_dim=64, kernel=7, stride=1, name='d_conv1'), \
                                     is_training=is_training, name='g_conv1_bn'))  # 128 x 128 x 64

            conv2 = lrelu(batch_norm(conv2d(conv1, output_dim=64, kernel=7, stride=2, name='d_conv2'), \
                                     is_training=is_training, name='g_conv2_bn'))  # 64 x 64 x 64

            conv3 = lrelu(batch_norm(conv2d(conv2, output_dim=32, kernel=3, stride=2, name='d_conv3'), \
                                     is_training=is_training, name='g_conv3_bn'))  # 32 x 32 x 32

            conv4 = lrelu(batch_norm(conv2d(conv3, output_dim=1, kernel=3, stride=2, name='d_conv4'), \
                                     is_training=is_training, name='g_conv4_bn'))  # 16 x 16 x 1

            # conv1 = lrelu(conv2d(images, output_dim=64, kernel=7, stride=1, name='d_conv1'))  # 128 x 128 x 64
            #
            # conv2 = lrelu(conv2d(conv1, output_dim=64, kernel=7, stride=2, name='d_conv2'))  # 64 x 64 x 64
            #
            # conv3 = lrelu(conv2d(conv2, output_dim=32, kernel=3, stride=2, name='d_conv3'))  # 32 x 32 x 32
            #
            # conv4 = lrelu(conv2d(conv3, output_dim=1, kernel=3, stride=2, name='d_conv4'))  # 16 x 16 x 1

            fc = tf.reshape(conv4, [-1, 16 * 16 * 1])

            fc = ful_connect(fc, output_size=1, name='d_fc')

        return fc
Example #12
 def discriminator(self,img,reuse,update_collection=None):
   with tf.variable_scope('discriminator',reuse=reuse):
       print(img.shape)
       with tf.variable_scope('layer_1'):
           weights=tf.get_variable("w_1",[3,3,1,32],initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights=weights_spectral_norm(weights,update_collection=update_collection)
           bias=tf.get_variable("b_1",[32],initializer=tf.constant_initializer(0.0))
           conv1_vi=tf.nn.conv2d(img, weights, strides=[1,2,2,1], padding='VALID') + bias
           conv1_vi = lrelu(conv1_vi)
       with tf.variable_scope('layer_2'):
           weights=tf.get_variable("w_2",[3,3,32,64],initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights=weights_spectral_norm(weights,update_collection=update_collection)
           bias=tf.get_variable("b_2",[64],initializer=tf.constant_initializer(0.0))
           conv2_vi= tf.contrib.layers.batch_norm(tf.nn.conv2d(conv1_vi, weights, strides=[1,2,2,1], padding='VALID') + bias, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True)
           conv2_vi = lrelu(conv2_vi)
       with tf.variable_scope('layer_3'):
           weights=tf.get_variable("w_3",[3,3,64,128],initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights=weights_spectral_norm(weights,update_collection=update_collection)
           bias=tf.get_variable("b_3",[128],initializer=tf.constant_initializer(0.0))
           conv3_vi= tf.contrib.layers.batch_norm(tf.nn.conv2d(conv2_vi, weights, strides=[1,2,2,1], padding='VALID') + bias, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True)
           conv3_vi=lrelu(conv3_vi)
       with tf.variable_scope('layer_4'):
           weights=tf.get_variable("w_4",[3,3,128,256],initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights=weights_spectral_norm(weights,update_collection=update_collection)
           bias=tf.get_variable("b_4",[256],initializer=tf.constant_initializer(0.0))
           conv4_vi= tf.contrib.layers.batch_norm(tf.nn.conv2d(conv3_vi, weights, strides=[1,2,2,1], padding='VALID') + bias, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True)
           conv4_vi=lrelu(conv4_vi)
           conv4_vi = tf.reshape(conv4_vi,[self.batch_size,6*6*256])
       with tf.variable_scope('line_5'):
           weights=tf.get_variable("w_5",[6*6*256,2],initializer=tf.truncated_normal_initializer(stddev=1e-3))
           weights=weights_spectral_norm(weights,update_collection=update_collection)
           bias=tf.get_variable("b_5",[2],initializer=tf.constant_initializer(0.0))
           line_5=tf.matmul(conv4_vi, weights) + bias
   return line_5
Example #13
    def discriminative_gan(self, images, reuse=False):
        '''Discriminate 128 x 128 x 3 images fake or real within the range [fake, real] = [0, 1].'''

        with tf.variable_scope('discriminator') as scope:
            if reuse:
                scope.reuse_variables()

            if self.run_flags.run == 'train':
                is_training = True
            else:
                is_training = False

            conv1 = conv2d(images,
                           output_dim=64,
                           kernel=7,
                           stride=1,
                           name='d_conv1')
            conv1 = batch_norm(conv1,
                               is_training=is_training,
                               name='d_conv1_bn')
            conv1 = lrelu(conv1, 0.01)
            # 128 x 128 x 64

            conv2 = conv2d(conv1,
                           output_dim=64,
                           kernel=7,
                           stride=2,
                           name='d_conv2')
            conv2 = batch_norm(conv2,
                               is_training=is_training,
                               name='d_conv2_bn')
            conv2 = lrelu(conv2, 0.01)
            # 64 x 64 x 64

            conv3 = conv2d(conv2,
                           output_dim=32,
                           kernel=3,
                           stride=2,
                           name='d_conv3')
            conv3 = batch_norm(conv3,
                               is_training=is_training,
                               name='d_conv3_bn')
            conv3 = lrelu(conv3, 0.01)
            # 32 x 32 x 32

            conv4 = conv2d(conv3,
                           output_dim=1,
                           kernel=3,
                           stride=2,
                           name='d_conv4')
            conv4 = batch_norm(conv4,
                               is_training=is_training,
                               name='d_conv4_bn')
            conv4 = lrelu(conv4, 0.01)
            # 16 x 16 x 1

            fc = tf.reshape(conv4, [-1, 16 * 16 * 1])
            fc = ful_connect(fc, output_size=1, name='d_fc')

        return fc
Example #14
def predictor(xyl):
    batch_size = xyl.get_shape().as_list()[0]
    with tf.variable_scope("pred"):
        l0 = lrelu(linear(tf.reshape(xyl, [batch_size, -1]), 16, "l0"))
        l1 = lrelu(linear(l0, 16, "l1"))
        l2 = linear(l1, 3, "l2")
        tf.summary.histogram('l2', l2)
    return tf.nn.tanh(l2)
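The linear helper used by this predictor is the standard DCGAN-style fully connected layer. A minimal sketch matching the (input, output_size, scope) call signature above, with the initializers as assumptions, is:

def linear(x, output_size, scope='linear', stddev=0.02):
    shape = x.get_shape().as_list()
    with tf.variable_scope(scope):
        w = tf.get_variable('w', [shape[1], output_size], tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_size],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b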
Example #15
    def add_prediction_op(self):
        fs = [5, 5] # filter sizes
        cs = [4, 40, 80] # cs[i] is output number of channels from layer i [where layer 0 is input layer]

        # First conv layer
        W_conv1 = utils.weight_variable([fs[0], cs[0], cs[1]])
        b_conv1 = utils.bias_variable([cs[1]])

        h_conv1 = utils.lrelu(utils.conv1d(self.x, W_conv1) + b_conv1)

        # Second conv layer
        W_conv2 = utils.weight_variable([fs[1], cs[1], cs[2]])
        b_conv2 = utils.bias_variable([cs[2]])

        h_conv2 = utils.lrelu(utils.conv1d(h_conv1, W_conv2) + b_conv2)

        # Conv layer on top of the coverage
        W_conv_coverage = utils.weight_variable([fs[0], 1, cs[2]])
        b_conv_coverage = utils.bias_variable([cs[2]])

        conv_c = tf.expand_dims(self.e, -1)
        #print(conv_c.shape, W_conv_coverage.shape, b_conv_coverage.shape)
        h_conv_coverage = utils.lrelu(utils.conv1d(conv_c, W_conv_coverage) + b_conv_coverage)

        h_concatenated = tf.concat([h_conv2, h_conv_coverage], axis = -1)
        # First fully connected layer. Reshape the convolution output to 1D vector

        orig_shape = h_concatenated.get_shape().as_list()
        flat_shape = np.prod(orig_shape[1:])
        new_shape = [-1,] + [flat_shape]
        h_concatenated_flat = tf.reshape(h_concatenated, new_shape)
        h_concat_drop = tf.nn.dropout(h_concatenated_flat, self.keep_prob)
        fc1_in = h_concatenated_flat.get_shape().as_list()[-1]
        W_fc1 = utils.weight_variable([fc1_in, 1024])
        b_fc1 = utils.bias_variable([1024])
        h_fc1 = utils.lrelu(tf.matmul(h_concat_drop, W_fc1) + b_fc1)

        # Fully-connected layer on top of the coverage
        #W_fc_coverage = utils.weight_variable([self.config.strlen, cs[2]])
        #b_fc_coverage = utils.bias_variable([cs[2]])

        #h_fc_coverage = tf.nn.relu(tf.matmul(self.e, W_fc_coverage) + b_fc_coverage)
        #h_concatenated = tf.concat([h_fc1, h_fc_coverage], axis = -1)

        # Dropout (should be added to earlier layers too...)
        #h_concatenated_drop = tf.nn.dropout(h_concatenated, self.keep_prob)
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # Final fully-connected layer
        W_fc2 = utils.weight_variable([1024, 1])
        b_fc2 = utils.bias_variable([1])

        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        y_out = tf.sigmoid(y_conv)

        return y_out
Example #16
 def discriminator(self, img, cond, reuse):
     dim = len(img.get_shape())
     with tf.variable_scope("disc", reuse=reuse):
         image = tf.concat([img, cond], dim - 1)
         feature = conf.conv_channel_base
         h0 = lrelu(conv2d(image, feature, name="h0"))
         h1 = lrelu(batch_norm(conv2d(h0, feature * 2, name="h1"), "h1"))
         h2 = lrelu(batch_norm(conv2d(h1, feature * 4, name="h2"), "h2"))
         h3 = lrelu(batch_norm(conv2d(h2, feature * 8, name="h3"), "h3"))
         h4 = linear(tf.reshape(h3, [1, -1]), 1, "linear")
     return h4
Example #17
    def discriminator(self, image, reuse=False):
        with tf.variable_scope('discriminator') as scope:
            if reuse:
                scope.reuse_variables()

            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bns[0](conv2d(h0, self.df_dim * 2, name='d_h1_conv'), self.is_training))
            h2 = lrelu(self.d_bns[1](conv2d(h1, self.df_dim * 4, name='d_h2_conv'), self.is_training))
            h3 = lrelu(self.d_bns[2](conv2d(h2, self.df_dim * 8, name='d_h3_conv'), self.is_training))
            h4 = linear(tf.reshape(h3, [-1, 8192]), 1, 'd_h4_lin')

            return tf.nn.sigmoid(h4), h4
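The d_bns[i] objects used here are class-style batch-norm wrappers in the DCGAN-TensorFlow tradition: constructed once with a name, then called with a tensor and a training flag. A rough, assumed sketch of such a wrapper is:

class batch_norm(object):
    def __init__(self, epsilon=1e-5, momentum=0.9, name='batch_norm'):
        self.epsilon = epsilon
        self.momentum = momentum
        self.name = name

    def __call__(self, x, train=True):
        # Reusing the same scope name ties repeated calls to one set of
        # moving statistics (the caller handles variable reuse).
        return tf.contrib.layers.batch_norm(x, decay=self.momentum,
                                            updates_collections=None,
                                            epsilon=self.epsilon, scale=True,
                                            is_training=train, scope=self.name)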
Example #18
def build_disc1(h1, testing=False, reuse=False):
    # 16 x 16 --> 8x8
    disc1_conv1 = utils.batch_normalization(utils.lrelu(
        utils.conv2d(h1, (3, 3, 3, 32),
                     stride=[1, 2, 2, 1],
                     name='disc1_conv1')),
                                            name='disc1_bn1',
                                            reuse=reuse)

    disc1_conv2 = utils.batch_normalization(utils.lrelu(
        utils.conv2d(disc1_conv1, (3, 3, 32, 64), name='disc1_conv2')),
                                            name='disc1_bn2',
                                            reuse=reuse)

    # 8x8 --> 8x8
    disc1_conv3 = utils.batch_normalization(utils.lrelu(
        utils.conv2d(disc1_conv2, (3, 3, 64, 64), name='disc1_conv3')),
                                            name='disc1_bn3',
                                            reuse=reuse)

    disc1_conv3 = tf.nn.dropout(disc1_conv3, 0.1 if testing else 1)

    # 8x8 --> 6x6
    disc1_conv4 = tf.layers.batch_normalization(utils.lrelu(
        utils.conv2d(disc1_conv3, (3, 3, 64, 64),
                     padding='VALID',
                     name='disc1_conv4')),
                                                name='bn4',
                                                reuse=reuse)

    disc1_l5 = tf.reshape(disc1_conv4, [100, 6, 6, 64])

    disc1_shared = utils.lrelu(
        utils.network_in_network(disc1_l5,
                                 64,
                                 num_units=64,
                                 name='disc1_shared'))
    disc1_shared_flat = tf.reshape(disc1_shared, [-1, 64 * 6 * 6])
    disc1_z_recon = utils.dense(disc1_shared_flat,
                                num_inputs=64 * 6 * 6,
                                num_units=50,
                                name='disc1_z_recon')

    disc1_shared_pool = tf.reduce_mean(disc1_shared, [1, 2])
    disc1_adv = utils.dense(disc1_shared_pool,
                            num_inputs=64,
                            num_units=1,
                            name='disc1_z_adv')
    # disc1_adv is the pre-sigmoid output of the discriminator

    return disc1_adv, disc1_z_recon
Example #19
    def add_prediction_op(self):
        left_half, right_half = tf.split(
            self.x, [self.config.window, self.config.window + 1], axis=1)
        # First conv layer
        W_convleft1 = utils.weight_variable([5, 4, 40])
        b_convleft1 = utils.bias_variable([40])

        W_convright1 = utils.weight_variable([5, 4, 40])
        b_convright1 = utils.bias_variable([40])

        h_convleft1 = utils.lrelu(
            utils.conv1d(left_half, W_convleft1) + b_convleft1)
        h_convright1 = utils.lrelu(
            utils.conv1d(right_half, W_convright1) + b_convright1)

        # Second conv layer
        W_convleft2 = utils.weight_variable([5, 40, 80])
        b_convleft2 = utils.bias_variable([80])

        W_convright2 = utils.weight_variable([5, 40, 80])
        b_convright2 = utils.bias_variable([80])

        h_convleft2 = utils.lrelu(
            utils.conv1d(h_convleft1, W_convleft2) + b_convleft2)
        h_convright2 = utils.lrelu(
            utils.conv1d(h_convright1, W_convright2) + b_convright2)

        h_convout = tf.concat([h_convleft2, h_convright2], 1)

        # First fully connected layer. Reshape the convolution output to 1D vector
        fc_dim_1 = int(self.config.strlen * 80 / 7.89)
        W_fc1 = utils.weight_variable([self.config.strlen * 80, fc_dim_1])
        b_fc1 = utils.bias_variable([fc_dim_1])

        h_conv_flat = tf.reshape(h_convout, [-1, self.config.strlen * 80])
        #h_conv_flat = tf.nn.dropout(h_conv_flat, self.keep_prob)
        h_fc1 = utils.lrelu(tf.matmul(h_conv_flat, W_fc1) + b_fc1)
        h_fc1 = tf.nn.dropout(h_fc1, self.keep_prob)

        # Final fully-connected layer
        W_fc2 = utils.weight_variable([fc_dim_1, 1])
        b_fc2 = utils.bias_variable([1])

        y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2
        y_out = tf.sigmoid(y_conv)
        #TODO: Add separate filter with unshared weights that looks at center?

        return y_out
Example #20
def discriminator(x, is_training=True, reuse=False):
    # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
    batch_size = x.get_shape().as_list()[0]
    with tf.variable_scope("discriminator", reuse=reuse):
        net = conv2d(x,
                     output_dim=64,
                     kernel=(4, 4),
                     stride=(2, 2),
                     activation='lrelu',
                     name='conv1')
        net = conv2d(net,
                     output_dim=128,
                     kernel=(4, 4),
                     stride=(2, 2),
                     activation='lrelu',
                     use_bn=True,
                     is_training=is_training,
                     name='conv2')
        net = tf.reshape(net, [batch_size, -1])
        net = lrelu(
            bn(linear(net, 1024, scope='d_fc3'),
               is_training=is_training,
               scope='linear1'))
        out_logit = linear(net, 1, scope='linear2')
        out = tf.nn.sigmoid(out_logit)
        return out, out_logit, net
Example #21
    def generator(self, noise, caption):
        s = self.image_size
        s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)

        reduced_caption = utils.lrelu(
            utils.linear(caption, self.reduced_text_dim, 'g_embedding'))
        noise_concat = tf.concat([noise, reduced_caption], 1)
        new_noise = utils.linear(noise_concat,
                                 self.channel_dim * 8 * s16 * s16, 'g_h0_lin')

        h0 = tf.reshape(new_noise, [-1, s16, s16, self.channel_dim * 8])
        h0 = tf.nn.relu(self.g_bn0(h0))

        h1 = utils.deconv2d(h0,
                            [self.batch_size, s8, s8, self.channel_dim * 4],
                            name='g_h1')
        h1 = tf.nn.relu(self.g_bn1(h1))

        h2 = utils.deconv2d(h1,
                            [self.batch_size, s4, s4, self.channel_dim * 2],
                            name='g_h2')
        h2 = tf.nn.relu(self.g_bn2(h2))

        h3 = utils.deconv2d(h2, [self.batch_size, s2, s2, self.channel_dim],
                            name='g_h3')
        h3 = tf.nn.relu(self.g_bn3(h3))

        h4 = utils.deconv2d(h3, [self.batch_size, s, s, 3], name='g_h4')

        return (tf.tanh(h4) / 2. + 0.5)
Example #22
    def generative(self, x, reuse=False):
        with tf.variable_scope('generator') as scope:
            if reuse:
                scope.reuse_variables()

            if self.run_flags.run == 'train':
                is_training = True
            else:
                is_training = False

            conv1 = lrelu(batch_norm(conv2d(x, output_dim=32, stride=1, name='g_conv1'), \
                                     is_training=is_training, name='g_conv1_bn')) # 64 x 64 x 32

            conv2 = lrelu(batch_norm(conv2d(conv1, output_dim=128, stride=1, name='g_conv2'), \
                                     is_training=is_training, name='g_conv2_bn')) # 64 x 64 x 128

            conv3 = lrelu(batch_norm(conv2d(conv2, output_dim=128, stride=1, name='g_conv3'), \
                                     is_training=is_training, name='g_conv3_bn')) # 64 x 64 x 128

            conv3_up = tf.image.resize_images(conv3, size=[128, 128])

            conv4 = lrelu(batch_norm(conv2d(conv3_up, output_dim=128, stride=1, name='g_conv4'), \
                                     is_training=is_training, name='g_conv4_bn')) # 128 x 128 x 128

            conv5 = lrelu(batch_norm(conv2d(conv4, output_dim=64, stride=1, name='g_conv5'), \
                                     is_training=is_training, name='g_conv5_bn'))  # 128 x 128 x 64

            conv6 = tf.nn.sigmoid(
                conv2d(conv5, output_dim=3, stride=1,
                       name='g_conv6'))  #128 x 128 x 3

            # conv1 = lrelu(conv2d(x, output_dim=32, stride=1, name='g_conv1')) # 64 x 64 x 32
            #
            # conv2 = lrelu(conv2d(conv1, output_dim=128, stride=1, name='g_conv2')) # 64 x 64 x 128
            #
            # conv3 = lrelu(conv2d(conv2, output_dim=128, stride=1, name='g_conv3')) # 64 x 64 x 128
            #
            # conv3_up = tf.image.resize_images(conv3, size=[128, 128])
            #
            # conv4 = lrelu(conv2d(conv3_up, output_dim=128, stride=1, name='g_conv4')) # 128 x 128 x 128
            #
            # conv5 = lrelu(conv2d(conv4, output_dim=64, stride=1, name='g_conv5'))  # 128 x 128 x 64
            #
            # conv6 = tf.nn.sigmoid(conv2d(conv5, output_dim=3, stride=1, name='g_conv6')) #128 x 128 x 3

        return conv6
Example #23
    def generator(self, cond):
        with tf.variable_scope("gen"):
            feature = conf.conv_channel_base
            e1 = conv2d(cond, feature, name="e1")
            e2 = batch_norm(conv2d(lrelu(e1), feature*2, name="e2"), "e2")
            e3 = batch_norm(conv2d(lrelu(e2), feature*4, name="e3"), "e3")
            e4 = batch_norm(conv2d(lrelu(e3), feature*8, name="e4"), "e4")
            e5 = batch_norm(conv2d(lrelu(e4), feature*8, name="e5"), "e5")
            e6 = batch_norm(conv2d(lrelu(e5), feature*8, name="e6"), "e6")
            e7 = batch_norm(conv2d(lrelu(e6), feature*8, name="e7"), "e7")
            e8 = batch_norm(conv2d(lrelu(e7), feature*8, name="e8"), "e8")

            size = conf.img_size
            num = [0] * 9
            for i in range(1,9):
                num[9-i]=size
                size =(size+1)/2

            d1 = deconv2d(tf.nn.relu(e8), [1,num[1],num[1],feature*8], name="d1")
            d1 = tf.concat(3, [tf.nn.dropout(batch_norm(d1, "d1"), 0.5), e7])
            d2 = deconv2d(tf.nn.relu(d1), [1,num[2],num[2],feature*8], name="d2")
            d2 = tf.concat(3, [tf.nn.dropout(batch_norm(d2, "d2"), 0.5), e6])
            d3 = deconv2d(tf.nn.relu(d2), [1,num[3],num[3],feature*8], name="d3")
            d3 = tf.concat(3, [tf.nn.dropout(batch_norm(d3, "d3"), 0.5), e5]) 
            d4 = deconv2d(tf.nn.relu(d3), [1,num[4],num[4],feature*8], name="d4")
            d4 = tf.concat(3, [batch_norm(d4, "d4"), e4])
            d5 = deconv2d(tf.nn.relu(d4), [1,num[5],num[5],feature*4], name="d5")
            d5 = tf.concat(3, [batch_norm(d5, "d5"), e3]) 
            d6 = deconv2d(tf.nn.relu(d5), [1,num[6],num[6],feature*2], name="d6")
            d6 = tf.concat(3, [batch_norm(d6, "d6"), e2])
            d7 = deconv2d(tf.nn.relu(d6), [1,num[7],num[7],feature], name="d7")
            d7 = tf.concat(3, [batch_norm(d7, "d7"), e1]) 
            d8 = deconv2d(tf.nn.relu(d7), [1,num[8],num[8],conf.img_channel], name="d8")

            return tf.nn.tanh(d8)
Example #24
    def generator(self, cond):
        with tf.variable_scope("gen"):
            feature = conf.conv_channel_base
            e1 = conv2d(cond, feature, name="e1")
            e2 = batch_norm(conv2d(lrelu(e1), feature*2, name="e2"), "e2")
            e3 = batch_norm(conv2d(lrelu(e2), feature*4, name="e3"), "e3")
            e4 = batch_norm(conv2d(lrelu(e3), feature*8, name="e4"), "e4")
            e5 = batch_norm(conv2d(lrelu(e4), feature*8, name="e5"), "e5")
            e6 = batch_norm(conv2d(lrelu(e5), feature*8, name="e6"), "e6")
            e7 = batch_norm(conv2d(lrelu(e6), feature*8, name="e7"), "e7")
            e8 = batch_norm(conv2d(lrelu(e7), feature*8, name="e8"), "e8")

            d1 = deconv2d(tf.nn.relu(e8), [1,2,2,feature*8], name="d1")
            d1 = tf.concat(3, [tf.nn.dropout(batch_norm(d1, "d1"), 0.5), e7])
            d2 = deconv2d(tf.nn.relu(d1), [1,4,4,feature*8], name="d2")
            d2 = tf.concat(3, [tf.nn.dropout(batch_norm(d2, "d2"), 0.5), e6])
            d3 = deconv2d(tf.nn.relu(d2), [1,8,8,feature*8], name="d3")
            d3 = tf.concat(3, [tf.nn.dropout(batch_norm(d3, "d3"), 0.5), e5])
            d4 = deconv2d(tf.nn.relu(d3), [1,16,16,feature*8], name="d4")
            d4 = tf.concat(3, [batch_norm(d4, "d4"), e4])
            d5 = deconv2d(tf.nn.relu(d4), [1,32,32,feature*4], name="d5")
            d5 = tf.concat(3, [batch_norm(d5, "d5"), e3])
            d6 = deconv2d(tf.nn.relu(d5), [1,64,64,feature*2], name="d6")
            d6 = tf.concat(3, [batch_norm(d6, "d6"), e2])
            d7 = deconv2d(tf.nn.relu(d6), [1,128,128,feature], name="d7")
            d7 = tf.concat(3, [batch_norm(d7, "d7"), e1])
            d8 = deconv2d(tf.nn.relu(d7), [1,256,256,conf.img_channel], name="d8")

            return tf.nn.tanh(d8)
Example #25
 def discriminator(self, im, reuse):
     fmap_dim = self.fmap_dim_d
     with tf.variable_scope("discriminator", reuse=reuse) as scope:
         dl0 = utils.lrelu(utils.conv2d(im, fmap_dim, name='d_l0_conv'))
         dl1 = utils.lrelu(
             self.d_bn_l0(utils.conv2d(dl0, fmap_dim * 2,
                                       name='d_l1_conv')))
         dl2 = utils.lrelu(
             self.d_bn_l1(utils.conv2d(dl1, fmap_dim * 4,
                                       name='d_l2_conv')))
         dl3 = utils.lrelu(
             self.d_bn_l2(utils.conv2d(dl2, fmap_dim * 8,
                                       name='d_l3_conv')))
         dim = 1
         for d in dl3.get_shape()[1:].as_list():
             dim *= d
         dl4 = utils.fc(tf.reshape(dl3, [-1, dim]), 1, name='d_l4_fc')
     return tf.nn.sigmoid(dl4), dl4
Example #26
 def __call__(self, x, y=None, sn=False, is_training=True, reuse=False):
     with tf.variable_scope(self.name, reuse=reuse):
         batch_size = x.get_shape().as_list()[0]
         if y is not None:
             ydim = y.get_shape().as_list()[-1]
             y = tf.reshape(y, [batch_size, 1, 1, ydim])
             x = conv_cond_concat(x, y)  # [bz, 28, 28, 11]
         # [bz, 14, 14, 64]
         net = lrelu(conv2d(x,
                            64,
                            4,
                            4,
                            2,
                            2,
                            sn=sn,
                            padding="SAME",
                            name='d_conv1'),
                     name='d_l1')
         # [bz, 7, 7, 128]
         net = lrelu(bn(conv2d(net,
                               128,
                               4,
                               4,
                               2,
                               2,
                               sn=sn,
                               padding="SAME",
                               name='d_conv2'),
                        is_training,
                        name='d_bn2'),
                     name='d_l2')
         net = tf.reshape(net, [batch_size, 7 * 7 * 128])
         # [bz, 1024]
         net = lrelu(bn(dense(net, 1024, sn=sn, name='d_fc3'),
                        is_training,
                        name='d_bn3'),
                     name='d_l3')
         # [bz, 1]
         yd = dense(net, 1, sn=sn, name='D_dense')
         if self.class_num:
             yc = dense(net, self.class_num, sn=sn, name='C_dense')
             return yd, net, yc
         else:
             return yd, net
Example #27
    def dis(self,x,training):
        x = tf.reshape(x,shape=[-1,self.shape,self.shape,3])
        scope = 'dis_'
        layer = lrelu(conv2d(x,self.weights[scope+'w_conv1'])+self.biases[scope+'b_conv1'])
        
        for i in range(1, 4):
            conv = prelu(conv2d(layer, self.weights[scope+'w_conv'+str(i+1)]) + self.biases[scope+'b_conv'+str(i+1)], scope+'w_conv'+str(i+1))
            conv = maxpool2d(conv)
            conv = tf.nn.dropout(conv, self.keep_rate)
            layer = conv

        fc = tf.reshape(layer,[-1, int(self.shape/8)*int(self.shape/8)*256])
        fc = lrelu(tf.matmul(fc,self.weights[scope+'w_fc'])+self.biases[scope+'b_fc'])
        fc = tf.nn.dropout(fc,self.keep_rate)
        
        output = tf.matmul(fc,self.weights[scope+'out'])+self.biases[scope+'out']
        output = (tanh(output)+1.0)*0.5

        return output
Example #28
    def __call__(self, x, is_training=True, reuse=False):
        with tf.variable_scope(self.name, reuse=reuse):
            net = lrelu(bn(dense(x, 64, name='c_fc1'),
                           is_training,
                           name='c_bn1'),
                        name='c_l1')
            out_logit = dense(net, self.class_num, name='c_l2')
            out = tf.nn.softmax(out_logit)

            return out_logit, out
Example #29
 def discriminator(self, x, prior):
     inputs = tf.concat([x, prior], axis=1)
     self.D_h1 = lrelu(tf.matmul(inputs, self.D_W1) + self.D_b1)
     self.D_h2 = tf.nn.sigmoid(tf.matmul(self.D_h1, self.D_W2) + self.D_b2)
     '''
     self.D_h3 = lrelu(tf.matmul(self.D_h2, self.D_W3) + self.D_b3)
     self.D_h4 = lrelu(tf.matmul(self.D_h3, self.D_W4) + self.D_b4)
     self.D_h5 = lrelu(tf.matmul(self.D_h4, self.D_W5) + self.D_b5)
     self.D_h6 = tf.nn.sigmoid(tf.matmul(self.D_h5, self.D_W6) + self.D_b6)
     '''
     return self.D_h2
Example #30
def code_discriminator(inputs):
    '''
    inputs: code tensor of shape [-1, 128]
    '''
    x = inputs
    units = 512
    initializer = tf.random_normal_initializer(mean=0.0, stddev=0.02)

    # -----
    x = tf.layers.dense(x, units=units, kernel_initializer=initializer)
    x = lrelu(x)

    # -----
    x = tf.layers.dense(x, units=units, kernel_initializer=initializer)
    x = lrelu(x)

    # -----
    x = tf.layers.dense(x, units=1, kernel_initializer=initializer)

    return x
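For context, a hypothetical call site for this code discriminator might look like:

# Hypothetical usage: score a batch of 128-dimensional latent codes.
codes = tf.placeholder(tf.float32, shape=[None, 128])
code_logits = code_discriminator(codes)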