Example #1
    def build(self):
        self.X3 = tf.reshape(self.X3, shape=[-1, 28, 28, 1])

        conv1 = conv2d(self.X3, self.weights['wc1'], self.biases['bc1'])
        conv2 = conv2d(conv1, self.weights['wc2'], self.biases['bc2'])

        conv2 = max_pool_2x2(conv2)
        conv2 = dropout_layer(conv2, self.keep_prob3)

        # Flatten
        conv3 = tf.layers.flatten(conv2)
        # Dense
        fc1 = tf.add(tf.matmul(conv3, self.weights['wd1']), self.biases['bd1'])
        fc1 = tf.nn.relu(fc1)
        # Dropout
        fc1 = dropout_layer(fc1, self.keep_prob3)
        # Dense
        out = tf.add(tf.matmul(fc1, self.weights['out']), self.biases['out'])

        predictions = tf.nn.softmax(out)

        return out, predictions
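
The pair returned by build separates raw logits (out) from softmax probabilities (predictions). A minimal TF1-style sketch of how such a pair is typically consumed, assuming a model instance with a one-hot label tensor model.Y and an Adam optimizer (both assumptions, not shown above); the loss is computed on the logits for numerical stability, while accuracy uses the softmax output:

import tensorflow as tf

logits, predictions = model.build()
# Cross-entropy on logits (more stable than on softmax output); model.Y is assumed.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=model.Y, logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
# Accuracy compares the argmax of the softmax output with the labels.
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(predictions, 1), tf.argmax(model.Y, 1)), tf.float32))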
Example #2
def model3(x, weights, biases, dropout):
    # Build the TensorFlow graph here
    with tf.name_scope('reshape'):
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

    with tf.name_scope('conv1'):
        conv1 = conv2d(x, weights['wc1'], biases['bc1'])
        conv1 = max_pool_2x2(conv1)

    with tf.name_scope('conv2'):
        conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
        conv2 = max_pool_2x2(conv2)

    with tf.name_scope('conv3'):
        conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
        conv3 = max_pool_2x2(conv3)

        conv3 = tf.layers.flatten(conv3)

    with tf.name_scope('fc1'):
        fc1 = tf.add(tf.matmul(conv3, weights['wd1']), biases['bd1'])
        fc1 = tf.nn.relu(fc1)
        # Apply the dropout argument, which was otherwise unused.
        fc1 = tf.nn.dropout(fc1, dropout)
        out = tf.add(tf.matmul(fc1, weights['out']), biases['out'], name="out")

    return out
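
model3 relies on conv2d and max_pool_2x2 helpers defined elsewhere. One plausible TF1-style definition consistent with how the weight and bias tensors are passed in; the SAME padding and the ReLU inside conv2d are assumptions:

import tensorflow as tf

def conv2d(x, W, b, strides=1):
    # Convolution plus bias and ReLU; SAME padding preserves spatial size.
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def max_pool_2x2(x, ksize=2):
    # 2x2 max pooling with stride 2 halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, ksize, ksize, 1],
                          strides=[1, ksize, ksize, 1], padding='SAME')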
Example #3
def discriminator(gan, inp, num, keep_prob, reuse=False):
    gpu_num = 0
    hidden_units = gan.h_adv
    with tf.device('/gpu:%d' % gpu_num):
        with tf.variable_scope('discriminator_%d' % num):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            with tf.variable_scope('conv0'):
                h0 = conv2d(inp, [3, 3, gan.num_channels, hidden_units // 4], [hidden_units // 4],
                            stride=2, name='h0')
                h0 = leaky_relu(0.2, h0)
                h0 = tf.nn.dropout(h0, keep_prob)
            with tf.variable_scope('conv1'):
                h1 = conv2d(h0, [3, 3, hidden_units // 4, hidden_units // 2], [hidden_units // 2],
                            stride=2, name='h1')
                h1 = leaky_relu(0.2, h1)
                h1 = tf.nn.dropout(h1, keep_prob)
            with tf.variable_scope('conv2'):
                h2 = conv2d(h1, [3, 3, hidden_units // 2, hidden_units], [hidden_units],
                            stride=1, name='h2')
                h2 = leaky_relu(0.2, h2)
            with tf.variable_scope('reshape'):
                shape = h2.get_shape().as_list()
                num_units = shape[1] * shape[2] * shape[3]
                flattened = tf.reshape(h2, [gan.batch_size, num_units])
            with tf.variable_scope('prediction'):
                pred = dense(flattened, [num_units, 1], [1])
    return pred
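
The discriminator calls a leaky_relu(alpha, x) helper that is not shown. A minimal sketch matching that call signature; the exact implementation is an assumption:

import tensorflow as tf

def leaky_relu(alpha, x):
    # Leaky ReLU: pass positives through, scale negatives by alpha.
    return tf.maximum(alpha * x, x)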
Example #4
    def build(self):
        self.X = tf.reshape(self.X, shape=[-1, 28, 28, 1])

        conv1 = conv2d(self.X, self.weights['wc1'], self.biases['bc1'])
        conv1 = max_pool_2x2(conv1, ksize=2)

        conv2 = conv2d(conv1, self.weights['wc2'], self.biases['bc2'])
        conv2 = max_pool_2x2(conv2, ksize=2)

        fc1 = tf.reshape(conv2, [-1, self.weights['wd1'].get_shape().as_list()[0]])
        fc1 = tf.add(tf.matmul(fc1, self.weights['wd1']), self.biases['bd1'])
        fc1 = tf.nn.relu(fc1)
        fc1 = tf.nn.dropout(fc1, self.keep_prob)
        out = tf.add(tf.matmul(fc1, self.weights['out']), self.biases['out'])

        predictions = tf.nn.softmax(out)
        
        return out, predictions
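Since dropout here is driven by self.keep_prob, that tensor is presumably a placeholder so dropout can be turned off at evaluation time. A hedged sketch of the usual feed pattern; sess, train_op, accuracy, and the batch arrays are assumptions:

# keep_prob is assumed to be created as a placeholder, e.g. in __init__:
#   self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
sess.run(train_op, feed_dict={model.X: batch_x, model.keep_prob: 0.75})  # train: drop 25%
sess.run(accuracy, feed_dict={model.X: test_x, model.keep_prob: 1.0})    # eval: no dropout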
Example #5
def discriminator(gan, image, reuse=False, name='Discriminator'):
    """
    Args:
        gan : instance of a generative adversarial network 
        reuse : Whether you want to reuse variables from previous 
        share_params : Whether weights are tied in initial layers

        gan.batch_size: The size of batch. Should be specified before training. [64]
        gan.output_size:  The resolution in pixels of the images. [64]
        gan.df_dim:  Dimension of gen filters in first conv layer. [64]
        gan.dfc_dim:  Dimension of gen units for for fully connected layer. [1024]
        gan.c_dim:  Dimension of image color. For grayscale input, set to 1. [3]        
        



    """

    # layers that don't share variables
    d_bn1 = batch_norm(name='d_bn1')
    d_bn2 = batch_norm(name='d_bn2')
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        h0 = prelu(conv2d(image, gan.c_dim, name='d_h0_conv', reuse=False),
                   name='d_h0_prelu',
                   reuse=False)
        h1 = prelu(d_bn1(conv2d(h0, gan.df_dim, name='d_h1_conv', reuse=False),
                         reuse=reuse),
                   name='d_h1_prelu',
                   reuse=False)
        h1 = tf.reshape(h1, [gan.batch_size, -1])

        # layers that share variables
        h2 = prelu(d_bn2(linear(h1, gan.dfc_dim, 'd_h2_lin', reuse=False),
                         reuse=False),
                   name='d_h2_prelu',
                   reuse=False)
        h3 = linear(h2, 1, 'd_h3_lin', reuse=False)

        return tf.nn.sigmoid(h3), h3
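
Returning both tf.nn.sigmoid(h3) and the raw logits h3 is the usual pattern because GAN losses are more stable when computed on logits. A sketch of how the pair might feed the discriminator loss; the variable names are assumptions:

import tensorflow as tf

d_real, d_real_logits = discriminator(gan, real_images)
d_fake, d_fake_logits = discriminator(gan, fake_images, reuse=True)
# Real images should score 1, generated images 0; both losses use the logits.
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_real_logits, labels=tf.ones_like(d_real_logits)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))
d_loss = d_loss_real + d_loss_fake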
Example #6
    def build(self):
        self.X2 = tf.reshape(self.X2, shape=[-1, 28, 28, 1])

        conv1 = conv2d(self.X2, self.weights['wc1'], self.biases['bc1'])
        conv1 = max_pool_2x2(conv1)

        conv2 = conv2d(conv1, self.weights['wc2'], self.biases['bc2'])
        conv2 = max_pool_2x2(conv2)

        conv3 = conv2d(conv2, self.weights['wc3'], self.biases['bc3'])
        conv3 = max_pool_2x2(conv3)

        conv3 = tf.layers.flatten(conv3)

        fc1 = tf.add(tf.matmul(conv3, self.weights['wd1']), self.biases['bd1'])
        fc1 = tf.nn.relu(fc1)
        out = tf.add(tf.matmul(fc1, self.weights['out']), self.biases['out'])

        predictions = tf.nn.softmax(out)

        return out, predictions
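
This build method expects weight and bias dictionaries created elsewhere. One shape-consistent choice for 28x28 grayscale input with three SAME-padded 2x2 pools (28 -> 14 -> 7 -> 4), assuming 3x3 filters and 10 output classes; every size below is an assumption:

import tensorflow as tf

weights = {
    'wc1': tf.Variable(tf.truncated_normal([3, 3, 1, 32], stddev=0.1)),
    'wc2': tf.Variable(tf.truncated_normal([3, 3, 32, 64], stddev=0.1)),
    'wc3': tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1)),
    # After three 2x2 pools a 28x28 input is 4x4, so flatten to 4*4*128.
    'wd1': tf.Variable(tf.truncated_normal([4 * 4 * 128, 1024], stddev=0.1)),
    'out': tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1)),
}
biases = {
    'bc1': tf.Variable(tf.zeros([32])),
    'bc2': tf.Variable(tf.zeros([64])),
    'bc3': tf.Variable(tf.zeros([128])),
    'bd1': tf.Variable(tf.zeros([1024])),
    'out': tf.Variable(tf.zeros([10])),
}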
Example #7
def model1(x, weights, biases, dropout):

    with tf.name_scope('reshape'):
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

    with tf.name_scope('conv1'):
        conv1 = conv2d(x, weights['wc1'], biases['bc1'])
        conv1 = max_pool_2x2(conv1, ksize=2)

    with tf.name_scope('conv2'):
        conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
        conv2 = max_pool_2x2(conv2, ksize=2)

    with tf.name_scope('fc1'):
        fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
        fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
        fc1 = tf.nn.relu(fc1)
        fc1 = tf.nn.dropout(fc1, dropout)
        out = tf.add(tf.matmul(fc1, weights['out']), biases['out'], name="out")

    return out
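
A short sketch of wiring model1 into a TF1 session, assuming MNIST-style flattened inputs and weight dictionaries like the ones in the earlier examples; the placeholder names and batch_x are assumptions:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784], name='x')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
logits = model1(x, weights, biases, keep_prob)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # batch_x: a [batch, 784] float array of flattened images (assumed).
    out_val = sess.run(logits, feed_dict={x: batch_x, keep_prob: 0.5})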