Example #1
    def build_network(self, scope_name):
        net_shape = [None] + list(self.input_shape)
        print(net_shape)
        with tf.variable_scope(scope_name):
            self.is_train = tf.placeholder(tf.bool, name="is_train")
            self.global_step = tf.placeholder(tf.int32, name="global_step")

            # Input Layer
            self.network_inputs[scope_name] = tf.placeholder(tf.float32, shape=net_shape, name="inputs")

            # Conv layers "tower" (filters, kernel_size, stride, etc. are hyperparameters from the enclosing scope)
            conv = ops.conv_layer(self.network_inputs[scope_name], filters, kernel_size, stride, activation,
                                  "conv_first", useBatchNorm, drop_out, self.is_train, weight_initializer)
            for i in range(num_blocks):
                conv = ops.residual_conv_block(conv, filters, kernel_size, stride, activation, "conv" + str(i),
                                               useBatchNorm, drop_out, self.is_train, weight_initializer)

            # Policy and value heads
            # - Compute conv output size
            tower_conv_out_size = ops.conv_out_size(self.input_size, kernel_size, 1, stride)
            # TODO - manage correctly padding (if stride and/or filter size change)
            value_conv_out_size = ops.conv_out_size(tower_conv_out_size, v_kernel_size, 0, v_stride) * v_filters
            policy_conv_out_size = ops.conv_out_size(tower_conv_out_size, p_kernel_size, 0, p_stride) * p_filters

            # - Declare dense shape
            policy_shape = [policy_conv_out_size, self.policy_size]
            value_shape = [value_conv_out_size, v_dense_size]
            value_out_shape = [v_dense_size, 1]
            
            """policy_shape2 = [policy_conv_out_size, 512]
            policy_shape = [512, self.policy_size]"""

            # - Policy head
            policy_conv = ops.conv_layer(conv, p_filters, p_kernel_size, p_stride, activation, "policy_conv",
                                         useBatchNorm, drop_out, self.is_train, weight_initializer)
            policy_conv = ops.conv_layer(policy_conv, p_filters, p_kernel_size, p_stride, activation, "policy_conv2",
                                         useBatchNorm, drop_out, self.is_train, weight_initializer)
            policy_conv = tf.contrib.layers.flatten(policy_conv)
            #policy_conv = ops.dense_layer(policy_conv, policy_shape2, activation, "policy2", False, head_drop_out, self.is_train)
            self.policy_out = ops.dense_layer(policy_conv, policy_shape, tf.identity, "policy", False, 0.0, self.is_train)
            self.policy_out_prob = p_activation(self.policy_out)

            # - Value head
            value_conv = ops.conv_layer(conv, v_filters, v_kernel_size, v_stride, activation, "value_conv",
                                        useBatchNorm, drop_out, self.is_train, weight_initializer)
            value_conv = tf.contrib.layers.flatten(value_conv)
            value_out = ops.dense_layer(value_conv, value_shape, activation, "value", False, head_drop_out, self.is_train,
                                        weight_initializer=weight_initializer)
            self.value_out = ops.dense_layer(value_out, value_out_shape, v_activation, "value_out", False, 0.0, self.is_train)
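
The head sizing above depends on ops.conv_out_size, which is not shown here. As a point of reference, here is a minimal sketch of the standard convolution output-size formula it presumably implements, assuming the argument order (input_size, kernel_size, padding, stride) seen at the call sites; the real helper may handle padding differently (the TODO above hints at exactly that):

def conv_out_size(input_size, kernel_size, padding, stride):
    # Standard convolution arithmetic, applied per spatial dimension:
    # out = floor((n + 2p - k) / s) + 1
    return (input_size + 2 * padding - kernel_size) // stride + 1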
Example #2
def discriminator(x, label):
    with tf.variable_scope('Discriminator'):
        if layers > 1:
            with tf.variable_scope('rgb_layer_{}'.format(layers - 2)):
                d0 = pool(x)
                d0 = leaky_relu(conv2d(d0, self.channels[layers - 1], 1))
        with tf.variable_scope('rgb_layer_{}'.format(layers - 1)):
            d1 = leaky_relu(conv2d(x, self.channels[layers], 1))
        for i in reversed(range(layers)):
            with tf.variable_scope('layer_{}'.format(i)):
                if i == 0:
                    d1 = minibatch_stddev(d1)
                with tf.variable_scope('1'):
                    d1 = leaky_relu(conv2d(d1, self.channels[i]))
                with tf.variable_scope('2'):
                    if i == 0:
                        d1 = leaky_relu(conv2d(d1, self.channels[0], 2, 2))
                    else:
                        d1 = leaky_relu(conv2d(d1, self.channels[i]))
                if i > 0:
                    d1 = pool(d1)
                if i == layers - 1 and layers > 1:
                    d1 = self._reparameterize(d0, d1)
        with tf.variable_scope('dense'):
            d = tf.concat([tf.layers.flatten(d1), tf.layers.flatten(label)], axis=1)
            d = dense_layer(d, 1)
    return d
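
The minibatch_stddev call above is the minibatch standard-deviation trick used in progressive-GAN-style discriminators. A minimal sketch of one common (ungrouped) variant, assuming NHWC tensors; the original helper may split the batch into groups instead of averaging over all of it:

def minibatch_stddev(x, eps=1e-8):
    # x: [N, H, W, C]. Measure how much the batch varies per feature,
    # reduce that to a single scalar, and append it as one extra feature
    # map so the discriminator can detect low-variety (collapsed) batches.
    mean = tf.reduce_mean(x, axis=0, keepdims=True)
    stddev = tf.sqrt(tf.reduce_mean(tf.square(x - mean), axis=0) + eps)
    avg = tf.reduce_mean(stddev)
    shape = tf.shape(x)
    feature = tf.fill([shape[0], shape[1], shape[2], 1], avg)
    return tf.concat([x, feature], axis=3)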
Example #3
def discriminator(image_data, train=True):
    """ Creates convolutional discriminator model.
        
    See https://arxiv.org/abs/1511.06434.pdf.

    Args:
        image_data (tf.placeholder): Tensor containing real/fake images to classify.
        train (bool, optional): Flag for whether to freeze batch-norm layer vars. If unspecified, defaults to `True`.
    Returns:
        Tensors containing probabilities and logits pertaining to input images being real/fake.
    """
    with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope:
        conv_1 = conv_layer(image_data,
                            train,
                            kernel_dims=(3, 3),
                            in_channels=3,
                            out_channels=32,
                            name='conv_1')
        conv_2 = conv_layer(conv_1,
                            train,
                            kernel_dims=(3, 3),
                            in_channels=32,
                            out_channels=32,
                            name='conv_2',
                            strides=(2, 2))
        dim = np.prod(conv_2.get_shape().as_list()[1:])
        flattened_1 = tf.reshape(conv_2, [-1, dim])
        dense_1 = dense_layer(flattened_1, train, 256, name='dense_1')
        logits = dense_layer(dense_1,
                             train,
                             1,
                             name='logits',
                             use_batchnorm=False,
                             activation=None)
        probs = tf.nn.sigmoid(logits, name=scope.name)
    return probs, logits
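
For context, a sketch of how these probs/logits are typically wired into DCGAN losses, together with a generator such as the one in Example #5. The placeholder shapes and the 100-dim noise size are assumptions, not taken from the source:

z = tf.placeholder(tf.float32, [None, 100], name='z')
real_images = tf.placeholder(tf.float32, [None, 64, 64, 3], name='real_images')

fake_images = generator(z)
_, real_logits = discriminator(real_images)   # AUTO_REUSE lets us call this twice
_, fake_logits = discriminator(fake_images)

# Non-saturating GAN losses, computed on the raw logits for stability.
d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(real_logits), logits=real_logits)
    + tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(fake_logits), logits=fake_logits))
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(fake_logits), logits=fake_logits))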
Example #4
def deep_gen(z, batch_size):
    """
    A deeper generator. Inspired by the tensorflow implementation.
    
    Args:
        - z (Tensor): latent vectors.
        - batch_size (int): batch size, used to build the deconv output shapes.
    """
    with tf.variable_scope('generator'):
        dense1 = dense_layer(z, 7 * 7 * 256, "dense1")
        reshape = tf.reshape(dense1, [-1, 7, 7, 256], name='reshape')
        conv_tr1 = conv_tr_layer(reshape, 5, [batch_size, 7, 7, 128],
                                 'conv_tr1', strides=1)
        conv_tr2 = conv_tr_layer(conv_tr1, 5, [batch_size, 14, 14, 64],
                                 'conv_tr2')
        conv_tr3 = sigmoid_conv_tr_layer(conv_tr2, 5, [batch_size, 28, 28, 1],
                                         'conv_tr3')
    return conv_tr3
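
conv_tr_layer is not shown in this snippet. One plausible implementation matching the call sites above (square kernel size, explicit output shape, optional stride), built on tf.nn.conv2d_transpose; the actual helper may differ in initialization and activation, and sigmoid_conv_tr_layer would be the same with a sigmoid in place of the ReLU:

def conv_tr_layer(x, kernel_size, output_shape, name, strides=2):
    # Hypothetical helper: transposed convolution + bias + ReLU.
    # Note conv2d_transpose filters are [h, w, out_channels, in_channels].
    in_channels = x.get_shape().as_list()[-1]
    out_channels = output_shape[-1]
    with tf.variable_scope(name):
        w = tf.get_variable(
            'w', [kernel_size, kernel_size, out_channels, in_channels],
            initializer=tf.truncated_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [out_channels],
                            initializer=tf.zeros_initializer())
        y = tf.nn.conv2d_transpose(x, w, output_shape,
                                   strides=[1, strides, strides, 1],
                                   padding='SAME')
        return tf.nn.relu(tf.nn.bias_add(y, b))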
Example #5
def generator(input_noise, train=True):
    """ Creates convolutional generator model.
        
    See https://arxiv.org/abs/1511.06434.pdf.

    Args:
        input_noise (tf.placeholder): Input noise distribution tensor. 
        train (bool, optional): Flag for whether to freeze batch-norm layer vars. If unspecified, defaults to `True`.
    Returns:
        Tensor containing images generated from the noise distribution.
    """
    dense_1_shape = [8, 8, 10]
    dense_1_units = np.prod(dense_1_shape)

    # We need to pass `batch_size` for using in `output_shape` in deconv op.
    # See https://riptutorial.com/tensorflow/example/29767/using-tf-nn-conv2d-transpose-for-arbitary-batch-sizes-and-with-automatic-output-shape-calculation-
    batch_size = tf.shape(input_noise)[0]

    with tf.variable_scope('generator', reuse=tf.AUTO_REUSE) as scope:
        dense_1 = dense_layer(input_noise,
                              train,
                              units=dense_1_units,
                              name='dense_1')
        dense_1_reshaped = tf.reshape(dense_1,
                                      shape=[-1] + dense_1_shape,
                                      name='dense_1_reshaped')
        deconv_1 = deconv_layer(dense_1_reshaped,
                                train,
                                kernel_dims=(5, 5),
                                in_channels=dense_1_shape[-1],
                                out_channels=64,
                                batch_size=batch_size,
                                name='deconv_1')
        deconv_2 = deconv_layer(deconv_1,
                                train,
                                kernel_dims=(5, 5),
                                in_channels=64,
                                out_channels=64,
                                batch_size=batch_size,
                                name='deconv_2')
        # H, W = deconv_2.get_shape().as_list()[1: 3]
        # upsampled_deconv_2 = tf.image.resize_nearest_neighbor(deconv_2, (2 * H, 2 * W), name='upsampled_deconv_2')
        upsampled_deconv_2 = tf.keras.layers.UpSampling2D(size=(2, 2))(deconv_2)
        deconv_3 = deconv_layer(upsampled_deconv_2,
                                train,
                                kernel_dims=(7, 7),
                                in_channels=64,
                                out_channels=32,
                                batch_size=batch_size,
                                name='deconv_3')
        logits = conv_layer(deconv_3,
                            train,
                            kernel_dims=(3, 3),
                            in_channels=32,
                            out_channels=3,
                            name='logits',
                            padding='VALID',
                            use_avgpool=False,
                            use_batchnorm=False,
                            activation=None)
        out = tf.nn.tanh(logits, name=scope.name)
    return out
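
A minimal usage sketch for this generator; the 100-dim noise vector and the batch size of 16 are assumptions:

import numpy as np
import tensorflow as tf

noise = tf.placeholder(tf.float32, [None, 100], name='noise')
images = generator(noise, train=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = sess.run(images,
                     feed_dict={noise: np.random.uniform(-1, 1, (16, 100))})
    print(batch.shape)  # spatial size depends on the deconv strides above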