Example #1
def generator(images, options, reuse=False, name='gen'):
    # reuse variables when the same scope is built a second time
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
            
        # down sampling
        x = relu(instance_norm(conv2d(images, options.nf, ks=7, s=1, name='gen_ds_conv1'), 'in1_1'))
        x = relu(instance_norm(conv2d(x, 2*options.nf, ks=4, s=2, name='gen_ds_conv2'), 'in1_2'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=4, s=2, name='gen_ds_conv3'), 'in1_3'))
        
        # bottleneck
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv1'), 'in2_1'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv2'), 'in2_2'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv3'), 'in2_3'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv4'), 'in2_4'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv5'), 'in2_5'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv6'), 'in2_6'))
        
        # up sampling
        x = relu(instance_norm(deconv2d(x, 2*options.nf, ks=4, s=2, name='gen_us_deconv1'), 'in3_1'))
        x = relu(instance_norm(deconv2d(x, options.nf, ks=4, s=2, name='gen_us_deconv2'), 'in3_2'))
        x = tanh(deconv2d(x, 3, ks=7, s=1, name='gen_us_deconv3'))
        
        return x
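The helpers this generator calls (conv2d, deconv2d, instance_norm, relu, tanh) are defined elsewhere in the repo. A minimal sketch of what they might look like with the TF 1.x layers API follows; the 'SAME' padding, initializers, and epsilon are assumptions, not the original definitions.

import tensorflow as tf

def conv2d(x, nf, ks, s, name):
    # assumed wrapper: strided convolution with 'SAME' padding
    return tf.layers.conv2d(x, nf, ks, strides=s, padding='SAME', name=name)

def deconv2d(x, nf, ks, s, name):
    # assumed wrapper: transposed convolution for upsampling
    return tf.layers.conv2d_transpose(x, nf, ks, strides=s, padding='SAME', name=name)

def instance_norm(x, name):
    # normalize each feature map over its spatial dimensions
    with tf.variable_scope(name):
        mean, var = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
        scale = tf.get_variable('scale', [x.shape[-1]], initializer=tf.ones_initializer())
        offset = tf.get_variable('offset', [x.shape[-1]], initializer=tf.zeros_initializer())
        return scale * (x - mean) * tf.rsqrt(var + 1e-5) + offset

relu, tanh = tf.nn.relu, tf.nn.tanh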
Example #2
    def generator(self, inputs, scope='generator', reuse=None):
        with tf.variable_scope(scope, reuse=reuse):

            # the generator upscales a tiny image with transposed-convolution
            # layers until it reaches the final output dimensions. These vars
            # are the starting tiny-image dimensions; five stride-2 layers
            # follow, which is why input dimensions must be divisible by 32.
            minirows = self.img_shape[0] // 32
            minicols = self.img_shape[1] // 32

            # batch normalization needs to know whether this is training
            # or inference
            bn = BN(self.is_training)

            # dense (i.e. fully connected) layer followed by a reshape into
            # the tiny image, which starts at a channel depth of 512 that is
            # gradually reduced down to the 3 output channels (r, g, b)
            t = dense(inputs, minirows * minicols * 512)
            t = lrelu(bn(reshape(t,
                                 (tf.shape(t)[0], minirows, minicols, 512))))

            t = lrelu(bn(conv2dtr(t, 512)))
            t = lrelu(bn(conv2dtr(t, 256)))
            t = lrelu(bn(conv2dtr(t, 128)))
            t = lrelu(bn(conv2dtr(t, 64)))

            # final transposed conv to reach the output channel depth
            # (img_shape[2], i.e. 3 for RGB)
            logits = conv2dtr(t, self.img_shape[2])
            return tanh(logits)  # common final activation in GANs
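The wrappers used here (dense, reshape, lrelu, conv2dtr, BN) are also external. A plausible sketch under TF 1.x; the kernel size, stride, and leak slope are assumptions rather than the repo's actual values.

import tensorflow as tf

def dense(x, units):
    return tf.layers.dense(x, units)

def reshape(x, shape):
    return tf.reshape(x, shape)

def lrelu(x, alpha=0.2):
    return tf.nn.leaky_relu(x, alpha)

def conv2dtr(x, filters, ks=5, strides=2):
    # transposed conv that doubles the spatial size when strides=2,
    # which is what lets five of them undo the //32 above
    return tf.layers.conv2d_transpose(x, filters, ks, strides=strides, padding='same')

class BN:
    # batch norm bound to an is_training flag so every call shares it
    def __init__(self, is_training):
        self.is_training = is_training

    def __call__(self, x):
        return tf.layers.batch_normalization(x, training=self.is_training)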
Example #3
import numpy as np
import torch

# edugrad import paths below are assumed from its usual module layout
from edugrad import ops
from edugrad.ops import reduce_mean
from edugrad.tensor import Tensor


def test_tanh():
    batch = np.array([[1.0, 2.0], [3.0, 4.0]])
    edugrad_batch = Tensor(batch)
    torch_batch = torch.tensor(batch, requires_grad=True)
    # forward
    outputs = ops.tanh(edugrad_batch)
    torch_outputs = torch.tanh(torch_batch)
    np.testing.assert_allclose(outputs.value, torch_outputs.detach().numpy())
    # backward
    reduce_mean(outputs).backward()
    torch_outputs.mean().backward()
    np.testing.assert_allclose(edugrad_batch.grad, torch_batch.grad.numpy())
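The gradient this test checks follows from d/dx tanh(x) = 1 - tanh(x)^2. In an edugrad-style define-by-run framework, the op could be implemented roughly as below; the forward/backward/ctx interface mirrors edugrad's pattern but is a sketch, not its confirmed API.

import numpy as np

class Tanh:
    @staticmethod
    def forward(ctx, x):
        out = np.tanh(x)
        ctx.append(out)  # cache the output for the backward pass
        return out

    @staticmethod
    def backward(ctx, grad_output):
        out = ctx[-1]
        # d/dx tanh(x) = 1 - tanh(x)^2
        return [grad_output * (1.0 - out ** 2)]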
Example #4
    def generator(self, x, c, reuse=False):
        print("Generator ...........")

        with tf.variable_scope('generator') as scope:
            if reuse:
                scope.reuse_variables()

            c = tf.reshape(c, [-1, 1, 1, self.feature_length])
            c = tf.tile(c, [1, 64, 64, 1])
            inputs = tf.concat([x, c], axis=3)

            # Initial conv layer over the concatenated input
            # pad 3 => https://arxiv.org/pdf/1711.09020.pdf
            outputs = tf.pad(inputs, [[0, 0], [3, 3], [3, 3], [0, 0]])
            outputs = ops.conv2d(outputs, 64, 7, 1, 'VALID', scope="g_init")

            # Downsize twice
            outputs = ops.conv2d(outputs, 128, 4, 2, scope="g_down_sample_1")
            outputs = ops.conv2d(outputs, 256, 4, 2, scope="g_down_sample_2")

            # Resblocks: CONV-(N256, K3x3, S1, P1), IN, ReLU
            outputs = residule_block(outputs, 256, 3, 1, scope="g_resblock_1")
            outputs = residule_block(outputs, 256, 3, 1, scope="g_resblock_2")
            outputs = residule_block(outputs, 256, 3, 1, scope="g_resblock_3")
            outputs = residule_block(outputs, 256, 3, 1, scope="g_resblock_4")
            outputs = residule_block(outputs, 256, 3, 1, scope="g_resblock_5")
            outputs = residule_block(outputs, 256, 3, 1, scope="g_resblock_6")

            # Upsampling twice
            outputs = ops.instance_norm(
                ops.relu(
                    ops.deconv2d(outputs, 128, 4, 2, scope='g_upsampling_1')),
                'g_in_1')
            outputs = ops.instance_norm(
                ops.relu(
                    ops.deconv2d(outputs, 64, 4, 2, scope='g_upsampling_2')),
                'g_in_2')

            # pad 3 => https://arxiv.org/pdf/1711.09020.pdf
            outputs = tf.pad(outputs, [[0, 0], [3, 3], [3, 3], [0, 0]])
            outputs = ops.tanh(
                ops.conv2d(outputs, 3, 7, 1, 'VALID', scope='g_out'))

        return outputs
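The reshape/tile/concat at the top is the StarGAN-style conditioning trick: the domain label vector is broadcast to every spatial position and stacked onto the image channels. A shape-only walk-through, with the batch size and feature_length as illustrative values:

import tensorflow as tf

x = tf.zeros([8, 64, 64, 3])        # images
c = tf.zeros([8, 5])                # one 5-dim domain label per image

c = tf.reshape(c, [-1, 1, 1, 5])    # -> (8, 1, 1, 5)
c = tf.tile(c, [1, 64, 64, 1])      # -> (8, 64, 64, 5): the label at every pixel
inputs = tf.concat([x, c], axis=3)  # -> (8, 64, 64, 8), fed to the first conv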
Example #5
def generator(images, options, reuse=False, name='gen'):
    with tf.variable_scope(name, reuse=reuse):
        # down sampling
        x = relu(instance_norm(conv2d(images, options.nf, ks=7, s=1, name='gen_ds_conv1'), 'in1_1'))
        x = relu(instance_norm(conv2d(x, 2*options.nf, ks=4, s=2, name='gen_ds_conv2'), 'in1_2'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=4, s=2, name='gen_ds_conv3'), 'in1_3'))

        # bottleneck
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv1'), 'in2_1'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv2'), 'in2_2'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv3'), 'in2_3'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv4'), 'in2_4'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv5'), 'in2_5'))
        x = relu(instance_norm(conv2d(x, 4*options.nf, ks=3, s=1, name='gen_bn_conv6'), 'in2_6'))

        # up sampling
        x = relu(instance_norm(deconv2d(x, 2*options.nf, ks=4, s=2, name='gen_us_deconv1'), 'in3_1'))
        x = relu(instance_norm(deconv2d(x, options.nf, ks=4, s=2, name='gen_us_deconv2'), 'in3_2'))
        x = tanh(deconv2d(x, 3, ks=7, s=1, name='gen_us_deconv3'))

        return x
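To call this generator, options only needs an nf attribute (the base filter count), so a SimpleNamespace suffices; nf=64 and the 256x256 input size below are illustrative values, not ones taken from the repo.

import tensorflow as tf
from types import SimpleNamespace

options = SimpleNamespace(nf=64)
images = tf.placeholder(tf.float32, [None, 256, 256, 3])

fake = generator(images, options)               # first call creates the variables
fake2 = generator(images, options, reuse=True)  # later calls share them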
Example #6
    def forward(self, word_indices: list[Tensor]) -> Tensor:
        """Executes the forward pass of a FeedForwardLanguageModel.

        Args:
            word_indices: list of [batch_size] tensors
                length = number of previous characters / n-gram length
                each one contains indices of chars at that position

        Returns:
            [batch_size, vocab_size] Tensor
            containing logits (not full probabilities, i.e. pre-softmax)
            over the vocab for each example in the batch
        """
        # TODO: (~7 lines) implement the forward pass of FFNN LM here
        # HINT: use ops.concat to concatenate word embeddings together
        # It takes a variable number of Tensors as input, so you can call
        # it as ops.concat(*embeddings), where embeddings is a list of
        # Tensors corresponding to the relevant embeddings
        # [batch_size, num_words * embedding_size]
        embs = ops.concat(*[self.embedding(index) for index in word_indices])
        return self.output(ops.tanh(self.fc(embs)))
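A plain-NumPy version of the same forward pass makes the shapes explicit; all sizes here are hypothetical, and the weight matrices stand in for the embedding, fc, and output layers the method assumes were built in __init__.

import numpy as np

rng = np.random.default_rng(0)
B, E, H, V, N = 32, 64, 128, 100, 3   # batch, embed, hidden, vocab, n-gram

emb_table = rng.normal(size=(V, E))
W_fc, b_fc = rng.normal(size=(N * E, H)), np.zeros(H)
W_out, b_out = rng.normal(size=(H, V)), np.zeros(V)

word_indices = [rng.integers(0, V, size=B) for _ in range(N)]

embs = np.concatenate([emb_table[i] for i in word_indices], axis=1)  # [B, N*E]
hidden = np.tanh(embs @ W_fc + b_fc)                                 # [B, H]
logits = hidden @ W_out + b_out                                      # [B, V], pre-softmax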
Example #7
File: nodes.py  Project: xiamike/nn
    def fprop(self):
        logger.debug("%s prop: %s" % (str(self), str(self.pred.out.shape)))
        # forward pass: elementwise tanh of the predecessor node's output
        self.out = tanh(self.pred.out)
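The matching backward pass for such a node would apply tanh's derivative to the incoming gradient. A sketch, assuming the node interface exposes grad attributes (that attribute name is a guess, not taken from the project):

    def bprop(self):
        # d/dx tanh(x) = 1 - tanh(x)^2; self.out caches tanh(x) from fprop
        self.pred.grad += self.grad * (1.0 - self.out ** 2)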