Example #1
0
def main():
    """Run every project-provided sanity test against the module-level GAN pieces."""
    import problem_unittests as tests

    # Pair each test helper with the arguments it expects, then run them in order.
    checks = (
        (tests.test_model_inputs, (model_inputs,)),
        (tests.test_discriminator, (discriminator, tf)),
        (tests.test_generator, (generator, tf)),
        (tests.test_model_loss, (model_loss,)),
        (tests.test_model_opt, (model_opt, tf)),
    )
    for run_check, check_args in checks:
        run_check(*check_args)
Example #2
0
        # NOTE(review): this is the tail of a discriminator network whose `def`
        # lies outside this chunk; the commented lines below are an abandoned
        # fourth conv layer.
        #x4 = tf.layers.conv2d(relu3, 512, 3, strides=2, padding='same')
        #x4 = tf.layers.batch_normalization(x4, training=True)
        #relu4 = tf.maximum(0.2*x4, x4)

        #flat = tf.reshape(relu3, (-1,4*4*256))
        # NOTE(review): reshaping to [-1, 1] puts every scalar activation in its
        # own row, so the dense layer below sees 1 feature per "sample" — the
        # commented-out (-1, 4*4*256) flatten looks like the intended shape.
        # Confirm against the conv stack's output before trusting this.
        flat = tf.reshape(relu3, [-1, 1])
        # Single logit per (reshaped) row, squashed to a probability for `out`;
        # raw `logits` are also returned for use with a sigmoid-cross-entropy loss.
        logits = tf.layers.dense(flat, 1)
        out = tf.sigmoid(logits)
        return out, logits


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_discriminator(discriminator, tf)

# ## Generator
# Implement the `generator` function to generate images from `z`. The function should be able to reuse the variables of the neural network.
# Use a variable scope named "generator" with [`tf.variable_scope`](https://www.tensorflow.org/api_docs/python/tf/variable_scope) so the variables in this function can be reused.
#
# The function should return the generated 28 x 28 x `out_channel_dim` images.

# In[7]:


def generator(z, out_channel_dim, is_train=True):
    """
    Create the generator network
    :param z: Input z
    :param out_channel_dim: The number of channels in the output image
Example #3
0
        :return: Discriminator logits; the output of the neural network
        """
        # define feedforward behavior
        # NOTE(review): tail of a PyTorch Discriminator.forward whose `def` is
        # outside this chunk. Four strided convs, each followed by LeakyReLU
        # with negative slope 0.2 (the usual DCGAN discriminator activation).
        out = F.leaky_relu(self.conv1(x), 0.2)
        out = F.leaky_relu(self.conv2(out), 0.2)
        out = F.leaky_relu(self.conv3(out), 0.2)
        out = F.leaky_relu(self.conv4(out), 0.2)
        # Flatten to (batch, conv_dim*8*2*2) — assumes conv4 emits
        # (batch, conv_dim*8, 2, 2); TODO confirm against the conv definitions.
        out = out.view(-1, self.conv_dim * 8 * 2 * 2)
        # Raw logits (no sigmoid) — pair with BCEWithLogits-style loss.
        out = self.fc(out)
        return out


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_discriminator(Discriminator)


# NOTE(review): this class is corrupted by the scrape. The __init__ starts as a
# PyTorch Generator, but from the `tf.reshape` line onward the body is
# TensorFlow *discriminator* code spliced in from a different source, and the
# final `return` sits at class-body indentation (outside any method), which is
# a SyntaxError. Recover the original notebook cell before editing further.
class Generator(nn.Module):
    def __init__(self, z_size, conv_dim):
        """
        Initialize the Generator Module
        :param z_size: The length of the input latent vector, z
        :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer
        """
        super(Generator, self).__init__()

        # complete init function
        self.conv_dim = conv_dim
        self.fc = nn.Linear(z_size, conv_dim * 8 * 2 * 2)
        # reshape
        
        #4*4*256
        # Flatten it
        # NOTE(review): everything below here belongs to a TF discriminator,
        # not this PyTorch Generator — scrape artifact, do not trust.
        flat = tf.reshape(relu2, (-1, 7*7*128))
        #flat = tf.nn.dropout(flat,0.8)
        logits = tf.layers.dense(flat, 1)
      
        out = tf.sigmoid(logits)

    return  out, logits


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_discriminator(discriminator, tf)


# ### Generator
# Implement the `generator` function to generate images from `z`. The function should be able to reuse the variables of the neural network.
# Use a variable scope named "generator" with [`tf.variable_scope`](https://www.tensorflow.org/api_docs/python/tf/variable_scope) so the variables in this function can be reused.
#
# The function should return the generated 28 x 28 x `out_channel_dim` images.

# In[120]:

def generator(z, out_channel_dim, is_train=True):
    """
    Create the generator network
    :param z: Input z
    :param out_channel_dim: The number of channels in the output image
Example #5
0
 def unit_test(self):
     """Run all project-provided sanity tests against this model's components."""
     # Each entry pairs a test helper with the arguments it expects.
     suite = (
         (tests.test_model_inputs, (self.model_inputs,)),
         (tests.test_discriminator, (self.discriminator, tf)),
         (tests.test_generator, (self.generator, tf)),
         (tests.test_model_loss, (self.model_loss,)),
         (tests.test_model_opt, (self.model_opt, tf)),
     )
     for run_check, check_args in suite:
         run_check(*check_args)
Example #6
0
 def test_structure(self):
     test_discriminator(Discriminator)