Code Example #1
def main():
    import problem_unittests as tests

    tests.test_model_inputs(model_inputs)
    tests.test_discriminator(discriminator, tf)
    tests.test_generator(generator, tf)
    tests.test_model_loss(model_loss)
    tests.test_model_opt(model_opt, tf)
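A minimal usage sketch (not part of the original snippet): guard the call so the unit checks run only when the file is executed directly.

if __name__ == '__main__':
    main()  # runs the five provided unit checks against the GAN implementations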
Code Example #2
        # unused extra upsampling block (left commented out):
        # x4 = tf.layers.batch_normalization(x4, training=is_train)
        # x4 = tf.maximum(0.2 * x4, x4)  # shape: (1, 28, 28, 64)

        # final transposed convolution up to the output resolution
        logits = tf.layers.conv2d_transpose(x3,
                                            out_channel_dim,
                                            5,
                                            strides=2,
                                            padding='same')
        out = tf.tanh(logits)  # shape: (1, 28, 28, 3)
        return out


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_generator(generator, tf)

# ### Loss Function
# Implement the `model_loss` function to compute the losses used to train the GANs.
# The function should return a tuple of the form (discriminator loss, generator loss).
# (A hedged sketch of one possible implementation follows this example.)
#
# Use the functions you have already implemented:
# - `discriminator(images, reuse=False)`
# - `generator(z, out_channel_dim, is_train=True)`

# In[8]:


def model_loss(input_real, input_z, out_channel_dim):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
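A hedged sketch of how `model_loss` could be completed, based only on the description above: it should return a (discriminator loss, generator loss) tuple built from the already-implemented `discriminator` and `generator`. This assumes TensorFlow 1.x, sigmoid cross-entropy losses, and that `discriminator` returns a (sigmoid output, logits) pair; none of these details are confirmed by the snippets on this page.

def model_loss(input_real, input_z, out_channel_dim):
    # score a batch of real images and a batch of generated images
    g_model = generator(input_z, out_channel_dim)
    d_model_real, d_logits_real = discriminator(input_real)
    d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)

    # discriminator: real images labelled 1, generated images labelled 0
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_logits_real, labels=tf.ones_like(d_logits_real)))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
    d_loss = d_loss_real + d_loss_fake

    # generator: tries to make the discriminator score its images as real
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))

    return d_loss, g_loss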
Code Example #3
        :return: A 32x32x3 Tensor image as output
        """
        # define feedforward behavior
        out = self.fc(x)                              # fully connected projection of the latent vector
        out = out.view(-1, self.conv_dim * 8, 2, 2)   # reshape to a (conv_dim * 8) x 2 x 2 feature map
        out = F.relu(self.deconv1(out))               # upsample through transposed convolutions
        out = F.relu(self.deconv2(out))
        out = F.relu(self.deconv3(out))
        out = torch.tanh(self.deconv4(out))           # squash to [-1, 1]; final 32x32x3 image
        return out


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_generator(Generator)
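A hypothetical usage sketch: the constructor arguments (`z_size`, `conv_dim`) are assumed here, since only `forward` is shown in this snippet.

import torch

G = Generator(z_size=100, conv_dim=32)   # hypothetical constructor arguments
z = torch.randn(16, 100)                 # a batch of 16 latent vectors
fake_images = G(z)                       # per the docstring: a batch of 32x32x3 images in [-1, 1]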


def weights_init_normal(m):
    """
    Applies initial weights to certain layers in a model.
    The weights are taken from a normal distribution
    with mean = 0, std dev = 0.02.
    :param m: A module or layer in a network
    """
    # classname will be something like:
    # `Conv`, `BatchNorm2d`, `Linear`, etc.
    classname = m.__class__.__name__
    # draw conv and linear weights from N(0.0, 0.02), as the docstring describes
    if ('Conv' in classname) or ('Linear' in classname):
        m.weight.data.normal_(0.0, 0.02)
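A brief usage note (not from the original snippet): PyTorch's `Module.apply` calls a function on every submodule, so the initializer above can be applied to a whole network at once.

# G is assumed to be an already-built Generator instance, as in the sketch above
G.apply(weights_init_normal)   # runs weights_init_normal on every layer of G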
Code Example #4
                                        padding='same')
        x2 = tf.layers.batch_normalization(x2, training=is_train)
        x2 = tf.maximum(0.1 * x2, x2)  # leaky ReLU

        # 14x14x128
        logits = tf.layers.conv2d_transpose(x2, out_channel_dim, 5, strides=2,
                                            padding='same')
        # 28x28x3 now
        out = tf.tanh(logits)
    return out

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_generator(generator, tf)
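A small sketch (assuming TensorFlow 1.x, where `tf.layers.conv2d_transpose` is available) illustrating the shape comments above: a stride-2 transposed convolution with 'same' padding doubles the spatial dimensions, so 14x14 becomes 28x28.

import tensorflow as tf

x2 = tf.placeholder(tf.float32, (None, 14, 14, 128))
logits = tf.layers.conv2d_transpose(x2, 3, 5, strides=2, padding='same')
print(logits.get_shape().as_list())   # [None, 28, 28, 3]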


# ### Loss Function
# Implement the `model_loss` function to compute the losses used to train the GANs.
# The function should return a tuple of the form (discriminator loss, generator loss).
#
# Use the functions you have already implemented:
# - `discriminator(images, reuse=False)`
# - `generator(z, out_channel_dim, is_train=True)`

# In[121]:

def model_loss(input_real, input_z, out_channel_dim):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
Code Example #5
File: gan.py  Project: wyllmein2000/dlnd-project-5
    def unit_test(self):
        tests.test_model_inputs(self.model_inputs)
        tests.test_discriminator(self.discriminator, tf)
        tests.test_generator(self.generator, tf)
        tests.test_model_loss(self.model_loss)
        tests.test_model_opt(self.model_opt, tf)
Code Example #6
    def test_structure(self):
        test_generator(Generator)