Code Example #1
File: plot_graph.py  Project: Hagihara-A/dl-scratch-3
import numpy as np
import dezero.functions as F
from dezero import Variable


def network():
    # Forward pass: y = exp(x) at x = 1.0
    x = Variable(np.array(1.0), name="x")
    y = F.exp(x)
    y.name = "y"
    # create_graph=True retains the backward graph, so the gradient
    # itself can be differentiated again (higher-order derivatives)
    y.backward(create_graph=True)
    gx = x.grad  # first derivative: exp(x)
    gx.name = "gx"
    gx.backward(create_graph=True)
    return gx
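Given the file name plot_graph.py, the returned variable is presumably visualized with dezero's plot_dot_graph utility. A minimal usage sketch (the output file name is an assumption, and Graphviz must be installed):

from dezero.utils import plot_dot_graph

gx = network()
# Render the computational graph of the second-order gradient to a PNG
plot_dot_graph(gx, verbose=False, to_file="exp_graph.png")  # file name is illustrative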
Code Example #2
    def __call__(self, x, C=1.0, k=1):
        """Compute the loss function of the VAE.

        The loss value equals the ELBO (Evidence Lower Bound)
        multiplied by -1.

        Args:
            x (Variable or ndarray): Input variable.
            C (float): Usually 1.0. Can be changed to weight the
                second (KL) term of the ELBO, which acts as a
                regularizer.
            k (int): Number of Monte Carlo samples used to estimate
                the reconstruction term.
        """
        z_mean, z_log_var = self.encoder(x)

        # Reconstruction term, averaged over k Monte Carlo samples of z
        rec_loss = 0
        for _ in range(k):
            z = self.encoder.sampling(z_mean, z_log_var)
            y = self.decoder(z)
            rec_loss += F.binary_cross_entropy(F.flatten(y), F.flatten(x)) / k

        # Closed-form KL divergence KL(N(mu, sigma^2) || N(0, 1)),
        # summed over latent units and averaged over the batch
        kl_loss = C * (z_mean ** 2 + F.exp(z_log_var) - z_log_var - 1) * 0.5
        kl_loss = F.sum(kl_loss) / len(x)
        return rec_loss + kl_loss
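As a side check (not part of the project code), the closed-form KL term above, 0.5 * (mu^2 + var - log(var) - 1) for KL(N(mu, var) || N(0, 1)), can be verified against a Monte Carlo estimate in plain NumPy; the values of mu and log_var here are illustrative:

import numpy as np

mu, log_var = 0.7, -0.3
var = np.exp(log_var)

# Closed form: KL(N(mu, var) || N(0, 1))
closed_form = 0.5 * (mu ** 2 + var - log_var - 1)

# Monte Carlo estimate of E_q[log q(z) - log p(z)]
z = mu + np.sqrt(var) * np.random.randn(1_000_000)
log_q = -0.5 * (np.log(2 * np.pi) + log_var) - (z - mu) ** 2 / (2 * var)
log_p = -0.5 * np.log(2 * np.pi) - z ** 2 / 2
print(closed_form, (log_q - log_p).mean())  # the two values should closely agree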
Code Example #3
import dezero.functions as F
from dezero import as_variable

def softmax1d(x):
    x = as_variable(x)  # accept ndarray or Variable
    y = F.exp(x)        # note: no max-subtraction, so large inputs can overflow
    sum_y = F.sum(y)
    return y / sum_y
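A usage sketch (the input values are illustrative):

import numpy as np

p = softmax1d(np.array([1.0, 2.0, 3.0]))
print(p)         # probabilities, largest for the largest input
print(F.sum(p))  # sums to 1 (up to floating-point error)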
Code Example #4
    def sampling(self, z_mean, z_log_var):
        # Reparameterization trick: z = mu + sigma * epsilon. Since
        # z_log_var is the log variance (as in the KL term of Example #2),
        # the standard deviation is exp(0.5 * z_log_var)
        batch_size = len(z_mean)
        xp = dezero.cuda.get_array_module(z_mean.data)
        epsilon = xp.random.randn(batch_size, self.latent_size)
        return z_mean + F.exp(0.5 * z_log_var) * epsilon
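A quick NumPy sanity check (illustrative values, not from the project) that the reparameterized samples have the intended mean and variance:

import numpy as np

mu, log_var = 1.5, -1.0
z = mu + np.exp(0.5 * log_var) * np.random.randn(100_000)
print(z.mean(), z.var())  # roughly 1.5 and exp(-1.0), i.e. about 0.368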
Code Example #5
if '__file__' in globals():
    import os, sys

    # Make the local dezero package importable when run as a script
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import numpy as np

from dezero import Variable
from dezero.functions import square, exp

x = Variable(np.array(0.5))
y = square(exp(square(x)))  # y = exp(x^2)^2 = exp(2 * x^2)
y.backward()
print(x.grad)  # dy/dx evaluated at x = 0.5
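As a cross-check (not part of the script), the gradient can be derived by hand: y = exp(2x^2), so dy/dx = 4x * exp(2x^2), which at x = 0.5 is about 3.2974.

import numpy as np

print(4 * 0.5 * np.exp(2 * 0.5 ** 2))  # approx. 3.2974425, matching x.grad above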
Code Example #6
    def test(self):
        # Inside no_grad() no computational graph is recorded,
        # so the output of exp has no creator function
        with no_grad():
            x = Variable(np.array(10))
            y = exp(x)
            self.assertIsNone(y.creator)
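For contrast, a hypothetical companion test (the class and method names are assumptions, not from the source): with graph construction enabled, exp records its creator, so backprop can reach x.

import unittest

import numpy as np
from dezero import Variable
from dezero.functions import exp


class ExpGraphTest(unittest.TestCase):  # hypothetical test class
    def test_creator_recorded(self):
        # Default mode: the output remembers the function that created it
        x = Variable(np.array(10))
        y = exp(x)
        self.assertIsNotNone(y.creator)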
Code Example #7
import dezero.functions as F

def softmax1d(x):
    # Variant of Example #3 that assumes x is already a Variable
    y = F.exp(x)
    sum_y = F.sum(y)
    return y / sum_y