Ejemplo n.º 1
0
    def test_sum(self):
        """sum matches numpy for full, axis, and keepdims reductions; backward spreads the upstream gradient."""
        data = np.array([[1, 2, 3], [4, 5, 6]])
        x = Variable(data)

        assert_array_equal(sum(x).data, data.sum())
        assert_array_equal(sum(x, axis=0).data, data.sum(axis=0))
        assert_array_equal(sum(x, keepdims=True).data, data.sum(keepdims=True))

        # d(2 * sum(x, axis=0)) / dx is 2 everywhere.
        out = 2 * sum(x, axis=0)
        out.backward()
        assert_array_equal(x.grad.data, np.full((2, 3), 2))
 def test_shape_check(self):
     """The gradient of a broadcast operand must be reduced back to its own shape."""
     x = Variable(np.random.randn(1, 10))
     b = Variable(np.random.randn(10))
     loss = F.sum(x + b)
     loss.backward()
     self.assertEqual(b.grad.shape, b.shape)
Ejemplo n.º 3
0
    def __call__(self, x, C=1.0, k=1):
        """Compute the VAE loss: the negative ELBO (Evidence Lower Bound).

        Args:
            x (Variable or ndarray): Input variable.
            C (int): Usually 1.0. Scales the KL term of the ELBO,
                acting as a regularization strength.
            k (int): Number of Monte Carlo samples drawn from the encoder.
        """
        z_mean, z_log_var = self.encoder(x)

        # Closed-form KL divergence between q(z|x) and the unit Gaussian
        # prior, averaged over the batch.
        kl_term = C * (z_mean ** 2 + F.exp(z_log_var) - z_log_var - 1) * 0.5
        kl_loss = F.sum(kl_term) / len(x)

        # Monte Carlo estimate of the reconstruction term over k samples.
        rec_loss = 0
        for _ in range(k):
            sample = self.encoder.sampling(z_mean, z_log_var)
            recon = self.decoder(sample)
            rec_loss += F.binary_cross_entropy(
                F.flatten(recon), F.flatten(x)) / k

        return rec_loss + kl_loss
Ejemplo n.º 4
0
# coding: utf-8
# Make the repository root importable when running this file directly.
if '__file__' in globals():
    import os, sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import numpy as np
from dezero import Variable
import dezero.functions as F

# Sum along axis 0 and backpropagate: each element of x contributes exactly
# once to the output, so x.grad should be all ones.
x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
c = Variable(np.array([[10, 20, 30], [40, 50, 60]]))
t = x + c
print(t)
y = F.sum(t, axis=0)
y.backward()
print(y)
print(x.grad)

# keepdims=True keeps every reduced axis as a size-1 dimension,
# so the result shape is (1, 1, 1, 1).
x = Variable(np.random.randn(2, 3, 4, 5))
y = x.sum(keepdims=True)
print(y.shape)
Ejemplo n.º 5
0
 def test_backward4(self):
     """Gradient check: axis=None reduces over every element."""
     def reduce_all(v):
         return F.sum(v, axis=None)

     self.assertTrue(gradient_check(reduce_all, np.random.rand(10, 20, 20)))
Ejemplo n.º 6
0
 def test_datatype(self):
     """F.sum must return a 0-dim ndarray, not a bare np.float64 scalar."""
     result = F.sum(Variable(np.random.rand(10)))
     self.assertFalse(np.isscalar(result))
Ejemplo n.º 7
0
# Make the repository root importable when running this file directly.
if '__file__' in globals():
    import os, sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import numpy as np
from dezero import Variable
import dezero.functions as F

# Full reduction of a 1-D variable: the gradient is ones everywhere.
x = Variable(np.array([1, 2, 3, 4, 5, 6]))
y = F.sum(x)
y.backward()
print(y)
print(x.grad)

# Full reduction of a 2-D variable: same all-ones gradient, original shape.
x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
y = F.sum(x)
y.backward()
print(y)
print(x.grad)

# Reduction along axis 0 only.
x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
y = F.sum(x, axis=0)
y.backward()
print(y)
print(x.grad)

# keepdims=True preserves rank: output shape is (1, 1, 1, 1).
x = Variable(np.random.randn(2, 3, 4, 5))
y = x.sum(keepdims=True)
print(y.shape)
Ejemplo n.º 8
0
def total_varitaion_loss(x):
    """Anisotropic total-variation penalty on a 4-D batch of images.

    Sums the squared differences between each pixel and its neighbours
    along the last two axes (presumably NCHW layout -- TODO confirm).
    NOTE(review): the "varitaion" typo is kept so existing callers still work.
    """
    base = x[:, :, :-1, :-1]
    vertical = base - x[:, :, 1:, :-1]
    horizontal = base - x[:, :, :-1, 1:]
    return F.sum(vertical ** 2 + horizontal ** 2)
Ejemplo n.º 9
0
import numpy as np
from dezero import Variable
import dezero.functions as F

# Full reduction of a 1-D variable: the gradient is ones everywhere.
x = Variable(np.array([1, 2, 3, 4, 5, 6]))
y = F.sum(x)
y.backward()
print(y)
print(x.grad)

# Full reduction of a 2-D variable: same all-ones gradient, original shape.
x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
y = F.sum(x)
y.backward()
print(y)
print(x.grad)

# Reduction along axis 0 only.
x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
y = F.sum(x, axis=0)
y.backward()
print(y)
print(x.grad)

# keepdims=True preserves rank: output shape is (1, 1, 1, 1).
x = Variable(np.random.randn(2, 3, 4, 5))
y = F.sum(x, keepdims=True)
print(y.shape)
Ejemplo n.º 10
0
def mean_squared_error(x0, x1):
    """Mean squared error between x0 and x1, averaged over the leading axis."""
    residual = x0 - x1
    return F.sum(residual ** 2) / len(residual)
Ejemplo n.º 11
0
 def test_forward(self):
     """sum over arange(6) reshaped to (2, 3) equals 15."""
     v = Variable(np.arange(6).reshape(2, 3))
     self.assertEqual(sum(v).data, np.array(15))
Ejemplo n.º 12
0
def mean_squared_error(x0, x1):
    """Mean squared error between x0 and x1.

    NOTE: a more memory-efficient version is implemented inside F.
    """
    d = x0 - x1
    return F.sum(d ** 2) / len(d)
def softmax1d(x):
    """Softmax of a 1-D variable (naive: no max-subtraction for stability)."""
    exp_x = F.exp(x)
    return exp_x / F.sum(exp_x)
Ejemplo n.º 14
0
 def test_backward5(self):
     """Gradient check for a full reduction that keeps dimensions."""
     def reduce_keep(v):
         return F.sum(v, axis=None, keepdims=True)

     self.assertTrue(
         gradient_check(reduce_keep, np.random.rand(10, 20, 20) * 100))
Ejemplo n.º 15
0
# Make the repository root importable when running this file directly.
if '__file__' in globals():
    import os, sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import numpy as np
from dezero import Variable
from dezero.utils import plot_dot_graph
import dezero.functions as F

# Full reduction followed by backward: the gradient is ones everywhere.
x = Variable(np.array([1, 2, 3, 4, 5, 6]))
y = F.sum(x)
y.backward()
print(y)
print(x.grad)
Ejemplo n.º 16
0
 def test_forward1(self):
     """Full reduction of a 1-D array matches np.sum."""
     data = np.random.rand(10)
     y = F.sum(Variable(data))
     self.assertTrue(np.allclose(y.data, np.sum(data)))
Ejemplo n.º 17
0
 def test_backward3(self):
     """Gradient check for a reduction along the last axis."""
     def reduce_axis2(v):
         return F.sum(v, axis=2)

     self.assertTrue(check_backward(reduce_axis2, np.random.rand(10, 20, 20)))
Ejemplo n.º 18
0
 def test_backward(self):
     """Backward through a full sum yields an all-ones gradient."""
     v = Variable(np.arange(6).reshape(2, 3))
     out = sum(v)
     out.backward()
     assert_equal(v.grad.data, np.ones_like(v.data))
# Make the repository root importable when running this file directly.
if "__file__" in globals():
    import os, sys

    sys.path.append(os.path.join(os.path.dirname(__file__), ".."))

import numpy as np
import dezero.functions as F
from dezero import Variable

x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
c = Variable(np.array([[10, 20, 30], [40, 50, 60]]))
t = x + c
y = F.sum(t)  # raises until F.sum is implemented (step 39)

# BUG FIX: the original read `y = backward(retain_grad=True)`, which calls an
# undefined bare name (NameError) and clobbers y. backward is a method of the
# output Variable; retain_grad=True keeps intermediate grads (y.grad, t.grad).
y.backward(retain_grad=True)

print(y.grad)
print(t.grad)
print(x.grad)
print(c.grad)
 def test_forward2(self):
     """Reduction along axis 1 matches np.sum for a 3-D array."""
     data = np.random.rand(10, 20, 30)
     y = F.sum(Variable(data), axis=1)
     self.assertTrue(np.allclose(y.data, np.sum(data, axis=1)))
Ejemplo n.º 21
0
 def test_forward1(self):
     """Summing a 0-dim variable is the identity, matching np.sum."""
     v = Variable(np.array(2.0))
     self.assertTrue(array_allclose(F.sum(v).data, np.sum(v.data)))
def mean_squared_error(y1, y2):
    """Mean squared error between y1 and y2, normalized by the batch size."""
    batch_size = y1.shape[0]
    residual = y1 - y2
    return F.sum(residual * residual) / batch_size
Ejemplo n.º 23
0
 def test_forward3(self):
     """axis=1 with keepdims=True preserves rank, matching numpy."""
     data = np.random.rand(10, 20, 30)
     y = F.sum(Variable(data), axis=1, keepdims=True)
     self.assertTrue(
         array_allclose(y.data, np.sum(data, axis=1, keepdims=True)))
Ejemplo n.º 24
0
def softmax1d(x):
    """Softmax over a 1-D input; accepts an ndarray or a Variable."""
    v = as_variable(x)
    exp_v = F.exp(v)
    return exp_v / F.sum(exp_v)
Ejemplo n.º 25
0
 def test_backward1(self):
     """Gradient check for a plain full sum over a 1-D input."""
     def reduce(v):
         return F.sum(v)

     self.assertTrue(gradient_check(reduce, np.random.rand(10)))
Ejemplo n.º 26
0
# Make the repository root importable when running this file directly.
if '__file__' in globals():
    import os
    import sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

# BUG FIX: these imports were nested inside the `if` block above, so the code
# below raised NameError on np/Variable/F whenever __file__ was absent
# (e.g. interactive or exec'd use). They belong at module level.
import numpy as np
from dezero.utils import plot_dot_graph
from dezero import Variable
import dezero.functions as F

# Sum a broadcast-free addition and keep intermediate gradients so that
# y.grad and t.grad remain inspectable after backward.
x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
c = Variable(np.array([[10, 20, 30], [40, 50, 60]]))
t = x + c
y = F.sum(t)
y.backward(retain_grad=True)
print(y.grad)
print(t.grad)
print(x.grad)
print(c.grad)
import numpy as np
from dezero import Variable
import dezero.functions as F

# x1 has shape (1,) and is broadcast against x0's shape (3,) by the add.
x0 = Variable(np.array([1, 2, 3]))
x1 = Variable(np.array([10]))
z = x0 + x1
# y = z.sum()
y = F.sum(z)
print(y)

# Backward must sum the broadcast gradient back to x1's shape: expect [3].
y.backward()
print(x1.grad)