    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 pad=0, nobias=False):
        """畳込みレイヤ

        Parameters
        ----------
        in_channels : int or None
            入力データのチャンネル数。Noneの場合はforward時のxからin_channelsを取得する
        out_channels : int
            出力データのチャンネル数
        kernel_size : int or (int, int)
            :カーネルサイズ
        stride : int or (int, int)
            ストライド
        pad : int or (int, int)
            パディング
        nobias : bool
            バイアスを使用するかどうか
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad = pad

        self.W = Parameter(None, name='W')
        if nobias:
            self.b = None
        else:
            b_data = np.zeros(out_channels).astype(np.float32)
            self.b = Parameter(b_data, name='b')
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 pad=0, nobias=False, dtype=np.float32):
        """Two-dimensional convolutional layer.

        Args:
            in_channels (int or None): Number of channels of input arrays. If
                `None`, parameter initialization will be deferred until the
                first forward data pass at which time the size will be
                determined.
            out_channels (int): Number of channels of output arrays.
            kernel_size (int or (int, int)): Size of filters.
            stride (int or (int, int)): Stride of filter applications.
            pad (int or (int, int)): Spatial padding width for input arrays.
            nobias (bool): If `True`, then this layer does not use the bias.
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad = pad
        self.dtype = dtype

        self.W = Parameter(None, name='W')
        if in_channels is not None:
            self._init_W()

        if nobias:
            self.b = None
        else:
            self.b = Parameter(np.zeros(out_channels, dtype=dtype), name='b')
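
The constructor above defers weight creation to `self._init_W()`, which is not included in the snippet. A minimal sketch of such a helper, assuming He-style scaling over in_channels * KH * KW (the method body below is an assumption, not part of the original example):

    def _init_W(self):
        # Assumed helper: build W once in_channels is known,
        # scaled by 1 / sqrt(in_channels * KH * KW).
        C, OC = self.in_channels, self.out_channels
        k = self.kernel_size
        KH, KW = (k, k) if isinstance(k, int) else k
        scale = np.sqrt(1 / (C * KH * KW))
        self.W.data = np.random.randn(OC, C, KH, KW).astype(self.dtype) * scale
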
    def __init__(self, in_size, out_size, nobias=False, dtype=np.float32):
        super().__init__()
        I, O = in_size, out_size
        W_data = np.random.randn(I, O).astype(dtype) * np.sqrt(1 / I)
        self.W = Parameter(W_data, name='W')
        if nobias:
            self.b = None
        else:
            self.b = Parameter(np.zeros(O, dtype=dtype), name='b')
    def __init__(self):
        super().__init__()
        # `.avg_mean` and `.avg_var` are `Parameter` objects, so they will be
        # saved to a file (using `save_weights()`).
        # But they don't need grads, so they're just used as `ndarray`.
        self.avg_mean = Parameter(None, name='avg_mean')
        self.avg_var = Parameter(None, name='avg_var')
        self.gamma = Parameter(None, name='gamma')
        self.beta = Parameter(None, name='beta')
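
All four parameters start out empty, so they have to be sized lazily once an input is seen. A rough sketch of that lazy initialization, assuming a helper called at the start of forward (the name `_init_params` and the shapes are assumptions, not part of the snippet):

    def _init_params(self, x):
        # Assumed lazy-init helper: size each parameter to the channel
        # dimension D of the first input seen in forward.
        D = x.shape[1]
        if self.avg_mean.data is None:
            self.avg_mean.data = np.zeros(D, dtype=x.dtype)
        if self.avg_var.data is None:
            self.avg_var.data = np.ones(D, dtype=x.dtype)
        if self.gamma.data is None:
            self.gamma.data = np.ones(D, dtype=x.dtype)
        if self.beta.data is None:
            self.beta.data = np.zeros(D, dtype=x.dtype)
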
Example #5
    def __init__(self, out_size, nobias=False, dtype=np.float32, in_size=None):
        super().__init__()
        self.in_size = in_size
        self.out_size = out_size
        self.dtype = dtype

        self.W = Parameter(None, name='W')
        if self.in_size is not None:
            self.__init_W()
        if nobias:
            self.b = None
        else:
            self.b = Parameter(np.zeros(out_size, dtype=dtype), name='b')
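
Here the weight is again created lazily, via `self.__init_W()`, which the snippet does not show. A minimal sketch under the assumption that it mirrors the eager Linear initializer earlier (randn scaled by 1 / sqrt(in_size)):

    def __init_W(self):
        # Assumed helper: create W once in_size is known (e.g. read from the
        # first input passed to forward), with 1 / sqrt(in_size) scaling.
        I, O = self.in_size, self.out_size
        self.W.data = np.random.randn(I, O).astype(self.dtype) * np.sqrt(1 / I)
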
    def __init__(self, in_size, out_size=None, nobias=False):
        super().__init__()

        if out_size is None:
            in_size, out_size = None, in_size
        self.in_size = in_size
        self.out_size = out_size

        self.W = Parameter(None, name='W')
        if nobias:
            self.b = None
        else:
            self.b = Parameter(np.zeros(out_size, dtype=np.float32), name='b')
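
The argument shuffling at the top of this constructor lets the layer be built from just an output size, leaving in_size to be filled in later. A short usage sketch (the class name `Linear` is assumed from context):

fc1 = Linear(100)       # out_size=100; in_size stays None until forward
fc2 = Linear(10, 100)   # explicit in_size=10, out_size=100
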
Example #7
    def __init__(self,
                 out_size: int,
                 nobias: bool = False,
                 dtype=np.float32,
                 in_size: Optional[int] = None) -> None:
        super().__init__()
        self.in_size = in_size
        self.out_size = out_size
        self.dtype = dtype
        self.W = Parameter(None, name="W")
        if self.in_size is not None:
            self._init_W()

        if nobias:
            self.b = None
        else:
            self.b = Parameter(np.zeros(out_size, dtype=dtype), name='b')
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad=0,
                 nobias=False):
        super().__init__()
        self.kernel_size = _pair(kernel_size)
        self.stride = stride
        self.pad = pad

        I, O = in_channels, out_channels
        KH, KW = self.kernel_size

        # Scale by 1 / sqrt(fan_in) with fan_in = in_channels * KH * KW.
        # Note the parentheses: `1 / I * KH * KW` would compute (KH * KW) / I.
        W_data = np.random.randn(O, I, KH, KW).astype('f') * np.sqrt(
            1 / (I * KH * KW))
        self.W = Parameter(W_data, name='W')
        if nobias:
            self.b = None
        else:
            self.b = Parameter(np.zeros(O).astype('f'), name='b')
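
The constructor above relies on a `_pair` helper that is not part of the snippet. A plausible minimal version, assuming the usual int-or-2-tuple convention:

def _pair(x):
    # Assumed helper: normalize an int kernel/stride/pad spec to a 2-tuple.
    if isinstance(x, int):
        return (x, x)
    if isinstance(x, tuple) and len(x) == 2:
        return x
    raise ValueError('expected int or 2-tuple, got {!r}'.format(x))
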
Example #9
    def __init__(self,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad=0,
                 nobias=False,
                 dtype=np.float32,
                 in_channels=None):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad = pad
        self.dtype = dtype

        self.W = Parameter(None, name='W')
        if in_channels is not None:
            self._init_W()

        if nobias:
            self.b = None
        else:
            self.b = Parameter(np.zeros(out_channels, dtype=dtype), name='b')
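
Since in_channels defaults to None here, the layer can be declared before the input shape is known. A short usage sketch (the class name `Conv2d` and the lazy `_init_W()` call in forward are assumptions based on this constructor):

# in_channels omitted: W stays empty until the first forward pass,
# when _init_W() can read the channel count from the input.
conv = Conv2d(out_channels=64, kernel_size=3, stride=1, pad=1)
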
    def __init__(self, in_size, out_size):
        super().__init__()
        self.W = Parameter(np.random.randn(in_size, out_size), name='W')
Example #11
# The top of this example was cut off. The setup below (imports, toy data,
# and two Linear layers) is an assumed, minimal reconstruction so that the
# training loop further down runs as written.
import numpy as np
import dezero.functions as F
import dezero.layers as L

np.random.seed(0)
x = np.random.rand(100, 1)                          # assumed toy inputs
y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)  # assumed toy targets

l1 = L.Linear(10)   # hidden layer; in_size is inferred on the first call
l2 = L.Linear(1)    # output layer


def predict(x):
    y = l1(x)
    y = F.sigmoid(y)
    y = l2(y)
    return y


lr = 0.2
iters = 10000

for i in range(iters):
    y_pred = predict(x)
    loss = F.mean_squared_error(y, y_pred)

    l1.cleargrads()
    l2.cleargrads()
    loss.backward()

    for l in [l1, l2]:
        for p in l.params():
            p.data -= lr * p.grad.data
    if i % 1000 == 0:
        print(loss)

# From here on: tests
from dezero.core import Parameter
layer = L.Layer()
layer.p1 = Parameter(np.array(1))
print(type(layer.p1))
print(layer._params)
print(layer.p1)
for name in layer._params:
    print(name, layer.__dict__[name])
Example #12
import numpy as np
from dezero.core import Parameter
from dezero.layers import TestClass

test = TestClass()
test.p1 = Parameter(np.array(1.0))
test.p2 = Parameter(np.array(2.0))
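
The example stops right after assigning the parameters; presumably, as with the Layer test above, the point is that Parameter attributes are registered automatically. An assumed short continuation that prints what was collected (this relies on TestClass behaving like the Layer shown earlier):

# Assumed continuation: Parameter attributes should have been registered
# in `_params`, mirroring the earlier `layer._params` check.
for name in test._params:
    print(name, test.__dict__[name])
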