Example #1
 def __init__(self, hidden_size=100):
     super().__init__()
     self.conv1 = L.Conv2d(30, kernel_size=3, stride=1, pad=1)
     #self.conv2 = L.Conv2d(1, kernel_size=3, stride=1, pad=1)
     self.fc3 = L.Linear(hidden_size)
     #self.fc4 = L.Linear(hidden_size)
     self.fc5 = L.Linear(10)
Example #2
    def test_linear_forward(self, linear_object):
        l1 = L.Linear(10)
        l2 = L.Linear(1)
        y_pred = predict(linear_object[0], l1, l2)
        loss = F.mean_squared_error(linear_object[1], y_pred)

        assert np.allclose(loss.data, 0.81651785)
Example #3
    def test_neural_regression_layer(self):
        np.random.seed(0)
        x = np.random.rand(100, 1)
        y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)

        model = L.Layer()
        model.l1 = L.Linear(10)
        model.l2 = L.Linear(1)

        def predict(model, x):
            y = model.l1(x)
            y = F.sigmoid(y)
            y = model.l2(y)
            return y

        lr = 0.2
        iters = 10000

        for i in range(iters):
            y_pred = predict(model, x)
            loss = F.mean_squared_error(y, y_pred)

            model.cleargrads()
            loss.backward()

            for p in model.params():
                p.data -= lr * p.grad.data
Example #4
 def __init__(self, hidden_size=100):
     super().__init__()
     self.conv1_1 = L.Conv2d(16, kernel_size=3, stride=1, pad=1)
     self.conv1_2 = L.Conv2d(16, kernel_size=3, stride=1, pad=1)
     self.conv2_1 = L.Conv2d(32, kernel_size=3, stride=1, pad=1)
     self.conv2_2 = L.Conv2d(32, kernel_size=3, stride=1, pad=1)
     self.fc3 = L.Linear(hidden_size)
     self.fc4 = L.Linear(10)
Example #5
    def __init__(self, in_size, hidden_size, out_size):
        super().__init__()

        I, H, O = in_size, hidden_size, out_size
        with self.init_scope():
            self.x2h = L.Linear(I, H)
            self.h2h = L.Linear(H, H)
            self.h2y = L.Linear(H, O)

        self.h = None
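The constructor above only wires up the three linear maps and initializes the hidden state. A minimal sketch of the companion step, assuming the usual recurrent-cell method names (reset_state and the tanh update are assumptions, since the original methods are not shown):

def reset_state(self):
    self.h = None

def __call__(self, x):
    # new hidden state: input-to-hidden plus hidden-to-hidden, through tanh
    if self.h is None:
        h_new = F.tanh(self.x2h(x))
    else:
        h_new = F.tanh(self.x2h(x) + self.h2h(self.h))
    self.h = h_new
    return self.h2y(h_new)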
Example #6
 def __init__(self, latent_size):
     super().__init__()
     self.latent_size = latent_size
     self.conv1 = L.Conv2d(32, kernel_size=3, stride=1, pad=1)
     self.conv2 = L.Conv2d(64, kernel_size=3, stride=2, pad=1)
     self.conv3 = L.Conv2d(64, kernel_size=3, stride=1, pad=1)
     self.conv4 = L.Conv2d(64, kernel_size=3, stride=1, pad=1)
     self.linear1 = L.Linear(32)
     self.linear2 = L.Linear(latent_size)
     self.linear3 = L.Linear(latent_size)
Example #7
 def test_linear_backward(self, linear_object):
     l1 = L.Linear(10)
     l2 = L.Linear(1)
     y_pred = predict(linear_object[0], l1, l2)
     loss = F.mean_squared_error(linear_object[1], y_pred)
     l1.cleargrads()
     l2.cleargrads()
     loss.backward()
     for l in [l1, l2]:
         for p in l.params():
             assert p.grad.data is not None
Example #8
    def __init__(self, sizes, activation=F.sigmoid):
        super().__init__()
        self.activation = activation
        self.layers = []

        for i, (in_size, out_size) in enumerate(zip(sizes[:-1], sizes[1:])):
            layer = L.Linear(in_size, out_size)
            setattr(self, 'l' + str(i), layer)
            self.layers.append(layer)
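Only the constructor is shown above. A hedged sketch of the matching forward pass (the method name is an assumption): apply the activation after every layer except the last, so the network ends in a raw linear output.

def forward(self, x):
    for layer in self.layers[:-1]:
        x = self.activation(layer(x))
    return self.layers[-1](x)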
Example #9
 def __init__(self):
     super().__init__()
     self.conv1_1 = L.Conv2d(3, 64, 3, 1, 1)
     self.conv1_2 = L.Conv2d(64, 64, 3, 1, 1)
     self.conv2_1 = L.Conv2d(64, 128, 3, 1, 1)
     self.conv2_2 = L.Conv2d(128, 128, 3, 1, 1)
     self.conv3_1 = L.Conv2d(128, 256, 3, 1, 1)
     self.conv3_2 = L.Conv2d(256, 256, 3, 1, 1)
     self.conv3_3 = L.Conv2d(256, 256, 3, 1, 1)
     self.conv4_1 = L.Conv2d(256, 512, 3, 1, 1)
     self.conv4_2 = L.Conv2d(512, 512, 3, 1, 1)
     self.conv4_3 = L.Conv2d(512, 512, 3, 1, 1)
     self.conv5_1 = L.Conv2d(512, 512, 3, 1, 1)
     self.conv5_2 = L.Conv2d(512, 512, 3, 1, 1)
     self.conv5_3 = L.Conv2d(512, 512, 3, 1, 1)
     self.fc6 = L.Linear(512 * 7 * 7, 4096)
     self.fc7 = L.Linear(4096, 4096)
     self.fc8 = L.Linear(4096, 1000)
Example #10
File: models.py  Project: khiro112/dezero
    def __init__(self, fc_output_sizes, activation=F.sigmoid):
        super().__init__()
        self.activation = activation
        self.layers = []

        for i, out_size in enumerate(fc_output_sizes):
            layer = L.Linear(out_size)
            setattr(self, 'l' + str(i), layer)
            self.layers.append(layer)
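Unlike Example #8, this version passes only the output size to L.Linear, so each input size is inferred from the first batch. A hedged usage sketch (the class name MLP and its forward pass are assumptions based on DeZero's models.py):

model = MLP((10, 1))        # one hidden layer of 10 units, one output
x = np.random.rand(100, 1)  # input size is inferred on the first call
y = model(x)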
Example #11
 def __init__(self):
     super().__init__()
     self.conv1_1 = L.Conv2d(64, kernel_size=3, stride=1, pad=1)
     self.conv1_2 = L.Conv2d(64, kernel_size=3, stride=1, pad=1)
     self.conv2_1 = L.Conv2d(128, kernel_size=3, stride=1, pad=1)
     self.conv2_2 = L.Conv2d(128, kernel_size=3, stride=1, pad=1)
     self.conv3_1 = L.Conv2d(256, kernel_size=3, stride=1, pad=1)
     self.conv3_2 = L.Conv2d(256, kernel_size=3, stride=1, pad=1)
     self.conv3_3 = L.Conv2d(256, kernel_size=3, stride=1, pad=1)
     self.conv4_1 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv4_2 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv4_3 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv5_1 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv5_2 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv5_3 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.fc6 = L.Linear(4096)
     self.fc7 = L.Linear(4096)
     self.fc8 = L.Linear(1000)
Example #12
    def __init__(self, fc_output_sizes, activation=F.sigmoid):
        super().__init__()
        self.activation = activation
        self.layers = []

        for i, out_size in enumerate(fc_output_sizes):
            layer = L.Linear(out_size)
            setattr(self, 'l' + str(i), layer)  # register each layer as a named attribute
            self.layers.append(layer)  # and also keep it in the layers list
Example #13
 def __init__(self, pretrained=False) -> None:
     super().__init__()
     self.conv1_1 = L.Conv2d(64, kernel_size=3, stride=1, pad=1)
     self.conv1_2 = L.Conv2d(64, kernel_size=3, stride=1, pad=1)
     self.conv2_1 = L.Conv2d(128, kernel_size=3, stride=1, pad=1)
     self.conv2_2 = L.Conv2d(128, kernel_size=3, stride=1, pad=1)
     self.conv3_1 = L.Conv2d(256, kernel_size=3, stride=1, pad=1)
     self.conv3_2 = L.Conv2d(256, kernel_size=3, stride=1, pad=1)
     self.conv3_3 = L.Conv2d(256, kernel_size=3, stride=1, pad=1)
     self.conv4_1 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv4_2 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv4_3 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv5_1 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv5_2 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.conv5_3 = L.Conv2d(512, kernel_size=3, stride=1, pad=1)
     self.fc6 = L.Linear(4096)
     self.fc7 = L.Linear(4096)
     self.fc8 = L.Linear(1000)
     if pretrained:
         weights_path = utils.get_file(VGG16.WEIGHT_PATH)
         self.load_weights(weights_path)
Example #14
    def __init__(self, pretrained=False):
        super().__init__()
        self.conv1_1 = L.Conv2d(3, 64, 3, 1, 1)
        self.conv1_2 = L.Conv2d(64, 64, 3, 1, 1)
        self.conv2_1 = L.Conv2d(64, 128, 3, 1, 1)
        self.conv2_2 = L.Conv2d(128, 128, 3, 1, 1)
        self.conv3_1 = L.Conv2d(128, 256, 3, 1, 1)
        self.conv3_2 = L.Conv2d(256, 256, 3, 1, 1)
        self.conv3_3 = L.Conv2d(256, 256, 3, 1, 1)
        self.conv4_1 = L.Conv2d(256, 512, 3, 1, 1)
        self.conv4_2 = L.Conv2d(512, 512, 3, 1, 1)
        self.conv4_3 = L.Conv2d(512, 512, 3, 1, 1)
        self.conv5_1 = L.Conv2d(512, 512, 3, 1, 1)
        self.conv5_2 = L.Conv2d(512, 512, 3, 1, 1)
        self.conv5_3 = L.Conv2d(512, 512, 3, 1, 1)
        self.fc6 = L.Linear(4096)
        self.fc7 = L.Linear(4096)
        self.fc8 = L.Linear(1000)

        if pretrained:
            weights_path = utils.get_file(VGG16.WEIGHTS_PATH)
            self.load_weights(weights_path)
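A hedged usage sketch for the pretrained model. DeZero's VGG16 also provides a static preprocess helper that resizes the image to 224x224 and subtracts the BGR channel means; the image path below is a placeholder:

import numpy as np
from PIL import Image
import dezero
from dezero.models import VGG16

model = VGG16(pretrained=True)
img = Image.open('sample.jpg')  # placeholder path
x = VGG16.preprocess(img)       # float32 CHW array, mean-subtracted
x = x[np.newaxis]               # add the batch axis
with dezero.test_mode():        # disable train-only behavior
    y = model(x)
print(np.argmax(y.data))        # predicted ImageNet class id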
Example #15
    def __init__(self, fc_output_sizes, activation=F.sigmoid):
        """

        Parameters
        ----------
        fc_output_sizes : tuple or list
            fully connected output size for each layer
        activation : function
            activation function; sigmoid is used by default
        """
        super().__init__()
        self.activation = activation
        self.layers = []

        for i, out_size in enumerate(fc_output_sizes):
            layer = L.Linear(out_size)
            setattr(self, f'l{i}', layer)
            self.layers.append(layer)
Example #16
    def __init__(self, n_layers=152, pretrained=False):
        super().__init__()

        if n_layers == 50:
            block = [3, 4, 6, 3]
        elif n_layers == 101:
            block = [3, 4, 23, 3]
        elif n_layers == 152:
            block = [3, 8, 36, 3]
        else:
            raise ValueError('The n_layers argument should be either 50, 101,'
                             ' or 152, but {} was given.'.format(n_layers))

        self.conv1 = L.Conv2d(3, 64, 7, 2, 3)
        self.bn1 = L.BatchNorm()
        self.res2 = BuildingBlock(block[0], 64, 64, 256, 1)
        self.res3 = BuildingBlock(block[1], 256, 128, 512, 2)
        self.res4 = BuildingBlock(block[2], 512, 256, 1024, 2)
        self.res5 = BuildingBlock(block[3], 1024, 512, 2048, 2)
        self.fc6 = L.Linear(1000)

        if pretrained:
            weights_path = utils.get_file(ResNet.WEIGHTS_PATH.format(n_layers))
            self.load_weights(weights_path)
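A minimal usage sketch (the ImageNet-sized input shape is an assumption; the forward pass of this class is not shown here):

model = ResNet(n_layers=50)
x = np.random.rand(1, 3, 224, 224).astype(np.float32)
y = model(x)  # (1, 1000) class scores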
Example #17
 def __init__(self, in_size, hidden_size, out_size, activation=F.sigmoid):
     super().__init__()
     self.f = activation
     self.l1 = L.Linear(in_size, hidden_size)
     self.l2 = L.Linear(hidden_size, out_size)
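The forward pass this constructor implies (the method name is an assumption) is a single hidden layer with the chosen activation:

def forward(self, x):
    y = self.f(self.l1(x))
    return self.l2(y)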
Example #18
 def __init__(self, hidden_size, out_size):
     super().__init__()
     self.rnn = L.RNN(hidden_size)
     self.h2y = L.Linear(out_size)
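A hedged sketch of the companion methods, following DeZero's SimpleRNN pattern (the names are assumptions): reset_state clears the recurrent state between sequences, and the forward pass feeds the RNN output through the linear head:

def reset_state(self):
    self.rnn.reset_state()

def forward(self, x):
    h = self.rnn(x)
    return self.h2y(h)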
Example #19
import numpy as np
from dezero import Variable
import dezero.functions as F
import dezero.layers as L

# dataset
np.random.seed(0)
x = np.random.rand(100, 1)
y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)

l1 = L.Linear(10) # output size
l2 = L.Linear(1)

def predict(x):
    y = l1(x)
    y = F.sigmoid_simple(y)
    y = l2(y)
    return y

lr = 0.2
iters = 10000

for i in range(iters):
    y_pred = predict(x)
    loss = F.mean_squared_error(y, y_pred)
    l1.cleargrads()
    l2.cleargrads()
    loss.backward()

    for l in [l1, l2]:
        for p in l.params():
            p.data -= lr * p.grad.data
Example #20
 def __init__(self, hidden_size, out_size):
     super().__init__()
     self.rnn = L.LSTM(hidden_size)
     self.fc = L.Linear(out_size)
Example #21
# WIP

import numpy as np
import matplotlib.pyplot as plt
import dezero
import dezero.functions as F
import dezero.layers as L
from dezero import DataLoader
from dezero.models import Sequential
from dezero.optimizers import Adam

C, H, W = 512, 3, 3
G = Sequential(L.Linear(C * H * W), F.Reshape((-1, C, H, W)), L.BatchNorm(),
               F.ReLU(), L.Deconv2d(C // 2, kernel_size=2, stride=2, pad=1),
               L.BatchNorm(), F.ReLU(),
               L.Deconv2d(C // 4,
                          kernel_size=2, stride=2, pad=1), L.BatchNorm(),
               F.ReLU(), L.Deconv2d(C // 8, kernel_size=2, stride=2, pad=1),
               L.BatchNorm(), F.ReLU(),
               L.Deconv2d(1, kernel_size=3, stride=3, pad=1), F.Sigmoid())

D = Sequential(L.Conv2d(64, kernel_size=3, stride=3, pad=1), F.LeakyReLU(0.1),
               L.Conv2d(128, kernel_size=2, stride=2, pad=1), L.BatchNorm(),
               F.LeakyReLU(0.1), L.Conv2d(256, kernel_size=2, stride=2, pad=1),
               L.BatchNorm(), F.LeakyReLU(0.1),
               L.Conv2d(512, kernel_size=2, stride=2, pad=1), L.BatchNorm(),
               F.LeakyReLU(0.1), F.flatten, L.Linear(1))

D.layers[0].W.name = 'conv1_W'
D.layers[0].b.name = 'conv1_b'
Example #22
File: gan.py  Project: EriHata/my_dezero
import matplotlib.pyplot as plt
import dezero
import dezero.functions as F
import dezero.layers as L
from dezero import DataLoader
from dezero.models import Sequential
from dezero.optimizers import Adam

use_gpu = dezero.cuda.gpu_enable
max_epoch = 5
batch_size = 128
hidden_size = 62

fc_channel, fc_height, fc_width = 128, 7, 7

gen = Sequential(L.Linear(1024), L.BatchNorm(), F.relu,
                 L.Linear(fc_channel * fc_height * fc_width), L.BatchNorm(),
                 F.relu,
                 lambda x: F.reshape(x, (-1, fc_channel, fc_height, fc_width)),
                 L.Deconv2d(fc_channel // 2, kernel_size=4, stride=2, pad=1),
                 L.BatchNorm(), F.relu,
                 L.Deconv2d(1, kernel_size=4, stride=2, pad=1), F.sigmoid)

dis = Sequential(L.Conv2d(64, kernel_size=4, stride=2, pad=1), F.leaky_relu,
                 L.Conv2d(128, kernel_size=4, stride=2, pad=1),
                 L.BatchNorm(), F.leaky_relu, F.flatten, L.Linear(1024),
                 L.BatchNorm(), F.leaky_relu, L.Linear(1), F.sigmoid)


def init_weight(dis, gen, hidden_size):
    # Input dummy data to initialize weights
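The listing is truncated here. A hedged reconstruction of the body, following the pattern in DeZero's GAN example (the 0.02 scale and the layer-name filter are assumptions, and numpy is assumed imported as np):

def init_weight(dis, gen, hidden_size):
    # Run one dummy batch through both nets so the lazily initialized
    # parameters are created, then re-initialize the weights.
    batch_size = 1
    z = np.random.rand(batch_size, hidden_size)
    fake_images = gen(z)
    dis(fake_images)

    for l in dis.layers + gen.layers:
        classname = l.__class__.__name__
        if classname.lower() in ('conv2d', 'deconv2d', 'linear'):
            l.W.data = 0.02 * np.random.randn(*l.W.data.shape)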
Example #23
 def __init__(self, hidden_size, out_size):
     super().__init__()
     self.l1 = L.Linear(hidden_size)
     self.l2 = L.Linear(out_size)
Example #24
File: step44.py  Project: tokuma09/DeZero
    def __init__(self, H, O, seed=0):
        super().__init__()
        self.l1 = L.Linear(H)
        self.l2 = L.Linear(O)
Example #25
 def __init__(self):
     super().__init__()
     self.l1 = L.Linear(100)  # hidden_size
     self.l2 = L.Linear(4)  # action_size
Example #26
import matplotlib.pyplot as plt
import numpy as np

import dezero.functions as F
import dezero.layers as L
from dezero import Variable

np.random.seed(0)
x = np.random.rand(100, 1)
y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)
x, y = Variable(x), Variable(y)

I, H, O = 1, 10, 1
l1 = L.Linear(H)
l2 = L.Linear(O)


def predict(x):
    y = l1(x)  # Layer objects are callable; __call__ wraps forward
    y = F.sigmoid(y)
    y = l2(y)
    return y


lr = 0.2
iters = 10000

for i in range(iters):
    y_pred = predict(x)
    loss = F.mean_squared_error(y, y_pred)

    l1.cleargrads()
    l2.cleargrads()
    loss.backward()

    for l in [l1, l2]:
        for p in l.params():
            p.data -= lr * p.grad.data
Example #27
 def __init__(self, action_size):
     super().__init__()
     self.l1 = L.Linear(128)
     self.l2 = L.Linear(128)
     self.l3 = L.Linear(action_size)
Example #28
 def __init__(self):
     super().__init__()
     self.to_shape = (64, 14, 14)  # (C, H, W)
     self.linear = L.Linear(np.prod(self.to_shape))
     self.deconv = L.Deconv2d(32, kernel_size=4, stride=2, pad=1)
     self.conv = L.Conv2d(1, kernel_size=3, stride=1, pad=1)
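A hedged forward sketch for this decoder (the method name and activations are assumptions): project up with the linear layer, reshape to (N, C, H, W), upsample with the transposed convolution, then map down to one channel:

def forward(self, x):
    N = x.shape[0]
    y = F.relu(self.linear(x))
    y = F.reshape(y, (N,) + self.to_shape)  # (N, 64, 14, 14)
    y = F.relu(self.deconv(y))              # (N, 32, 28, 28)
    return self.conv(y)                     # (N, 1, 28, 28)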
Example #29
import numpy as np
import matplotlib.pyplot as plt

from dezero import Variable
import dezero.functions as F
import dezero.layers as L

# Improved version of step 43

# dataset
np.random.seed(0)
x = np.random.rand(100, 1)
y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)

# initialize the weights
l1 = L.Linear(10)  # specify the output size
l2 = L.Linear(1)

# inference with the neural network
def predict(x):
  y = l1(x)
  y = F.sigmoid(y)
  y = l2(y)
  return y

lr = 0.2
iters = 10000

# train the neural network
for i in range(iters):
  y_pred = predict(x)
  loss = F.mean_squared_error(y, y_pred)

  l1.cleargrads()
  l2.cleargrads()
  loss.backward()

  for l in [l1, l2]:
    for p in l.params():
      p.data -= lr * p.grad.data
Example #30
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import numpy as np
import matplotlib.pyplot as plt
from dezero import Variable
from dezero import setup_variable
from dezero.utils import plot_dot_graph
import dezero.functions as F
import dezero.layers as L

setup_variable()
if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.rand(100, 1)
    y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)

    l1 = L.Linear(10)
    l2 = L.Linear(1)

    def predict(x):
        y = l1(x)
        y = F.sigmoid(y)
        y = l2(y)
        return y

    lr = 0.2
    iters = 10001

    for i in range(iters):
        y_pred = predict(x)
        loss = F.mean_squared_error(y, y_pred)

        l1.cleargrads()
        l2.cleargrads()
        loss.backward()

        for l in [l1, l2]:
            for p in l.params():
                p.data -= lr * p.grad.data