Exemplo n.º 1
0
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # make modules in the parent directory importable
import numpy as np
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

# Load MNIST with normalized pixels and one-hot labels.
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                  one_hot_label=True)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

# A tiny batch is enough for a gradient check.
x_batch = x_train[:3]
t_batch = t_train[:3]

grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)

# Mean absolute difference per parameter; values near zero mean
# backprop agrees with the numerical gradient.
for name in grad_numerical:
    gap = np.average(np.abs(grad_backprop[name] - grad_numerical[name]))
    print(f"{name}:{gap}")
Exemplo n.º 2
0
from datasets import load_data
import matplotlib.pyplot as plt
from two_layer_net import TwoLayerNet
from optimizer import SGD
import numpy as np

# Hyperparameters.
max_epoch = 300
batch_size = 30
hidden_size = 10
learning_rate = 1

x, t = load_data()
model = TwoLayerNet(2, hidden_size, 3)
optimizer = SGD(lr=learning_rate)

# Bookkeeping for the training loop.
data_size = len(x)
max_iters = data_size // batch_size
total_loss = 0
loss_count = 0
loss_list = []

for epoch in range(max_epoch):
    # Reshuffle the samples once per epoch.
    order = np.random.permutation(data_size)
    x = x[order]
    t = t[order]

    for it in range(max_iters):
        # Slice the next mini-batch out of the shuffled data.
        lo = it * batch_size
        hi = lo + batch_size
        batch_x = x[lo:hi]
        batch_t = t[lo:hi]
        loss = model.forward(batch_x, batch_t)
        model.backward()
Exemplo n.º 3
0
import sys
sys.path.append('..')  # make modules in the parent directory importable
import numpy as np
from common.optimizer import SGD
from dataset import spiral
import matplotlib.pyplot as plt
from two_layer_net import TwoLayerNet  # model defined in a sibling file

# Hyperparameters.
max_epoch = 300  # number of full passes over the dataset
batch_size = 30
hidden_size = 10
learning_rate = 1.0

x, t = spiral.load_data()
model = TwoLayerNet(input_size=2, hidden_size=hidden_size, output_size=3)
optimizer = SGD(lr=learning_rate)

# Variables used during training.
data_size = len(x)
max_iters = data_size // batch_size
total_loss = 0
loss_count = 0
loss_list = []

for epoch in range(max_epoch):
    # Shuffle the data once per epoch.
    idx = np.random.permutation(data_size)  # a random reordering of 0..data_size-1
    x = x[idx]  # indexing with the shuffled ids reorders the samples
    t = t[idx]
    '''
Exemplo n.º 4
0
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # make modules in the parent directory importable
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

# Load MNIST with normalized pixels and one-hot labels (both default to False).
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                  one_hot_label=True)

network = TwoLayerNet(784, 50, 10)

# Hyperparameters.
iters_num = 10000  # total number of SGD iterations
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []

# Iterations that make up one pass over the training set (at least 1).
iter_per_epoch = max(train_size / batch_size, 1)

for step in range(iters_num):
    # Draw a random mini-batch of sample indices.
    sample_ids = np.random.choice(train_size, batch_size)
    x_batch = x_train[sample_ids]
    t_batch = t_train[sample_ids]
Exemplo n.º 5
0
 def __init__(self):
     """Build the 784-50-10 network and mark the instance as not yet trained."""
     self.train_status = 'not trained'
     self.network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
Exemplo n.º 6
0
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # make modules in the parent directory importable

import numpy as np
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

# Fixed initial weights for a 2-2-1 network, installed via changeParam below.
W1 = np.array( [[-0.0032973, -0.02393722], [-0.02107663, 0.01280219]] )
W2 = np.array( [[-0.01024167], [-0.01514217]] )

network = TwoLayerNet(input_size=2, hidden_size=2, output_size=1)
network.changeParam(W1, W2)

# Hyperparameters.
iters_num = 10
batch_size = 4
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []

iter_per_epoch = 1

# The full XOR truth table used as a single fixed batch: inputs and targets.
x_batch = np.array([ [0, 0], [0, 1], [1, 0], [1, 1] ] )
t_batch = np.array([ [0], [1], [1], [0]] )

for i in range(iters_num):

    # Gradient
    #grad = network.numerical_gradient(x_batch, t_batch)
Exemplo n.º 7
0
 def setUp(self):
     """Create a fresh model, optimizer and trainer, then load the dataset."""
     net = TwoLayerNet(input_size=2, hidden_size=10, output_size=3)
     opt = SGD(lr=1.0)
     self.trainer = Trainer(net, opt)
     self.x, self.t = load_data()
     self.data_size = len(self.x)
Exemplo n.º 8
0
import numpy as np
from two_layer_net import TwoLayerNet

# Ten random 2-d input vectors.
X = np.random.rand(10, 2)

# A small 2-4-3 network; print its predictions and its raw parameters.
net = TwoLayerNet(2, 4, 3)

print(net.predict(X))

print(net.params)