# Example 1
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

# Author: Sphantix
# Mail: [email protected]
# created time: 日 24  3 2019 04:57:29 下午 CST

import sys, os
sys.path.append(os.pardir)  # 为了导入父目录而进行的设定
import numpy as np
import matplotlib.pyplot as plt
from mnist.mnist import load_mnist
from deep_cnn_net import DeepConvNet
from common.trainer import Trainer

# Load MNIST keeping the 2-D image shape (flatten=False) so the CNN
# receives (1, 28, 28) inputs rather than flat 784-vectors.
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

# Build the deep convolutional network and drive it with the shared
# Trainer helper: Adam optimizer, lr=0.001, 20 epochs of mini-batches
# of 100; accuracy is estimated on 1000 samples per epoch to keep
# evaluation cheap.
network = DeepConvNet()
trainer = Trainer(
    network,
    x_train, t_train,
    x_test, t_test,
    epochs=20,
    mini_batch_size=100,
    optimizer='Adam',
    optimizer_param={'lr': 0.001},
    evaluate_sample_num_per_epoch=1000,
)
trainer.train()

# Save the trained parameters
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

# Author: Sphantix
# Mail: [email protected]
# created time: Wed 16 Jan 2019 12:39:51 PM CST

import sys, os
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.pardir)
from mnist.mnist import load_mnist
from two_layer_net import TwoLayerNet

# Load MNIST normalized to [0, 1] with one-hot encoded labels.
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                  one_hot_label=True)

# Histories filled in by the (subsequent) training loop.
train_loss_list = []
train_acc_list = []
test_acc_list = []

# Hyperparameters
iters_num = 20000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

# Average number of iterations per epoch
iter_per_epoch = max(train_size / batch_size, 1)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
# Example 3
def get_testdata():
    """Return the raw MNIST test set: flat 784-vectors, no normalization,
    integer (non-one-hot) labels."""
    _, (test_images, test_labels) = load_mnist(
        flatten=True, normalize=False, one_hot_label=False)
    return test_images, test_labels
import numpy as np

from mnist.mnist import load_mnist
import NeuralNetwork

# データの読み込み
# Load MNIST as flat, normalized vectors with one-hot labels.
(x_train, t_train), (x_test, t_test) = load_mnist(
    flatten=True, normalize=True, one_hot_label=True)

# Network topology: 784 inputs, five hidden layers of 100 units, 10 outputs.
# Batch normalization and 20% dropout regularize the tanh layers;
# cross-entropy is the training loss.
shape = [784] + [100] * 5 + [10]
NN = NeuralNetwork.NeuralNetwork(
    shape,
    batchNorm=True,
    activation='tanh',
    loss='CrossEntropy',
    dropoutRatio=0.2,
)

# Train with Adam (lr=0.01), mini-batches of 100, 3000 epochs,
# plotting progress (graph=True).
NN.learn(
    x_train, t_train,
    epoch=3000,
    learningRate=0.01,
    batchSize=100,
    optimizer='Adam',
    graph=True,
)

# Evaluate on the held-out test set.
NN.test(x_test, t_test)

# Dump the learned weights into the 'param' directory.
NN.output(directory='param')
# Example 5
# Used by the training / inference steps.
def get_normed_traindata():