Example #1
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # allow importing modules from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from DeepLearning.dataset.mnist import load_mnist
from DeepLearning.ch07.simple_convnet import SimpleConvNet
from DeepLearning.common.trainer import Trainer

# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

# Reduce the dataset if processing takes too long
#x_train, t_train = x_train[:5000], t_train[:5000]
#x_test, t_test = x_test[:1000], t_test[:1000]

max_epochs = 20

network = SimpleConvNet(input_dim=(1, 28, 28),
                        conv_param={
                            'filter_num': 30,
                            'filter_size': 5,
                            'pad': 0,
                            'stride': 1
                        },
                        hidden_size=100,
                        output_size=10,
                        weight_init_std=0.01)

# the remaining Trainer arguments below are assumed (they follow the book's
# ch07/train_convnet.py); adjust the optimizer settings as needed
trainer = Trainer(network,
                  x_train, t_train, x_test, t_test,
                  epochs=max_epochs,
                  mini_batch_size=100,
                  optimizer='Adam',
                  optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()
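
# The trained parameters can be kept for later reuse; a minimal sketch, assuming
# SimpleConvNet provides save_params() as in the book's ch07/simple_convnet.py
# (the file name "params.pkl" is arbitrary).
network.save_params("params.pkl")
print("Saved network parameters to params.pkl")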
Example #2
# coding: utf-8
import os
import sys
sys.path.append(os.pardir)  # allow importing modules from the parent directory

import numpy as np
import matplotlib.pyplot as plt
from DeepLearning.dataset.mnist import load_mnist
from DeepLearning.common.multi_layer_net_extend import MultiLayerNetExtend
from DeepLearning.common.trainer import Trainer

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)

# Reduce the training data to reproduce overfitting
x_train = x_train[:300]
t_train = t_train[:300]

# Set whether to use Dropout, and the dropout ratio ==========
use_dropout = True  # set to False to run without Dropout
dropout_ratio = 0.2
# ====================================================

network = MultiLayerNetExtend(input_size=784,
                              hidden_size_list=[100, 100, 100, 100, 100, 100],
                              output_size=10,
                              use_dropout=use_dropout,
                              dropout_ration=dropout_ratio)  # note: 'dropout_ration' is the keyword spelling used by MultiLayerNetExtend
# the remaining Trainer arguments below are assumed (they follow the book's
# ch06/overfit_dropout.py); adjust epochs/optimizer as needed
trainer = Trainer(network,
                  x_train, t_train, x_test, t_test,
                  epochs=301,
                  mini_batch_size=100,
                  optimizer='sgd',
                  optimizer_param={'lr': 0.01},
                  verbose=True)
trainer.train()
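
# To see the effect of Dropout, plot training vs. test accuracy per epoch; this
# sketch assumes Trainer records train_acc_list and test_acc_list, as in the
# book's common/trainer.py.
x = np.arange(len(trainer.train_acc_list))
plt.plot(x, trainer.train_acc_list, marker='o', label='train')
plt.plot(x, trainer.test_acc_list, marker='s', label='test')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()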
Example #3
from DeepLearning.dataset.mnist import load_mnist  # import assumed; the fragment omits its imports


def get_data():
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
    return x_test, t_test
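
# A quick, self-contained check of what get_data() returns: 10,000 flattened
# test images and their integer labels (shapes follow load_mnist with
# flatten=True and one_hot_label=False).
x, t = get_data()
print(x.shape)  # (10000, 784)
print(t.shape)  # (10000,)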
Example #4
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # allow importing modules from the parent directory

import numpy as np
from DeepLearning.dataset.mnist import load_mnist
from DeepLearning.ch05.two_layer_net import TwoLayerNet

# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

# take a tiny batch and compare the gradients from numerical differentiation
# against those from backpropagation
x_batch = x_train[:3]
t_batch = t_train[:3]

grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)

# mean absolute difference per parameter; values close to zero indicate that
# the backpropagation implementation is correct
for key in grad_numerical.keys():
    diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + ":" + str(diff))
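
# For reference, the numerical gradient used above is typically a central
# difference, (f(x + h) - f(x - h)) / (2h), applied element-wise; a minimal
# standalone sketch (not the book's implementation):
def numerical_gradient_sketch(f, x, h=1e-4):
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp = x[idx]
        x[idx] = tmp + h
        fxh1 = f(x)  # f(x + h)
        x[idx] = tmp - h
        fxh2 = f(x)  # f(x - h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp  # restore the original value
        it.iternext()
    return grad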
Example #5
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # allow importing modules from the parent directory
import numpy as np
from DeepLearning.dataset.mnist import load_mnist
from PIL import Image


def img_show(img):
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()


(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True,
                                                  normalize=False)

img = x_train[0]
label = t_train[0]
print('label = {0}'.format(label))  # 5

print('x_train[0].shape = {0}'.format(img.shape))  # (784,)
img = img.reshape(28, 28)  # reshape back to the original 28x28 image
print('x_train[0].reshape(28, 28).shape = {0}'.format(img.shape))  # (28, 28)

img_show(img)

print('t_train[20] = {0}'.format(t_train[20]))
img = x_train[20]
img = img.reshape(28, 28)
img_show(img)
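
# The same image can also be shown with matplotlib instead of PIL; a minimal
# alternative sketch (plt.imshow with a grayscale colormap).
import matplotlib.pyplot as plt
plt.imshow(img, cmap='gray')
plt.title('label = {0}'.format(t_train[20]))
plt.axis('off')
plt.show()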