Example #1
def main():
    (x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

    network = DeepConvNet()
    trainer = Trainer(network, x_train, t_train, x_test, t_test,
                      epochs=20, mini_batch_size=100,
                      optimizer='Adam', optimizer_param={'lr':0.001},
                      evaluate_sample_num_per_epoch=1000)
    trainer.train()

    # Save the parameters
    network.save_params("deep_convnet_params.pkl")
    print("Saved Network Parameters!")
def gradient_check():
    (x_train, t_train), (_, _) = \
        load_mnist(normalize=True, one_hot_label=True)

    network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

    batch_size = 3
    x_batch = x_train[:batch_size]
    t_batch = t_train[:batch_size]
    grad_numerical = network.numerical_gradient(x_batch, t_batch)
    grad_backprop = network.gradient(x_batch, t_batch)

    for key in grad_numerical.keys():
        diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
        print(key + ": " + str(diff))
def train():
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

    network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

    iters_num = 20000
    train_size = x_train.shape[0]
    batch_size = 100

    train_loss_list = []
    train_acc_list = []
    test_acc_list = []
    iter_per_epoch = max(train_size / batch_size, 1)
    epoch_list = []

    # Create the optimizer once; re-creating Adam inside the loop would reset its moment estimates.
    optimizer = Adam()
    for i in range(iters_num):
        batch_mask = np.random.choice(train_size, batch_size)
        x_batch = x_train[batch_mask]
        t_batch = t_train[batch_mask]

        network.train(x_batch, t_batch, optimizer)

        loss = network.loss(x_batch, t_batch)
        train_loss_list.append(loss)

        if i % iter_per_epoch == 0:
            epoch_list.append(i/iter_per_epoch)
            train_acc = network.accuracy(x_train, t_train)
            test_acc = network.accuracy(x_test, t_test)
            train_acc_list.append(train_acc)
            test_acc_list.append(test_acc)
            print("train accuracy : " + str(train_acc) + ", test accuracy : " + str(test_acc))
    print("final loss : " + str(loss))
    draw_acc_graph(epoch_list, train_acc_list, test_acc_list)
    draw_loss_graph(train_loss_list)
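For reference, gradient_check above compares backprop gradients against numerically computed ones. A minimal central-difference sketch of such a numerical_gradient routine (illustrative, not necessarily the exact implementation behind TwoLayerNet):

import numpy as np

def numerical_gradient(f, x, h=1e-4):
    # Central difference: df/dx_i ~= (f(x + h*e_i) - f(x - h*e_i)) / (2h)
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + h
        fxh1 = f(x)
        x[idx] = orig - h
        fxh2 = f(x)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = orig  # restore the original value
        it.iternext()
    return grad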
Example #4
def get_mnist_data():
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
    return x_train, t_train, x_test, t_test
Example #5
def get_data():
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
    return x_test, t_test
Example #6

if __name__ == '__main__':
    np.random.seed(2020)
    # 1-D softmax test
    a = np.random.randint(10, size=5)
    print(a)
    print(softmax(a))

    # 2-D softmax test
    A = np.random.randint(10, size=(2, 3))
    print(A)
    print(softmax(A))

    # Load the (train/test) datasets.
    (X_train, y_train), (X_test, y_test) = load_mnist()
    print('X_test.shape :', X_test.shape)
    print('y_test.shape :', y_test.shape)
    # print(X_test[0])
    # print(y_test[0])

    # Create the network (W1, b1, ...)
    with open('sample_weight.pkl', 'rb') as f:
        network = pickle.load(f)
    print('network:', network.keys())
    print('W1:', network['W1'].shape)
    print('W2:', network['W2'].shape)
    print('W3:', network['W3'].shape)

    batch_size = 100
    y_pred = mini_batch(network, X_test, batch_size)
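mini_batch is called here but not shown. A plausible sketch, assuming numpy is imported as np and a predict(network, x) helper that returns class scores for a batch:

def mini_batch(network, X, batch_size):
    # Predict batch by batch and collect the argmax class for each sample.
    y_pred = []
    for i in range(0, len(X), batch_size):
        scores = predict(network, X[i:i + batch_size])  # assumed helper
        y_pred.extend(np.argmax(scores, axis=1))
    return np.array(y_pred)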
Example #7
from PIL import Image
import numpy as np
import os
import sys

os.chdir(
    '/Users/edamame88/Projects/study/Deep01/deep-learning-from-scratch/ch03')

sys.path.append(os.pardir)
from dataset.mnist import load_mnist


def img_show(img):
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()


(x_train, t_train), (x_test, t_test) = \
    load_mnist(flatten=True, normalize=False)

img = x_train[0]
label = t_train[0]

print(label)
img = img.reshape(28, 28)
print(img.shape)

img_show(img)
Example #8
    def getData(self):
        (x_train, y_train), (x_test, y_test) = load_mnist(False, True)
        return x_test, y_test
Example #9
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
# module for displaying images
from PIL import Image


def img_show(img):
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()


(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True,
                                                  normalize=False)

img = x_train[0]
label = t_train[0]
print(label)

# (784, )
print(img.shape)
# Reshape from (784,) to (28, 28).
img = img.reshape(28, 28)
# (28, 28)
print(img.shape)

img_show(img)
Example #10
def get_data():
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
    return x_test, t_test
Example #11
def SGD_Two_layers(hidden=50):
    from TwoLayerNet import TwoLayerNet
    from dataset.mnist import load_mnist
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

    network = TwoLayerNet(input_size=784, hidden_size=hidden, output_size=10)
    optimizer = SGD(0.1)

    # hyper-parameters
    iters_num = 10000
    train_size = x_train.shape[0]
    batch_size = 100

    # history recorded during training
    train_loss_list = []
    train_acc_list = []
    test_acc_list = []

    # epoch
    iter_per_epoch = max(train_size / batch_size, 1)

    for i in range(iters_num):
        batch_mask = np.random.choice(train_size, batch_size)
        x_batch = x_train[batch_mask]
        t_batch = t_train[batch_mask]

        # grad = network.numerical_gradient(x_batch, t_batch)
        grad = network.gradient(x_batch, t_batch)
        params = network.params
        # update the parameters using SGD
        optimizer.update(params, grad)

        # record the training progress
        loss = network.loss(x_batch, t_batch)
        train_loss_list.append(loss)

        if i % iter_per_epoch == 0:
            train_acc = network.accuracy(x_train, t_train)
            test_acc = network.accuracy(x_test, t_test)
            train_acc_list.append(train_acc)
            test_acc_list.append(test_acc)
            print("\n===========Iter_per_epoch===================")
            print("Iteration : {}".format(i));
            print("train accuracy : {}".format(train_acc));
            print("test accuracy : {}".format(test_acc));
            print("============================================\n");

    # Final Report
    train_acc = network.accuracy(x_train, t_train)
    test_acc = network.accuracy(x_test, t_test)
    print("\n===========Final Result===================");
    print("The number of neurons in the hidden layer : {}".format(hidden))
    print("train accuracy : {}".format(train_acc));
    print("test accuracy : {}".format(test_acc));
    print("============================================\n");

    # plot the graph
    import matplotlib.pyplot as plt
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 20))

    x = np.arange(len(train_acc_list))
    ax1.plot(x, train_acc_list, label='train acc')
    ax1.plot(x, test_acc_list, label='test acc', linestyle='--')
    ax1.set_xlabel("epochs")
    ax1.set_ylabel("accuracy")
    ax1.set_ylim(0, 1.0)
    ax1.legend(loc='lower right')
    ax1.set_title("Accuracy")

    num_itr = np.arange(len(train_loss_list))
    ax2.plot(num_itr, train_loss_list)
    ax2.set_xlabel("iteration")
    ax2.set_ylabel("loss")
    ax2.set_ylim(0, 3.0)
    ax2.set_title("Loss per iteration")

    plt.show()
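The SGD class used above follows the usual update(params, grads) interface; a minimal sketch:

class SGD:
    def __init__(self, lr=0.01):
        self.lr = lr

    def update(self, params, grads):
        # Update in place so the network's parameter dict is modified directly.
        for key in params:
            params[key] -= self.lr * grads[key]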
Example #12
import Ooptimizer as opt
import numpy as np
from GraphLayer import *
from collections import OrderedDict
from Common import *
from TwoLayerNet_BackProp import *

from dataset.mnist import load_mnist

np.random.seed(1)
(x_train, y_train), (x_test, y_test) = load_mnist(True, True, True)

epoch = 200

x_test = x_test[:100]
y_test = y_test[:100]

inputSize = 784
hiddenSize = 50
outputSize = 10

network = Twolayernet_BackProp(inputSize, hiddenSize, outputSize)

predictList = []
Losslist = []
AccList = []

for i in range(epoch):
    res = network.Predict(x_test)
    grad = network.BGradient(x_test, y_test)
Example #13
# - A dataset of handwritten digits
# - Consists of digit images for 0-9: 60,000 training samples
#   and 10,000 test samples.
# - 28x28 pixels each

import numpy as np
from dataset.mnist import load_mnist
from PIL import Image  # the Image (Pillow) package must be installed separately, e.g. via the Anaconda Prompt


def img_show(img):
    pil_img = Image.fromarray(np.uint8(img))  # uint8: like byte in Java
    pil_img.show()


# (images, labels)
(x_train, t_train),(x_test, t_test) = \
    load_mnist(flatten=True, normalize=False, one_hot_label=False)

img = x_train[100]
print(img.shape)  # (784,)

for i in range(10):
    img = x_train[i]
    label = t_train[i]
    print(label)
    print(img.shape)

img = img.reshape(28, 28)  # reshape back to the original image size (reshape returns a new array)
img_show(img)
Example #14
# coding: utf-8
import os
import sys

sys.path.append(os.pardir)
import numpy as np
import pandas as pd
from dataset.mnist import load_mnist
import progressbar
import bayesian_tools as tools

# Load mnist Data
(xTrain, tTrain), (xTest, tTest) = load_mnist(normalize=True)

# Read Previously Trained Data for gaussian process regression
train_history = pd.read_csv("Training Results.csv")
# DataFrame.as_matrix() was removed in pandas 1.0; select columns and call to_numpy() instead
x_gaussian = train_history[
    ["dropout_ratio", "weight_decay", "learning_rate"]].to_numpy()
y_gaussian = train_history[["test_acc"]].to_numpy()

# Start Optimization
iteration_count = 30
axes = np.linspace(0, 0.2, num=15)
# TODO: there should be a more effective way to explore the grid
search_grid = tools.create_3d_grid(axes)
training_history = []

with progressbar.ProgressBar(max_value=iteration_count) as bar:
    for i in np.arange(iteration_count):
        # Track the best accuracy observed so far (np.max; np.argmax would return an index)
        best_accuracy = np.max(y_gaussian)
def MultiLayerNet_MNIST_example():
    from dataset.mnist import load_mnist
    import Optimizer as op

    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                      one_hot_label=True)

    network = MultiLayerNet(input_size=784,
                            hidden_size_list=[150, 100],
                            output_size=10,
                            activation="relu",
                            weight_init_std='relu',
                            weight_decay_lambda=0,
                            use_dropout=True,
                            dropout_ratio=0.5,
                            use_batchNorm=True)

    # hyper-parameter
    iters_num = 10000

    train_size = x_train.shape[0]
    batch_size = 100
    learning_rate = 0.1

    optimizer = op.SGD(learning_rate)

    # data memory during training
    train_loss_list = []
    train_acc_list = []
    test_acc_list = []

    # epoch
    iter_per_epoch = max(train_size / batch_size, 1)

    network.showNetwork()

    for i in range(iters_num):

        batch_mask = np.random.choice(train_size, batch_size)
        x_batch = x_train[batch_mask]
        t_batch = t_train[batch_mask]

        # grad = network.numerical_gradient(x_batch, t_batch);
        grad = network.gradient(x_batch, t_batch)
        params = network.params

        # update parameters by SGD
        optimizer.update(params, grad)

        # for key in network.params.keys():
        #     network.params[key] -= learning_rate * grad[key];

        # record the train procedure
        loss = network.loss(x_batch, t_batch)
        train_loss_list.append(loss)

        if i % iter_per_epoch == 0:
            train_acc = network.accuracy(x_train, t_train)
            test_acc = network.accuracy(x_test, t_test)
            train_acc_list.append(train_acc)
            test_acc_list.append(test_acc)
            print("\n===========Iter_per_epoch===================")
            print("Iteration : {}".format(i))
            print("train accuracy : {}".format(train_acc))
            print("test accuracy : {}".format(test_acc))
            print("============================================\n")

    # Final Report
    train_acc = network.accuracy(x_train, t_train)
    test_acc = network.accuracy(x_test, t_test)
    print("\n===========Final Result===================")
    print("The number of neurons in the hidden layer : {}".format(
        network.hidden_size_list))
    print("train accuracy : {}".format(train_acc))
    print("test accuracy : {}".format(test_acc))
    print("============================================\n")

    # plot the graph
    import matplotlib.pyplot as plt
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 20))

    x = np.arange(len(train_acc_list))
    ax1.plot(x, train_acc_list, label='train acc')
    ax1.plot(x, test_acc_list, label='test acc', linestyle='--')
    ax1.set_xlabel("epochs")
    ax1.set_ylabel("accuracy")
    ax1.set_ylim(0, 1.0)
    ax1.legend(loc='lower right')
    ax1.set_title("Accuracy")

    num_itr = np.arange(len(train_loss_list))
    ax2.plot(num_itr, train_loss_list)
    ax2.set_xlabel("iteration")
    ax2.set_ylabel("loss")
    ax2.set_ylim(0, 3.0)
    ax2.set_title("Loss per iteration")

    plt.show()

    print("Save the network information....................")
    network.save_params("MultiLayer_params_SGD_01.pkl")
    print("saved parameters!!!!")
Example #16
        # self.x_col.dot(self.W_col)

        out = out.reshape(n, oh, ow, -1).transpose(0, 3, 1, 2)
        return out


if __name__ == '__main__':
    # Create a Convolution layer
    W = np.zeros(
        (1, 1, 4, 4),
        dtype=np.uint8)  # (number of filters, number of channels, fh, fw); dtype: 8-bit unsigned integer
    W[0, 0, 1, :] = 1
    b = np.zeros(1)
    conv = Convolution(W, b)  # call the Convolution class constructor
    # Forward MNIST data through it
    (x_train, y_train), (x_test, y_test) = load_mnist(normalize=False,
                                                      flatten=False)
    # Convert the downloaded image data to an ndarray and forward it
    input = x_train[0:1]  # use slicing to extract 4-D data
    print('input:', input.shape)

    out = conv.forward(input)
    print('out:', out.shape)
    img = out.squeeze()  # squeeze drops axes whose size is 1
    print('img:', img.shape)
    plt.imshow(img, cmap='gray')
    plt.show()

    img = Image.open('pengsoo.jpg')

    img_pixel = np.array(img)
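The truncated forward at the top of this example reshapes an im2col matrix product back to (N, C, H, W). A minimal sketch of the whole forward pass under that convention, assuming an im2col helper like the one in the book's common/util.py:

def conv_forward(x, W, b, stride=1, pad=0):
    n, c, h, w = x.shape
    fn, _, fh, fw = W.shape
    oh = (h + 2 * pad - fh) // stride + 1
    ow = (w + 2 * pad - fw) // stride + 1
    col = im2col(x, fh, fw, stride, pad)  # (n*oh*ow, c*fh*fw)
    W_col = W.reshape(fn, -1).T           # (c*fh*fw, fn)
    out = np.dot(col, W_col) + b          # (n*oh*ow, fn)
    return out.reshape(n, oh, ow, -1).transpose(0, 3, 1, 2)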
Example #17
import numpy as np
import matplotlib.pyplot as plt

from ch05.e10_twolayer import TwoLayerNetwork
from ch06.e05_Adam import Adam
from dataset.mnist import load_mnist

if __name__ == '__main__':
    (X_train, Y_train), (X_test, Y_test) = load_mnist()

    neural_net = TwoLayerNetwork(input_size=784,
                                 hidden_size=32,
                                 output_size=10)
    train_losses = []

    iterations = 2_000
    batch_size = 128
    train_size = X_train.shape[0]

    np.random.seed(111)
    adam = Adam()
    for i in range(iterations):
        batch_mask = np.random.choice(train_size, batch_size)
        X_batch = X_train[batch_mask]
        Y_batch = Y_train[batch_mask]

        gradients = neural_net.gradient(X_batch, Y_batch)
        adam.update(neural_net.params, gradients)

        loss = neural_net.loss(X_batch, Y_batch)
        train_losses.append(loss)
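The Adam optimizer imported from ch06.e05_Adam is not shown; a standard sketch consistent with the update(params, grads) call above (bias-corrected first and second moments):

class Adam:
    def __init__(self, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
        self.lr, self.beta1, self.beta2, self.eps = lr, beta1, beta2, eps
        self.m, self.v, self.t = None, None, 0

    def update(self, params, grads):
        if self.m is None:
            self.m = {k: np.zeros_like(v) for k, v in params.items()}
            self.v = {k: np.zeros_like(v) for k, v in params.items()}
        self.t += 1
        for k in params:
            self.m[k] = self.beta1 * self.m[k] + (1 - self.beta1) * grads[k]
            self.v[k] = self.beta2 * self.v[k] + (1 - self.beta2) * grads[k] ** 2
            m_hat = self.m[k] / (1 - self.beta1 ** self.t)
            v_hat = self.v[k] / (1 - self.beta2 ** self.t)
            params[k] -= self.lr * m_hat / (np.sqrt(v_hat) + self.eps)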
Example #18
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting to allow imports from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

iters_num = 10000  # set the number of iterations as appropriate
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []

iter_per_epoch = max(train_size / batch_size, 1)

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    
    # Compute the gradient (by numerical differentiation, which is slow but simple)
    grad = network.numerical_gradient(x_batch, t_batch)
Example #19

def hand_craft(old_X):
    new_X = list()  # list to hold the transformed data
    for n in range(old_X.shape[0]):  # iterate over every sample
        temp = list()  # list to hold the transformed features of one sample
        for i in range(0, old_X[n].shape[0], 4):  # group the features four at a time
            total = 0  # sum of the group (renamed to avoid shadowing the built-in sum)
            for j in range(4):
                total += old_X[n][i + j]  # accumulate the float values directly
            temp.append(total)
        new_X.append(temp)
    return np.array(new_X)  # convert the list to a numpy array

(x_train, t_train), (x_test, t_test) = \
    load_mnist(normalize=True, flatten=True)

label_name = np.array(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])

hc_x_train = hand_craft(x_train)  # pass the original data in, get back data with transformed features
hc_x_test = hand_craft(x_test)  # same transformation for the test data

K_list = [3]  # with this many features the test is slow, so only K=3 is simulated
size = 50
sample = np.random.randint(0, t_test.shape[0], size)

for K in K_list:  # run the test for every K in the list

    knn_iris = KNN(
        K, hc_x_train, t_train,
        label_name)  # create the KNN class from K, the training values, and the label names
def get_data():
    # Return MNIST data.
    # (train images, train labels), (test images, test labels)
    # normalize=True scales the values into the range 0.0-1.0
    (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=True, one_hot_label=False)
    return x_test, t_test
Example #21
import sys, os

sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from PIL import Image


def img_show(img):
    pil_img = Image.fromarray(np.uint8(img))  # np.uint8, not np.uint: PIL expects 8-bit pixel values
    pil_img.show()


(x_train, t_train), (x_test,
                     t_test) = load_mnist(flatten=True,
                                          normalize=False)  # stored as 1-D arrays, no normalization

print(x_train.shape)  # training data
print(t_train.shape)  # training labels (answers)
print(x_test.shape)  # test data
print(t_test.shape)  # test labels (answers)

img = x_train[0]  # take the 0th training image
label = t_train[0]  # take the 0th training label
print(label)  # 5

print(img.shape)  # (784,) - a 1-D array
img = img.reshape(28, 28)  # reshape back to the original image shape
print(img.shape)  # (28, 28)

img_show(img)
Example #22
# coding: utf-8
import sys, os
sys.path.append(os.pardir)
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.multi_layer_net import MultiLayerNet
from common.optimizer import SGD, Adam

dataset_dir = os.path.dirname(os.path.abspath('__file__'))
save_file = dataset_dir + "/mnist.pkl"
(x_train, t_train), (x_test, t_test) = load_mnist(dataset_dir,
                                                  save_file,
                                                  normalize=True)

# use only a small amount of data
x_train = x_train[:1000]
t_train = t_train[:1000]

max_epochs = 20
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.01


def __train(weight_init_std):
    bn_network = MultiLayerNet(input_size=784,
                               hidden_size_list=[100, 100, 100, 100, 100],
                               output_size=10,
                               weight_init_std=weight_init_std,
                               use_batchnorm=True)
Example #23
def get_data():
    (x_train, t_train), (x_test, t_test) = mnist.load_mnist(normalize=True)
    return x_test, t_test
Example #24
"""
Created on Mon Feb 25 15:04:43 2019

@author:
"""

import sys, os

sys.path.append(os.pardir)
from dataset.mnist import load_mnist
import numpy as np
from common.functions import softmax, cross_entropy_error

(x_train,y_train),(x_test,y_test) = \
     load_mnist(normalize=True, one_hot_label=True)


class simpleNet:
    def __init__(self):
        self.w = np.random.randn(2, 3)

    def predict(self, x):
        return np.dot(x, self.w)

    def loss(self, x, t):
        z = self.predict(x)  # bug fix: predict takes only x; self is passed implicitly
        y = softmax(z)
        loss = cross_entropy_error(y, t)

        return loss
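simpleNet relies on softmax and cross_entropy_error from common.functions; minimal sketches consistent with how they are used here (numerically stabilized, batch-aware):

def softmax(x):
    # Subtract the row max for numerical stability; handles 1-D and 2-D input.
    x = x - np.max(x, axis=-1, keepdims=True)
    ex = np.exp(x)
    return ex / np.sum(ex, axis=-1, keepdims=True)

def cross_entropy_error(y, t):
    # t is one-hot here; the small epsilon avoids log(0).
    if y.ndim == 1:
        y = y.reshape(1, -1)
        t = t.reshape(1, -1)
    return -np.sum(t * np.log(y + 1e-7)) / y.shape[0]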
Example #25
if __name__ == '__main__':
    np.random.seed(2020)
    # 1-D softmax test
    a = np.random.randint(10, size=5)
    print(a)
    print(softmax(a))

    # 2-D softmax test
    A = np.random.randint(10, size=(2, 3))
    print(A)
    print(softmax(A))

    # Load the (train/test) datasets.
    (X_train, y_train), (X_test, y_test) = load_mnist(normalize=True,
                                                      flatten=True,
                                                      one_hot_label=False)
    print('X_test.shape:', X_test.shape)
    print('y_test.shape:', y_test.shape)

    # Create the network (W1, b1, ...)
    with open('sample_weight.pkl', 'rb') as file:
        network = pickle.load(file)
        W1, W2, W3 = network['W1'], network['W2'], network['W3']
        b1, b2, b3 = network['b1'], network['b2'], network['b3']
    print('network:', network.keys())
    print('W1:', network['W1'].shape)
    print('W2:', network['W2'].shape)
    print('W3:', network['W3'].shape)

    batch_size = 77
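The snippet cuts off before the inference loop; a plausible continuation, assuming a three-layer forward pass with a sigmoid helper (the layer structure matches the W1/W2/W3 weights unpacked above):

    accuracy_count = 0
    for i in range(0, len(X_test), batch_size):
        x = X_test[i:i + batch_size]
        z1 = sigmoid(x.dot(W1) + b1)  # sigmoid assumed available, e.g. from common.functions
        z2 = sigmoid(z1.dot(W2) + b2)
        y = softmax(z2.dot(W3) + b3)
        pred = np.argmax(y, axis=1)
        accuracy_count += np.sum(pred == y_test[i:i + batch_size])
    print('accuracy:', accuracy_count / len(X_test))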
Example #26
# coding: utf-8
import os
import sys
file_dir = os.path.dirname(__file__)  # abs file dir
file_pdir = os.path.join(os.path.split(file_dir)[0])  # abs pre file dir
print(file_pdir)
sys.path.append(file_pdir)  # setting to allow imports from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.multi_layer_net_extend import MultiLayerNetExtend
from common.trainer import Trainer

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)

# Reduce the training data to reproduce overfitting
x_train = x_train[:300]
t_train = t_train[:300]

# Whether to use dropout, and the dropout ratio =======
use_dropout = True  # set to False to run without dropout
dropout_ratio = 0.2
# ====================================================

network = MultiLayerNetExtend(input_size=784,
                              hidden_size_list=[100, 100, 100, 100, 100, 100],
                              output_size=10,
                              use_dropout=use_dropout,
                              dropout_ration=dropout_ratio)  # "dropout_ration" (sic) is the parameter name in the book's code
trainer = Trainer(network,
                  x_train,
Example #27
def get_data():
    (x_train, t_train), (x_test, t_test) = \
        load_mnist(normalize=True,flatten=True)

    return (x_test, t_test)
Example #28
import sys, os
sys.path.append("/Users/aualrxse/git/ME/Deep")  # 親ディレクトリのファイルをインポートするための設定
import numpy as np
from dataset.mnist import load_mnist

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                  one_hot_label=True)

print(x_train.shape)
print(t_train.shape)

train_size = x_train.shape[0]
batch_size = 10
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
Example #29
import tensorflow as tf
from dataset.mnist import load_mnist

(x_train, y_train), (x_test, y_test) = load_mnist(flatten=True,
                                                  normalize=True,
                                                  one_hot_label=True)

train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.repeat().shuffle(60000).batch(50)
train_data_iter = iter(train_data)


class CNN(tf.keras.Model):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv_layer1 = tf.keras.layers.Conv2D(filters=32,
                                                  kernel_size=5,
                                                  strides=1,
                                                  padding='same',
                                                  activation='relu')
        self.pool_layer1 = tf.keras.layers.MaxPool2D(pool_size=(2, 2),
                                                     strides=2)
        self.conv_layer2 = tf.keras.layers.Conv2D(filters=64,
                                                  kernel_size=5,
                                                  strides=1,
                                                  padding='same',
                                                  activation='relu')
        self.pool_layer2 = tf.keras.layers.MaxPool2D(pool_size=(2, 2),
                                                     strides=2)
        self.flatten_layer = tf.keras.layers.Flatten()
        self.fc_layer = tf.keras.layers.Dense(1024, activation='relu')
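The class is truncated before its output layer and call method; a plausible completion in the usual tf.keras subclassing style (the 10-unit output layer and the softmax output are assumptions):

        self.output_layer = tf.keras.layers.Dense(10, activation=None)

    def call(self, x):
        # The input arrives flattened (784,); restore the image shape for the conv stack.
        x = tf.reshape(x, [-1, 28, 28, 1])
        h = self.pool_layer1(self.conv_layer1(x))
        h = self.pool_layer2(self.conv_layer2(h))
        h = self.fc_layer(self.flatten_layer(h))
        return tf.nn.softmax(self.output_layer(h))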
Example #30
import sys, os
sys.path.append(os.pardir)
import numpy as np
import pickle
from dataset.mnist import load_mnist

(x_train, t_train), (x_test, t_test) = \
    load_mnist(normalize=True, one_hot_label=True)

print(x_train.shape)
print(t_train.shape)

train_size = x_train.shape[0]
batch_size = 10
batch_mask = np.random.choice(train_size,batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]

print(x_batch)
print(t_batch)
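Note that np.random.choice samples with replacement by default, so a mini-batch can contain duplicate rows; pass replace=False when distinct samples are required:

batch_mask = np.random.choice(train_size, batch_size, replace=False)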
Example #31
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting to allow imports from the parent directory
import numpy as np
from dataset.mnist import load_mnist
from PIL import Image


def img_show(img):
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)

img = x_train[0]
label = t_train[0]
print(label)  # 5

print(img.shape)  # (784,)
img = img.reshape(28, 28)  # reshape back to the original image size
print(img.shape)  # (28, 28)

img_show(img)
Example #32
for key, init_type in weight_init_types.items():  # init_type, to avoid shadowing the built-in "type"
    neural_nets[key] = MultiLayerNet(input_size=784,
                                     hidden_size_list=[100, 100, 100, 100],  # 4 layers and the neuron count of each
                                     output_size=10,
                                     weight_init_std=init_type)
                                     # activation='sigmoid')
    # the default activation is relu; when changed to sigmoid, the result was much worse


    train_losses[key] = []  # empty list to store the loss values during training

# there are two key dimensions here: the optimizer and the weight-init type;
# these still need to be organized

# Load the MNIST train/test data
(X_train, Y_train), (X_test, Y_test) = load_mnist(one_hot_label=True)

iterations = 2_000  # number of training iterations
batch_size = 128  # number of samples per update (mini-batch)
# optimizers = Sgd(learning_rate=0.01)
optimizers = Adam(lr=0.01)  # the optimizer, i.e. how the parameters are updated

# Initial values can matter, and depending on which optimization algorithm is used
# (regardless of the initial values), how much is learned can differ.
# Adam with this learning rate gave poor results here.

# optimizers = dict()
# optimizers['SGD'] = Sgd(learning_rate = 0.01) # parameter-optimization algorithms
# optimizers['Momentum'] = Momentum(lr = 0.01)
# optimizers['Adagrad'] = AdaGrad(lr = 0.01)
# optimizers['ADAM'] = Adam(lr = 0.01)
# optimizers['RMSprop'] = RMSprop(lr = 0.01)
Example #33
def get_data():
    (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)
    return x_test, t_test
# coding: utf-8

import sys, os
sys.path.append(os.pardir)  
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from PIL import Image

def img_show(img):
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False, one_hot_label = False)

# Collect the indices of every test image labeled 5.
pp = []
for i in range(len(t_test)):
    if t_test[i] == 5:
        pp.append(i)
print(type(pp))
print(len(pp))

j = 0
for i in pp:
    j += 1
    if j > 70:
        break
    img = x_test[i]
Example #35
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting to allow imports from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from deep_convnet import DeepConvNet
from dataset.mnist import load_mnist

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")

print("calculating test accuracy ... ")
#sampled = 1000
#x_test = x_test[:sampled]
#t_test = t_test[:sampled]

classified_ids = []

acc = 0.0
batch_size = 100

for i in range(int(x_test.shape[0] / batch_size)):
    tx = x_test[i * batch_size:(i + 1) * batch_size]
    tt = t_test[i * batch_size:(i + 1) * batch_size]
    y = network.predict(tx, train_flg=False)
    y = np.argmax(y, axis=1)
    classified_ids.append(y)
    acc += np.sum(y == tt)
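The loop above only accumulates the number of correct predictions; the snippet is cut off before the final step, which would look roughly like:

classified_ids = np.array(classified_ids).flatten()
acc = acc / x_test.shape[0]
print("test accuracy:", acc)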
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting to allow imports from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from simple_convnet import SimpleConvNet
from common.trainer import Trainer

# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

# Reduce the data if processing takes too long
#x_train, t_train = x_train[:5000], t_train[:5000]
#x_test, t_test = x_test[:1000], t_test[:1000]

max_epochs = 20

network = SimpleConvNet(input_dim=(1,28,28), 
                        conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                        hidden_size=100, output_size=10, weight_init_std=0.01)
                        
trainer = Trainer(network, x_train, t_train, x_test, t_test,
                  epochs=max_epochs, mini_batch_size=100,
                  optimizer='Adam', optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

# Save the parameters
network.save_params("params.pkl")
print("Saved Network Parameters!")
Example #37
from dataset.mnist import load_mnist

from PIL import Image
from image_print import ImagePrint

import random

#from simple_convnet import SimpleConvNet
#from trainer import Trainer

graph = ImagePrint(
    28, 28, 5, 6, 1,
    100)  # Picture Width, Height / Picture Count X, Y / Space / BG Color
(x_train,
 t_train), (x_test,
            t_test) = load_mnist(flatten=False)  # normalize defaults to True

count = x_train.shape[0]
count_out = 11

print("Study Image Count :", count)

num = random.randint(0, count - count_out)

for i in range(count_out):
    print("#{} label:{}".format(num + i, t_train[num + i]))
    graph.input_image(i, x_train[num + i, 0].reshape(-1), 255)

graph.img_show()
"""
max_epochs = 20
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting to allow imports from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.multi_layer_net_extend import MultiLayerNetExtend
from common.optimizer import SGD, Adam

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)

# Reduce the training data
x_train = x_train[:1000]
t_train = t_train[:1000]

max_epochs = 20
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.01


def __train(weight_init_std):
    bn_network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100, 100, 100, 100], output_size=10,
                                     weight_init_std=weight_init_std, use_batchnorm=True)
    network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100, 100, 100, 100], output_size=10,
                                  weight_init_std=weight_init_std)
    optimizer = SGD(lr=learning_rate)
    
    train_acc_list = []
    bn_train_acc_list = []
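__train is truncated here; in a batch-norm comparison like this one, the function would typically continue by training both networks on the same batches, roughly as follows (a sketch, not the exact original):

    iter_per_epoch = max(train_size / batch_size, 1)
    epoch_cnt = 0
    while epoch_cnt < max_epochs:
        for _ in range(int(iter_per_epoch)):
            batch_mask = np.random.choice(train_size, batch_size)
            x_batch = x_train[batch_mask]
            t_batch = t_train[batch_mask]
            # Train both networks on the same batch for a fair comparison.
            for net in (bn_network, network):
                grads = net.gradient(x_batch, t_batch)
                optimizer.update(net.params, grads)
        train_acc_list.append(network.accuracy(x_train, t_train))
        bn_train_acc_list.append(bn_network.accuracy(x_train, t_train))
        epoch_cnt += 1
    return train_acc_list, bn_train_acc_list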
    
Example #39
from PyQt5.QtWidgets import QLabel, QMessageBox, QPushButton, QFrame
from PyQt5.QtGui import QPainter, QPen, QPixmap, QColor, QImage
from PyQt5.QtCore import Qt, QPoint, QSize

from simple_convnet import SimpleConvNet
from common.functions import softmax
from deep_convnet import DeepConvNet
from dataset.mnist import load_mnist  # missing import: load_mnist is called below

MODE_MNIST = 1  # randomly sample from MNIST
MODE_WRITE = 2  # handwritten input

Thresh = 0.5  # confidence threshold for the recognition result

# Load the MNIST dataset
(_, _), (x_test, _) = load_mnist(normalize=True,
                                 flatten=False,
                                 one_hot_label=False)

# Initialize the networks

# Simple CNN
"""
conv - relu - pool - affine - relu - affine - softmax
"""
network = SimpleConvNet(input_dim=(1, 28, 28),
                        conv_param={
                            'filter_num': 30,
                            'filter_size': 5,
                            'pad': 0,
                            'stride': 1
                        },
Example #40
import os
import sys
import numpy as np
from PIL import Image
sys.path.append(os.pardir)
from dataset.mnist import load_mnist

def img_show(img):
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()

(x_train, t_train), (x_test, t_test) = \
    load_mnist(flatten=True, normalize=False)

print(x_train.shape)
print(t_train.shape)

print(x_test.shape)
print(t_test.shape)
img = x_train[0]
label = t_train[0]
print(label)

print(img.shape)
img = img.reshape(28,28)
print(img.shape)

img_show(img)
Example #41
def get_train_data():
    (x_train, t_train), (x_test, t_test) = \
        load_mnist(flatten=True, one_hot_label=True)

    return x_train, t_train
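A short usage sketch for this helper (the printed shapes follow from MNIST's 60,000 training samples and one_hot_label=True):

x_train, t_train = get_train_data()
print(x_train.shape)  # (60000, 784)
print(t_train.shape)  # (60000, 10) because the labels are one-hot encoded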