Example #1
# Suppressing overfitting with weight decay
import os, sys
sys.path.append(os.pardir)
import numpy as np
import matplotlib.pyplot as plt
from Dataset.mnist import load_mnist
from common.optimizer import SGD
from common.multi_layer_net import MultiLayerNet

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)

# deliberately shrink the training set to 300 samples to provoke overfitting
x_train = x_train[:300]
t_train = t_train[:300]

weight_decay_lambda = 0.1  # strength of the L2 penalty; 0 disables weight decay

network = MultiLayerNet(input_size=784,
                        hidden_size_list=[100, 100, 100, 100, 100, 100],
                        output_size=10,
                        weight_decay_lambda=weight_decay_lambda)
optimizer = SGD(lr=0.01)
max_epochs = 201
train_size = x_train.shape[0]
batch_size = 100

train_loss_list = []
train_acc_list = []
test_acc_list = []

iter_per_epoch = max(train_size / batch_size, 1)
epoch_cnt = 0
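
The cell above stops after initializing the counters, so the training loop itself is missing. Below is a minimal sketch of the loop this setup points to, assuming MultiLayerNet exposes gradient() and accuracy() as in the book's common/multi_layer_net.py (a hypothetical completion, not the author's original code):

for i in range(1000000000):
    # sample a random mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # gradient() folds the weight-decay term into the backward pass
    grads = network.gradient(x_batch, t_batch)
    optimizer.update(network.params, grads)

    if i % iter_per_epoch == 0:
        train_acc_list.append(network.accuracy(x_train, t_train))
        test_acc_list.append(network.accuracy(x_test, t_test))
        epoch_cnt += 1
        if epoch_cnt >= max_epochs:
            break

With weight_decay_lambda = 0.1 the loss gains a 0.5 * lambda * W**2 penalty per weight matrix, which keeps the weights small and narrows the train/test accuracy gap that training on only 300 samples would otherwise produce.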
Example #2
# Picking a random mini-batch from the MNIST training data
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import numpy as np
from Dataset.mnist import load_mnist

if __name__ == "__main__":
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                      one_hot_label=True)

    train_size = x_train.shape[0]  # size of the training set: 60,000 for MNIST

    batch_size = 10
    batch_mask = np.random.choice(train_size, batch_size)
    # randomly select batch_size indices from 0 to train_size-1

    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    print(x_batch)
    print(t_batch)
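
One caveat: np.random.choice samples with replacement by default, so a mini-batch drawn this way can contain the same index more than once. To draw batch_size distinct examples, pass replace=False:

    batch_mask = np.random.choice(train_size, batch_size, replace=False)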
Example #3
import os, sys
sys.path.append(os.pardir)
from Dataset.mnist import load_mnist

def get_data():
    (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=True, one_hot_label=False)
    return x_test, t_test
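
A quick sanity check of what get_data() returns; the shapes below are those of the standard MNIST test split, and the __main__ guard is illustrative rather than part of the original:

if __name__ == "__main__":
    x, t = get_data()
    print(x.shape)  # (10000, 784): each 28x28 test image flattened
    print(t.shape)  # (10000,): integer class labels, since one_hot_label=False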