def get_data():
  """Return the MNIST test split, normalized and flattened.

  >>> x_test, t_test = get_data()
  >>> np.shape(x_test)
  (10000, 784)
  >>> np.shape(t_test)
  (10000,)
  """
  # The training split is loaded but intentionally discarded.
  _train_split, test_split = load_mnist(normalize=True, flatten=True,
                                        one_hot_label=False)
  return test_split
# Example #2 (score: 0)
#!/usr/bin/env python3
import numpy as np
from lib.mnist import load_mnist
from chap6.multi_layer_net import MultiLayerNet
from chap6.overfit_weight_decay import train
from optimizers import SGD

# Load MNIST with one-hot labels, then keep only 300 training samples to
# deliberately provoke overfitting for this experiment.
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                  one_hot_label=True)
x_train = x_train[:300]
t_train = t_train[:300]
optimizer = SGD(lr=0.01)

# Hyperparameters — presumably read as module globals by the imported
# train() helper, since train(network) takes no other arguments; TODO confirm.
max_epochs = 201
train_size = x_train.shape[0]
batch_size = 100

# Baseline: a deep fully-connected net (6 hidden layers of 100) with no
# regularization — expected to overfit the 300-sample training set.
network = MultiLayerNet(input_size=784,
                        hidden_size_list=[100, 100, 100, 100, 100, 100],
                        output_size=10)
(train_loss_list, train_acc_list, test_acc_list) = train(network)
# Comparison run: the same architecture with dropout (ratio 0.2) enabled.
# NOTE(review): the *_decay names suggest this was adapted from a
# weight-decay experiment; here the regularizer is dropout.
network = MultiLayerNet(input_size=784,
                        hidden_size_list=[100, 100, 100, 100, 100, 100],
                        output_size=10,
                        use_dropout=True,
                        dropout_ratio=0.2)
(train_loss_list_decay, train_acc_list_decay,
 test_acc_list_decay) = train(network)

# draw out (plotting presumably follows; truncated in this chunk)
# Example #3 (score: 0)
        return acc / x.shape[0]


def _test():
    import doctest
    doctest.testmod()


if __name__ == "__main__":
    _test()
    print("start mnist training")
    from chap6.optimizers import SGD
    from lib.mnist import load_mnist

    (x_train, t_train), (x_test, t_test) = load_mnist(one_hot_label=True,
                                                      flatten=False)
    optimizer = SGD()

    max_epochs = 201
    train_size = x_train.shape[0]
    batch_size = 100

    def train(network):
        train_loss_list = []
        train_acc_list = []
        test_acc_list = []

        iter_per_epoch = max(train_size / batch_size, 1)
        print(iter_per_epoch)
        epoch_cnt = 0
# Example #4 (score: 0)
import numpy as np
from lib.mnist import load_mnist
from lib.network import TwoLayerNet

# Load the flattened, one-hot-labelled MNIST splits.
# BUG FIX: the flags were previously passed as the *strings* 'True', which
# only worked by accident (any non-empty string is truthy — the string
# 'False' would also have enabled them). Pass real booleans.
(x_train, t_train), (x_test, t_test) = load_mnist(path='dataset',
                                                  one_hot_label=True,
                                                  flatten=True)

# Restore previously-saved weights and sanity-check a single prediction
# against its one-hot ground-truth label.
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
network.restore_network()
print(np.argmax(network.predict(x_test[0])))
print(t_test[0])
        failures = []
        for idx in range(x_test.shape[0]):
            if y[idx] != t_test[idx]:
                failures.append((x_test[idx], y[idx], t_test[idx]))
        for i in range(min(len(failures), 60)):
            img, y, _ = failures[i]
            if (i % 10 == 0): print()
            print(y, end=", ")
            img = img.reshape(28, 28)
            plt.subplot(6, 10, i + 1)
            plt.imshow(img, cmap='gray')
        print()
        plt.show()


(x_train, t_train), (x_test, t_test) = load_mnist()
# Per-layer optimizer options. beta1/beta2 are Adam-style parameters and are
# presumably ignored by the 'sgd' update rule chosen below — TODO confirm.
options = {
    'weight_decay_lambda': 0,
    'learning_rate': 0.01,
    'beta1': 0.9,
    'beta2': 0.999
}
# Shrink the training set to 300 random samples to induce overfitting.
# NOTE(review): np.random.choice defaults to replace=True, so the 300 picks
# may contain duplicates — replace=False may be intended; verify.
overfitmask = np.random.choice(x_train.shape[0], 300)
x_train = x_train[overfitmask]
t_train = t_train[overfitmask]
# 784 -> 50 -> 10 fully-connected net: He initialization before the ReLU
# layer, Xavier before the softmax output layer; both layers updated by SGD.
network = Network2()
network.add(Dense(784, 50, options, 'sgd', 'he'))
network.add(Relu())
network.add(Dense(50, 10, options, 'sgd', 'xavier'))
network.add(SoftmaxWithLoss())