def loadData():
    """Load the MNIST train/test splits, print their sizes, and return them.

    Returns:
        tuple: (train split, test split) as provided by mnist_data_loader.
    """
    dataset = mnist_data_loader.read_data_sets("./MNIST_data/",
                                               one_hot=False)

    training_split = dataset.train   # training dataset
    testing_split = dataset.test     # test dataset

    print("Training dataset size: ", training_split.num_examples)
    print("Test dataset size: ", testing_split.num_examples)
    return training_split, testing_split
# Example 2 ("Esempio n. 2")
    def dataload(self):
        """Populate training and test batches as attributes on this object.

        Sets self.input/self.label to one training mini-batch of
        self.batch_size examples, and self.test_input/self.test_label to a
        fixed 1000-example batch from the test split.
        """
        # Data Preprocessing
        dataset = mnist_data_loader.read_data_sets("./MNIST_data/")

        # train dataset: draw one mini-batch for this step
        self.input, self.label = dataset.train.next_batch(self.batch_size)

        # test dataset: fixed-size evaluation batch
        self.test_input, self.test_label = dataset.test.next_batch(1000)
# Example 3 ("Esempio n. 3")
    # NOTE(review): this is a fragment — the enclosing function's start is
    # outside this view, so img_map_six's origin cannot be confirmed here.
    plt.savefig("number_a.png", bbox_inches='tight')
    plt.show()

    # presumably img_map_six is a flattened 784-pixel MNIST digit — TODO
    # confirm against the (unseen) start of this function.
    plt.imshow(np.reshape(img_map_six, [28, 28]), cmap='gray')
    plt.savefig("number_b.png", bbox_inches='tight')
    plt.show()


def normalize(y):
    """Min-max scale *y* onto [0, 1], then truncate to integers.

    Because of the final int cast, only entries equal to the maximum
    survive as 1 and every other entry collapses to 0 — i.e. the result
    is a binary mask of the maximum value(s). This matches the original
    implementation's behavior and is preserved deliberately.

    Fix over the original: a constant input (max == min) previously
    performed a 0/0 division, producing NaNs whose int cast is undefined;
    it now returns all zeros.

    Parameters
    ----------
    y : array-like of numbers (lists are accepted via asarray).

    Returns
    -------
    np.ndarray of ints in {0, 1}, same shape as *y*.
    """
    y = np.asarray(y)
    y_min = np.min(y)
    y_max = np.max(y)
    if y_max == y_min:
        # Degenerate case: avoid 0/0 -> NaN -> garbage int cast.
        return np.zeros_like(y, dtype=int)
    y_norm = (y - y_min) / (y_max - y_min)
    return y_norm.astype(int)


if __name__ == "__main__":

    # Load MNIST with integer (non-one-hot) labels from the local data dir.
    mnist_dataset = mnist_data_loader.read_data_sets("./MNIST_data/",
                                                     one_hot=False)
    # training dataset
    train_set = mnist_dataset.train
    # train_set.labels = normalize(train_set.labels)
    # test dataset
    test_set = mnist_dataset.test
    # test_set.labels = normalize(test_set.labels)
    print("Training dataset size: ", train_set.num_examples)
    print("Test dataset size: ", test_set.num_examples)

    # Training hyperparameters: mini-batch size, epoch budget, and the
    # regularization strength (presumably L2 — confirm in the training
    # loop, which continues beyond this view).
    batch_size = 200
    max_epoch = 100
    reg = 1e-5

    # Per-iteration records, presumably appended to by the training loop
    # that follows this excerpt — TODO confirm.
    loss_history = []
    acc_history = []