def __init__(self, random_state=None, dtype=np.float32):
        import mnist
        super().__init__(random_state)
        self.dtype = dtype

        mnist_dir = MNIST_DIR if os.path.isdir(MNIST_DIR) else None

        self.train_images = mnist.download_and_parse_mnist_file(
            'train-images-idx3-ubyte.gz', target_dir=mnist_dir)
        self.test_images = mnist.download_and_parse_mnist_file(
            't10k-images-idx3-ubyte.gz', target_dir=mnist_dir)

        # Scale pixel values from [0, 255] to [0, 1]
        self.train_images = self.train_images / 255.0
        self.test_images = self.test_images / 255.0
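
        # The constructor above only fetches the image files. A minimal sketch of
        # loading the label files the same way is shown below; the attribute names
        # train_labels/test_labels are assumptions, not part of the original class.
        self.train_labels = mnist.download_and_parse_mnist_file(
            'train-labels-idx1-ubyte.gz', target_dir=mnist_dir)
        self.test_labels = mnist.download_and_parse_mnist_file(
            't10k-labels-idx1-ubyte.gz', target_dir=mnist_dir)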
Example #2
def load_dataset() -> tuple:
    """Load the Fashion-MNIST IDX files and return
    (x_train, y_train, x_test, y_test, available_classes)."""
    path = 'dataset/Mnist_Fashion'
    x_train_file, y_train_file = 'train-images-idx3-ubyte', 'train-labels-idx1-ubyte'
    x_test_file, y_test_file = 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte'
    data_names = [x_train_file, y_train_file, x_test_file, y_test_file]
    data = [
        download_and_parse_mnist_file(fname, target_dir=path)
        for fname in data_names
    ]
    x_train, y_train, x_test, y_test = data
    # Add a trailing (channel) dimension to the arrays:
    # images should have shape (28, 28, 1), not (28, 28)
    x_train = np.expand_dims(x_train, axis=-1)
    y_train = np.expand_dims(y_train, axis=-1)
    x_test = np.expand_dims(x_test, axis=-1)
    y_test = np.expand_dims(y_test, axis=-1)

    available_classes = [
        'T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt',
        'Sneaker', 'Bag', 'Ankle boot'
    ]
    return x_train, y_train, x_test, y_test, available_classes
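
# A quick sanity check one might run (not part of the original example); it
# assumes the Fashion-MNIST IDX files are available under dataset/Mnist_Fashion.
if __name__ == '__main__':
    x_train, y_train, x_test, y_test, class_names = load_dataset()
    print(x_train.shape, y_train.shape)  # expected: (60000, 28, 28, 1) (60000, 1)
    print(x_test.shape, y_test.shape)    # expected: (10000, 28, 28, 1) (10000, 1)
    print(class_names)                   # the ten Fashion-MNIST class names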
Example #3
import mnist


def download(data):
    # Download (if necessary) and parse a single MNIST IDX file by name
    y = mnist.download_and_parse_mnist_file(data)
    return y
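
# Example call (an assumption, not in the original snippet): the training images
# parse to a (60000, 28, 28) uint8 array.
# train_images = download("train-images-idx3-ubyte.gz")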
Example #4
#!/usr/bin/env python
# coding: utf-8

# In[1]:

import numpy as np
import mnist
import matplotlib.pyplot as plt
from pylab import cm
X = mnist.download_and_parse_mnist_file(
    "train-images-idx3-ubyte.gz")  # training images: 60000 x 28 x 28
Y = mnist.download_and_parse_mnist_file(
    "train-labels-idx1-ubyte.gz")  # corresponding labels: 60000

# In[2]:

# Constants
SIZE = 28
d = SIZE * SIZE  # number of pixels per image
M = 100  # number of hidden-layer nodes
C = 10  # number of classes
B = 100  # batch size
RATE = 0.01  # learning rate η

# In[3]:


# Binarize each pixel value (0-255) to 0 or 1 with a threshold of 128
def Normalize(data):
    N = np.where(data < 128, 0, 1)
    return N
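
# Typical next step (an assumption, not part of the original excerpt): binarize
# the images and flatten each 28x28 image into a d-dimensional row vector.
# X_flat = Normalize(X).reshape(len(X), d)  # shape: (60000, 784)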
Example #5
import numpy as np
from keras.models import Model, Sequential
from keras.layers import Dense, Activation
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import mnist

train_images = mnist.download_and_parse_mnist_file(
    fname="train-images-idx3-ubyte.gz")
train_images = np.reshape(train_images, (len(train_images), 784))
standardScaler = StandardScaler()
train_images = standardScaler.fit_transform(train_images)

test_images = mnist.download_and_parse_mnist_file(
    fname="t10k-images-idx3-ubyte.gz")
test_images = np.reshape(test_images, (len(test_images), 784))
test_images = standardScaler.transform(test_images)

train_labels = mnist.download_and_parse_mnist_file(
    fname="train-labels-idx1-ubyte.gz")
train_labels = np.reshape(train_labels, (-1, 1))

test_labels = mnist.download_and_parse_mnist_file(
    fname="t10k-labels-idx1-ubyte.gz")

ohe = OneHotEncoder()
train_labels = ohe.fit_transform(train_labels).toarray()

model = Sequential()
model.add(Dense(units=32, input_shape=(784, ), activation="relu"))
model.add(Dense(units=10, activation="softmax"))
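
# The snippet stops after defining the layers. A minimal continuation one might
# use to train and evaluate it (the optimizer, epoch count and batch size below
# are arbitrary choices, not taken from the original example):
test_labels = ohe.transform(np.reshape(test_labels, (-1, 1))).toarray()
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(train_images, train_labels, epochs=5, batch_size=128, validation_split=0.1)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f"test accuracy: {test_acc:.4f}")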
Example #6
import numpy as np
import mnist

X_train = mnist.download_and_parse_mnist_file("train-images-idx3-ubyte.gz")
Y_train = mnist.download_and_parse_mnist_file("train-labels-idx1-ubyte.gz")
X_test = mnist.download_and_parse_mnist_file("t10k-images-idx3-ubyte.gz")
Y_test = mnist.download_and_parse_mnist_file("t10k-labels-idx1-ubyte.gz")

  
def shuffle_batches(X, Y, batch_size):
    # Shuffle the data and split it into mini-batches of size batch_size
    data_num = X.shape[0]
    mini_batches = []

    index = np.arange(data_num)
    np.random.shuffle(index)
    shuffled_X = X[index]
    shuffled_Y = Y[index]

    num_minibatches = data_num // batch_size
    for k in range(num_minibatches):
        mini_batch_X = shuffled_X[k * batch_size:(k + 1) * batch_size]
        mini_batch_Y = shuffled_Y[k * batch_size:(k + 1) * batch_size]
        mini_batch = (mini_batch_X, mini_batch_Y)  # e.g. ((100, 784), (100, 10)) after flattening/one-hot preprocessing
        mini_batches.append(mini_batch)

    return mini_batches

# b = shuffle_batches(X_test,Y_test,100)
# print(b[0][0].reshape(100,1,28,28).shape)

Example #7
import numpy as np
import matplotlib.pyplot as plt
import mnist

train_X = mnist.download_and_parse_mnist_file("train-images-idx3-ubyte.gz")
# print(type(np.array(train_X[0])))
# print(train_X.shape)
train_Y = mnist.download_and_parse_mnist_file("train-labels-idx1-ubyte.gz")
test_X = mnist.download_and_parse_mnist_file("t10k-images-idx3-ubyte.gz")
test_Y = mnist.download_and_parse_mnist_file("t10k-labels-idx1-ubyte.gz")
# print(len(test_X))

# from pylab import cm
# idx = 105
# plt.imshow(train_X[idx], cmap=cm.gray)
# plt.show()
# print (train_Y[idx])
# print(np.array(train_X[0]).reshape(1,784))


class params:
    # Parameters of a single affine layer: weights W (d x M) and bias b (1 x M)
    def __init__(self, M, d):
        np.random.seed(seed=32)
        self.W = np.random.normal(0, 1 / d, (d, M))
        self.b = np.random.normal(0, 1 / d, (1, M))
        self.eta = 0.01  # learning rate

    def update(self, dW, db):
        # Plain gradient-descent step
        self.W -= self.eta * dW
        self.b -= self.eta * db
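
# A minimal usage sketch (an assumption, not part of the original excerpt):
# the input-to-hidden affine transform with these parameters.
# p = params(M=100, d=28 * 28)
# hidden = (train_X.reshape(len(train_X), -1) / 255) @ p.W + p.b  # shape: (60000, 100)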
Example #8
import numpy as np
import mnist
import matplotlib.pylab as plt
from network3 import Network3
from pylab import cm

X = mnist.download_and_parse_mnist_file("t10k-images-idx3-ubyte.gz")
Y = mnist.download_and_parse_mnist_file("t10k-labels-idx1-ubyte.gz")

# Get user input
flag = True
while flag:
    i = input("Enter a number between 0 and 9999: ")
    if i.isdigit():
        i = int(i)
        if 0 <= i <= 9999:
            flag = False
        else:
            print("Invalid Input!")
    else:
        print("Invalid Input!")

# Specify network layer sizes
l0 = X.shape[1] * X.shape[2]
l1 = 50  # number of hidden-layer nodes
l2 = len(np.unique(Y))
sizes = np.array([l0, l1, l2])

# Build the network
net = Network3(sizes,
               batch_size=100,
Example #9
import numpy as np
import mnist
from Lena import Activation, Dense, Dropout, Model, Convolution2d, MaxPooling, AveragePooling, Input, EarlyStopping, ModelCheckpointMgr

x_test = mnist.download_and_parse_mnist_file('t10k-images-idx3-ubyte.gz',
                                             'data/')
y_test = mnist.download_and_parse_mnist_file('t10k-labels-idx1-ubyte.gz',
                                             'data/')
x_test = x_test[:, np.newaxis, :, :] / 255

model = Model()
model.add(Input((1, 28, 28)))
model.add(Convolution2d([10, 5, 5], padding='SAME'))
model.add(Activation('sigmoid'))
model.add(MaxPooling((2, 2)))
model.add(Convolution2d([15, 5, 5], padding='SAME'))
model.add(Activation('sigmoid'))
model.add(MaxPooling((2, 2)))
model.add(Convolution2d([20, 6, 6], padding='VALID'))
model.add(Activation('relu'))
model.add(MaxPooling((2, 2)))
model.add(Dense(20))
model.add(Activation('sigmoid'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.comp('gradient_descent', 'binary_crossentropy')

chpoint_mgr = ModelCheckpointMgr(model)
chpoint_mgr.LoadWeights('ConvolutionNN002006.txt')

Example #10
        formated_test_images = _test_images.reshape(len(_test_images), 1, 28, 28)
        y_hat, loss = first_net.predict(formated_test_images, one_hot_test_labels)
        y_hat = np.argmax(y_hat, axis=0)
        test_accuracy2 = accuracy_score(y_hat, _test_labels)
        test_accuracy_list.append(test_accuracy2)
        loss_list.append(loss)

    print(f' test accuracy: {np.mean(np.asarray(test_accuracy_list))}, average test loss: {np.mean(np.asarray(loss_list))}')


if __name__ == '__main__':
    mnist_root = '../data/'
    # train_images = mnist.download_and_parse_mnist_file(fname='train-images-idx3-ubyte.gz', target_dir=mnist_root)
    # train_images = (train_images / 255) - 0.5
    # train_labels = mnist.download_and_parse_mnist_file(fname='train-labels-idx1-ubyte.gz', target_dir=mnist_root)
    test_images = mnist.download_and_parse_mnist_file(fname='t10k-images-idx3-ubyte.gz', target_dir=mnist_root)
    # test_images = (test_images / 255) - 0.5
    test_labels = mnist.download_and_parse_mnist_file(fname='t10k-labels-idx1-ubyte.gz', target_dir=mnist_root)


    # train_images = train_images[:3000]
    # train_labels = train_labels[:3000]
    test_images = test_images[3000:6000]
    test_labels = test_labels[3000:6000]


    # train_images1 = np.rot90(train_images, 1, (1, 2))
    # # train_images2 = np.rot90(train_images, 3, (1, 2))
    # train_images = np.concatenate((train_images, train_images1), axis=0)
    # # train_images = np.concatenate((train_images, train_images2), axis=0)
    # train_labels = np.concatenate((train_labels, train_labels), axis=0)