def mixed_mnist(count):
    """Build a class-balanced MNIST subset with a trailing validation chunk.

    Takes the first `count` samples of each of the 10 digit classes as
    training data, then appends the last floor(count/10) samples of each
    class as validation data.  Class labels are synthesized from position,
    i.e. this assumes mnist_data() returns 500 samples per class laid out
    contiguously (class i occupies rows 500*i .. 500*i+499) — TODO confirm
    against the mnist_data() loader.

    :param count: number of training samples to take per class
    :return: [data, targets, n_validation] — the first count*10 rows of
             `data` are training samples, the last n_validation rows are
             validation samples; `targets` is the matching 1-D label array.
    """
    # validation samples per class (fix: removed unused `total = count*10`)
    valid = int(np.floor(count / 10))
    rawset = mnist_data()
    # placeholder first row/element so np.append has something to grow;
    # deleted below.  Starting from float zeros also promotes the result
    # arrays to float64 regardless of the raw dtype.
    arranged_data = np.zeros((1, len(rawset[0][0])))
    arranged_target = np.zeros((1, 1))
    for i in range(10):
        # training: first `count` samples of class i
        arranged_data = np.append(arranged_data, rawset[0][500*i:(500*i)+count], axis=0)
        arranged_target = np.append(arranged_target, np.ones((count,), dtype=int)*i)
    for i in range(10):  # validation
        # last `valid` samples of class i.
        # NOTE(review): this slice overlaps the training slice whenever
        # count + valid > 500 — confirm that is intended.
        arranged_data = np.append(arranged_data, rawset[0][500*(i+1)-valid:500*(i+1)], axis=0)
        arranged_target = np.append(arranged_target, np.ones((valid,), dtype=int)*i)
    # drop the placeholder row/element added above
    arranged_data = np.delete(arranged_data, 0, axis=0)
    arranged_target = np.delete(arranged_target, 0, axis=0)
    return [arranged_data, arranged_target, valid*10]
def load_train_test(test_size=0.33, random_state=2):
    """Load MNIST and return a stratified train/test split ready for a CNN.

    :param test_size: fraction of the data held out for testing
    :param random_state: seed forwarded to train_test_split
    :return: (X_train, X_test, y_train, y_test) where the X arrays are
             float32 images of shape (n, 1, 28, 28) scaled into [0, 1]
             and the y arrays are one-hot encoded.
    """
    data, labels = mnist_data()
    split = train_test_split(
        data, labels, test_size=test_size,
        random_state=random_state, stratify=labels)
    X_train, X_test, y_train, y_test = split
    # flat 784-pixel rows -> single-channel 28x28 images, scaled to [0, 1]
    X_train = X_train.reshape(-1, 1, 28, 28).astype('float32') / 255
    X_test = X_test.reshape(-1, 1, 28, 28).astype('float32') / 255
    # one hot encode outputs
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    return X_train, X_test, y_train, y_test
def test_import_mnist_data():
    """Sanity-check the dimensions of the mlxtend MNIST subset."""
    features, labels = mnist_data()
    n_samples = features.shape[0]
    n_pixels = features.shape[1]
    assert n_samples == 5000
    assert n_pixels == 784
    assert labels.shape[0] == 5000
# mnist.py from mlxtend.data import mnist_data from mlxtend.classifier import MultiLayerPerceptron as MLP from mlxtend.preprocessing import shuffle_arrays_unison X, y = mnist_data() X, y = shuffle_arrays_unison((X, y), random_seed=1) X_train, y_train = X[:500], y[:500] X_test, y_test = X[500:], y[500:] import matplotlib.pyplot as plt def plot_digit(X, y, idx): img = X[idx].reshape(28, 28) plt.imshow(img, cmap='Greys', interpolation='nearest') plt.title('true label: %d' % y[idx]) plt.show() plot_digit(X, y, 3500) from mlxtend.preprocessing import standardize X_train_std, params = standardize(X_train, columns=range(X_train.shape[1]), return_params=True) X_test_std = standardize(X_test, columns=range(X_test.shape[1]), params=params) nn1 = MLP(hidden_layers=[150], l2=0.00, l1=0.0, epochs=100,
def test_import_mnist_data():
    # the bundled mlxtend subset holds 5000 flattened 28x28 digit images
    X, y = mnist_data()
    expected_rows = 5000
    assert X.shape[0] == expected_rows
    assert X.shape[1] == 28 * 28
    assert y.shape[0] == expected_rows
# Script: load the mlxtend MNIST subset, reshape it into image tensors,
# and split it into train/test sets for a TensorFlow model.
from mlxtend.data import mnist_data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
from math import ceil
import tensorflow as tf
from sklearn import model_selection

# load in the data
X_orig, y_orig = mnist_data()
# print(X[1])

# images are square, so the side length is sqrt(pixels per flattened row)
image_width = image_height = int(math.sqrt(len(X_orig[0])))

# reshape X: flat rows -> (n, height, height, 1) single-channel image tensor
X_orig = np.array(X_orig).astype(float)
X = X_orig.reshape([X_orig.shape[0], image_height, image_height, 1])

# form train & test set (2% held out, fixed seed)
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(
    X, y_orig, test_size=0.02, random_state=17)


# change y to one_hot vector
def convert_to_onehot(y_vec):
    """
    A function that reshapes the y vector (1,2,5...) into a one-hot
    tensor [[0,1,0,0,0,0,0...], [0,0,1,0,0,0,0...]].

    :param y_vec: vector of integer class labels
    :return: one_hot tensor
    """
    # number of distinct classes actually present in the label vector
    classes = len(set(y_vec))
    classes_tf = tf.constant(classes)
    # NOTE(review): the function body is truncated in the source —
    # the one-hot construction and return are not visible here.
__author__ = 'BENGEOS-PC'
from mlxtend.data import mnist_data
from matplotlib import pyplot as plt


def plot_digit(X, y, idx):
    """Render sample `idx` of X as a 28x28 grayscale image titled with its label.

    :param X: array of flattened 784-pixel digit images
    :param y: array of integer labels aligned with X
    :param idx: index of the sample to display
    """
    img = X[idx].reshape(28, 28)
    plt.imshow(img, cmap='Greys', interpolation='nearest')
    # fix: original used 'True Lable: %' — a bare '%' is an incomplete
    # printf conversion and raises ValueError at runtime; also corrects
    # the "Lable" typo.
    plt.title('True Label: %d' % y[idx])
    plt.show()


X, y = mnist_data()
plot_digit(X, y, 4)