Example #1
import emnist


def load_data(dataset):
    # The original `load_data` helper is not shown; this stand-in (an
    # assumption) wraps the pip `emnist` package's extraction functions and
    # returns ((train_images, train_labels), (test_images, test_labels)).
    return (emnist.extract_training_samples(dataset),
            emnist.extract_test_samples(dataset))


def reshape_bw(data):
    # Append a singleton channel axis: (N, H, W) -> (N, H, W, 1) for CNN input.
    sh = data.shape
    return data.reshape((sh[0], sh[1], sh[2], 1))


def reshape_flatten(data):
    # Flatten each image: (N, H, W) -> (N, H*W) for dense-layer input.
    return data.reshape((data.shape[0], data.shape[1] * data.shape[2]))


if __name__ == "__main__":

    f1 = open('./results_naive.txt', 'w+')
    # for ds_name in emnist.list_datasets():
    f1.write('-' * 20 + '\nRunning on the dataset ' +
             emnist.list_datasets()[0] + '\n' + '-' * 20 + '\n')
    print('-' * 20 + '\nRunning on the dataset ' + emnist.list_datasets()[0] +
          '\n' + '-' * 20)

    (train_images,
     train_labels), (test_images,
                     test_labels) = load_data(emnist.list_datasets()[0])

    # Add a channel axis and scale pixel values to the [0, 1] range.
    train_images = reshape_bw(train_images)
    test_images = reshape_bw(test_images)
    train_images, test_images = train_images / 255.0, test_images / 255.0

    # CNN = cnn.ConvNN()
    # CNN.make_model(train_images, train_labels)
    # CNN.fit(test_images, test_labels)
    # CNN.plot_all(test_images, test_labels)
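
The `cnn.ConvNN` wrapper referenced in the commented-out lines is project-local and not shown. The sketch below is a minimal stand-in, assuming tensorflow.keras and the (N, 28, 28, 1) inputs produced by reshape_bw; the `make_cnn` name, layer sizes, and training settings are illustrative assumptions, not the original author's code.

import tensorflow as tf

def make_cnn(num_classes):
    # Small convolutional classifier for 28x28 grayscale EMNIST images.
    # Pass num_classes to match the chosen split (e.g. 26 for 'letters').
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
                               input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(num_classes, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

# Example usage (assumes zero-indexed integer labels):
# model = make_cnn(num_classes=len(set(train_labels)))
# model.fit(train_images, train_labels, epochs=5,
#           validation_data=(test_images, test_labels))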
Example #2
import seaborn as sns
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras import backend as K
from tensorflow.keras.datasets import mnist
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix

#ignore warning messages
import warnings
warnings.filterwarnings('ignore')

sns.set()

# pip install emnist
# Import Dataset(s)
from emnist import list_datasets
print(list_datasets())  # show the available EMNIST splits

from emnist import extract_training_samples
images_train, labels_train = extract_training_samples('letters')
from emnist import extract_test_samples
images_test, labels_test = extract_test_samples('letters')

# Flatten Data
dims = images_train.shape[1] * images_train.shape[2]
X_train = images_train.reshape(images_train.shape[0], dims)
X_test = images_test.reshape(images_test.shape[0], dims)

# Rescale to 0 -> 1 by dividing by max pixel value (255)
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
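
The excerpt imports train_test_split and confusion_matrix but ends before they appear. A plausible continuation, sketched under the assumption that some classifier `model` (hypothetical; not part of the excerpt) has been fit on the flattened data; the 90/10 split ratio is also an assumption:

# Hold out 10% of the training set for validation.
X_tr, X_val, y_tr, y_val = train_test_split(
    X_train, labels_train, test_size=0.1, random_state=42)

# After training, summarize per-class errors on the test set.
# `model` is a hypothetical trained classifier with softmax outputs;
# argmax recovers class indices, and +1 maps back to the 1..26 letter labels.
# y_pred = model.predict(X_test).argmax(axis=1) + 1
# print(confusion_matrix(labels_test, y_pred))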
Example #3
import numpy as np
import matplotlib.pyplot as plt
from emnist import list_datasets, extract_training_samples, extract_test_samples
from tensorflow.keras.optimizers import Adam

np.random.seed(1671)  # for reproducibility

# network and training
NB_EPOCH = 100
BATCH_SIZE = 256
VERBOSE = 2
NB_CLASSES = 26    # number of outputs = number of letters
OPTIMIZER = Adam() # optimizer, explained in this chapter
N_HIDDEN = 512
VALIDATION_SPLIT = 0.2  # fraction of TRAIN reserved for VALIDATION
DROPOUT = 0.20

print(list_datasets())
X_train, y_train = extract_training_samples('letters')
print("train shape: ", X_train.shape)
print("train labels: ",y_train.shape)
X_test, y_test = extract_test_samples('letters')
print("test shape: ",X_test.shape)
print("test labels: ",y_test.shape)

# Shift the 1..26 letter labels to 0..25 for zero-based class indexing
y_train = y_train - 1
y_test = y_test - 1

# X_train is 124800 rows of 28x28 values --> reshaped into 124800 x 784
# X_test is 20800 rows of 28x28 values --> reshaped into 20800 x 784
RESHAPED = X_train.shape[1]*X_train.shape[2]
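
The listing stops after computing RESHAPED. A sketch of the dense network these hyperparameters point to, assuming tensorflow.keras; the two-hidden-layer shape and the preprocessing steps are illustrative, not the original author's continuation:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.utils import to_categorical

# Flatten to (N, 784), normalize to [0, 1], and one-hot encode the labels.
X_train = X_train.reshape(X_train.shape[0], RESHAPED).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], RESHAPED).astype('float32') / 255
Y_train = to_categorical(y_train, NB_CLASSES)
Y_test = to_categorical(y_test, NB_CLASSES)

model = Sequential([
    Dense(N_HIDDEN, input_shape=(RESHAPED,), activation='relu'),
    Dropout(DROPOUT),
    Dense(N_HIDDEN, activation='relu'),
    Dropout(DROPOUT),
    Dense(NB_CLASSES, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER,
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH,
          verbose=VERBOSE, validation_split=VALIDATION_SPLIT)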