def learn(test_predict):
    """Train a dense classifier on EMNIST digits and classify test_predict.

    Args:
        test_predict: array of shape (num_samples, 28, 28) holding images to
            classify after training.  # assumes pixels already scaled like
            xtrain — TODO confirm against the caller.

    Returns:
        list[int]: predicted digit (argmax class) for each input image.
    """
    (xtrain, ytrain), (xtest, ytest) = emnist.load_data(type='digits')

    # Scale pixel values from [0, 255] to [0, 1].
    xtrain = xtrain / 255.0
    xtest = xtest / 255.0

    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(256, activation='relu', name='hidden1'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(128, activation='relu', name='hidden2'),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)
    ])
    model.summary()
    model.compile(optimizer='RMSprop',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Hold out the first 1000 test samples for validation during training.
    v = xtest[0:1000]
    yv = ytest[0:1000]
    history = model.fit(xtrain, ytrain, epochs=10, validation_data=(v, yv))

    # Plot training vs validation loss (label= is required for legend text;
    # the previous unused `epochs = range(1, 10)` was off-by-one and dropped).
    plt.plot(history.history['loss'], 'g', label='Training loss')
    plt.plot(history.history['val_loss'], 'b', label='Validation loss')
    plt.title('Training and Validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()

    # Plot training vs validation accuracy.
    plt.plot(history.history['accuracy'], 'g', label='Training accuracy')
    plt.plot(history.history['val_accuracy'], 'b', label='Validation accuracy')
    plt.title('Training and Validation accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.show()

    print(model.evaluate(xtest, ytest))
    pred = model.predict(test_predict)
    print(pred.shape)
    print(test_predict.shape)

    # Take the highest-probability class for every prediction
    # (generalized from the original hard-coded first 4 samples).
    predlist = []
    for i in range(len(pred)):
        print(np.argmax(pred[i]))
        predlist.append(np.argmax(pred[i]))

    return predlist
def make_model_emnist():
    """Train a dense classifier on EMNIST 'byclass' (62 classes) and save it.

    Loads the dataset, trains for 10 epochs, evaluates on the test split and
    writes the trained model to 'EMNIST.model'.

    Returns:
        The trained keras model (also saved to disk; original returned None,
        so returning the model is backward compatible).
    """
    # Load dataset from emnist https://github.com/christianversloot/extra_keras_datasets#emnist-balanced
    (x_train, y_train), (x_test, y_test) = emnist.load_data(type='byclass')
    # Scale greyscale pixel values from [0, 255] to [0, 1].
    # (Two no-op REPL-residue statements were removed here.)
    x_train, x_test = x_train / 255, x_test / 255  # greyscale

    # Model structure: flatten 28x28 images, one hidden layer, 62-way softmax.
    model = keras.Sequential(name='EMNIST_Model')
    model.add(layers.Flatten(input_shape=(28, 28)))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(62, activation='softmax'))

    # Compile Model
    model.compile(
        optimizer='nadam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy']
    )

    # Fitting Model
    model.fit(x_train, y_train, epochs=10)

    # Evaluate
    model.evaluate(x_test, y_test)

    model.save('EMNIST.model')
    return model
# Example #3
    def __init__(self, select_adversarials=True) -> None:
        """Load MNIST as in-distribution data and EMNIST as adversarial data.

        Builds featurized inputs, one-hot targets, and boolean flags marking
        which adversarial samples are non-digits (raw label > 9).

        Args:
            select_adversarials: when truthy, restrict the adversarial splits
                to a fixed whitelist of EMNIST label classes.
        """
        (self.X_train, self.y_train), (self.X_test,
                                       self.y_test) = mnist.load_data()

        (self.X_adv_train,
         self.y_adv_train), (self.X_adv_test,
                             self.y_adv_test) = emnist.load_data()

        # reshaping data
        self.X_train = self.featurize_input(self.X_train)
        self.X_test = self.featurize_input(self.X_test)
        self.X_adv_train = self.featurize_input(self.X_adv_train)
        self.X_adv_test = self.featurize_input(self.X_adv_test)

        # one hot encode target values
        self.y_train = to_categorical(self.y_train)
        self.y_test = to_categorical(self.y_test)
        self.y_adv_train = to_categorical(self.y_adv_train)
        self.y_adv_test = to_categorical(self.y_adv_test)

        # Recover integer class keys from the one-hot rows.
        # NOTE(review): 'revere_categorical' looks like a typo for
        # 'reverse_categorical' (defined elsewhere); name kept to match
        # the class API.
        self.train_keys = self.revere_categorical(self.y_train)
        self.holdout_training_keys = self.revere_categorical(self.y_adv_train)
        self.holdout_testing_keys = self.revere_categorical(self.y_adv_test)

        # is it a number or a letter?  Labels above 9 are non-digit classes.
        self.adversarial_training_labels = self.holdout_training_keys > 9
        self.adversarial_testing_labels = self.holdout_testing_keys > 9

        if select_adversarials:  # fixed: was the non-idiomatic '== True'
            # Whitelist of adversarial classes to keep.
            select_labels = [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 20, 22, 23, 27,
                31, 33
            ]

            # Boolean mask selecting only whitelisted classes (training split).
            selection_mask = [
                lab in select_labels for lab in self.holdout_training_keys
            ]
            self.X_adv_train = self.X_adv_train[selection_mask]
            self.y_adv_train = self.y_adv_train[selection_mask]
            self.adversarial_training_labels = self.adversarial_training_labels[
                selection_mask]
            self.holdout_training_keys = self.holdout_training_keys[
                selection_mask]

            # Same selection for the testing split.
            selection_mask2 = [
                lab in select_labels for lab in self.holdout_testing_keys
            ]
            self.X_adv_test = self.X_adv_test[selection_mask2]
            self.y_adv_test = self.y_adv_test[selection_mask2]
            self.adversarial_testing_labels = self.adversarial_testing_labels[
                selection_mask2]
            self.holdout_testing_keys = self.holdout_testing_keys[
                selection_mask2]
# Example #4
def load_data(type='byclass', out_dim=62):
    """Fetch an EMNIST split, shuffle it, add a channel axis, one-hot labels.

    Args:
        type: EMNIST split name forwarded to emnist.load_data (parameter name
            kept for caller compatibility even though it shadows the builtin).
        out_dim: number of classes for the one-hot target vectors.

    Returns:
        (train_x, train_y, test_x, test_y) with images shaped
        (samples, side, side, 1) and one-hot encoded labels.
    """
    (train_x, train_y), (test_x, test_y) = emnist.load_data(type=type)

    # Shuffle with a fixed seed: reproducible, but not ordered by class.
    train_x, train_y = shuffle(train_x, train_y, random_state=42)
    test_x, test_y = shuffle(test_x, test_y, random_state=42)

    # Add a trailing channel axis: (samples, side, side) -> (samples, side, side, 1).
    side = len(train_x[0])
    train_x = np.reshape(train_x, (len(train_x), side, side, 1))
    test_x = np.reshape(test_x, (len(test_x), side, side, 1))

    # One-hot encode the output vectors.
    train_y = keras.utils.to_categorical(train_y, out_dim)
    test_y = keras.utils.to_categorical(test_y, out_dim)

    return train_x, train_y, test_x, test_y
def get_data(experiment, occlusion=None, bars_type=None, one_hot=False):
    """Load EMNIST 'balanced', merge splits, remap labels, add noise, scale.

    Args:
        experiment: forwarded to add_noise to select the noise regime.
        occlusion: optional occlusion parameter forwarded to add_noise.
        bars_type: optional bar-noise parameter forwarded to add_noise.
        one_hot: when True, labels are returned one-hot encoded.

    Returns:
        (all_data, all_labels): float32 images scaled to [0, 1] with shape
        (n, img_columns, img_rows, 1), and the (possibly one-hot) labels.
    """
    # Load MNIST data, as part of TensorFlow.
    (train_images,
     train_labels), (test_images,
                     test_labels) = emnist.load_data(type='balanced')

    all_data = np.concatenate((train_images, test_images), axis=0)
    all_labels = np.concatenate((train_labels, test_labels), axis=0)

    # Remap selected raw labels onto lower class ids (presumably merging
    # visually-similar classes — verify against the experiment design).
    # The dict is built once; it was rebuilt on every loop iteration before.
    label_remap = {
        36: 10, 37: 11, 38: 13, 39: 14, 40: 15, 41: 16,
        42: 17, 43: 23, 44: 26, 45: 27, 46: 29,
    }
    for i, l in enumerate(all_labels):
        all_labels[i] = label_remap.get(l, l)

    all_data = add_noise(all_data, experiment, occlusion, bars_type)

    all_data = all_data.reshape((all_labels.size, img_columns, img_rows, 1))
    all_data = all_data.astype('float32') / 255

    if one_hot:
        # Changes labels to binary rows. Each label correspond to a column, and only
        # the column for the corresponding label is set to one.
        all_labels = to_categorical(all_labels)

    return (all_data, all_labels)
# Example #6
import keras

# CNN for EMNIST 'letters' classification.
net = tensorflow.keras.Sequential()

input_shape = (28, 28, 1)  # single-channel 28x28 images
kernel_size = (3, 3)

# Convolution -> pooling -> activation -> flatten feature extractor.
net.add(Conv2D(64, kernel_size, input_shape=input_shape))
net.add(MaxPool2D(pool_size=(2, 2)))
net.add(Activation('relu'))
net.add(Flatten())

net.add(Dense(128, activation='relu'))  # hidden layer - 128 units
net.add(Dense(27, activation='softmax'))  # output layer - 27 units

(X_train, y_train), (X_test, y_test) = emnist.load_data(
    type='letters')  # load emnist 'letters'
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)  # add channel axis for the conv net
y_train_cat = to_categorical(y_train)
y_test_cat = to_categorical(y_test)
# Scale pixel values from [0, 255] to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train / 255.0
X_test = X_test / 255.0

# TensorBoard logging callback.
# NOTE(review): this call is truncated in the snippet (no closing parenthesis).
tensorboard = tensorflow.keras.callbacks.TensorBoard(
    log_dir=r"C:\Users\Dominik\Desktop\logs",
    histogram_freq=0,
    write_graph=True,
    write_images=False,
    update_freq="batch",
# Example #7
import tensorflow as tf
from tensorflow import keras
import numpy as np
from extra_keras_datasets import emnist 

# Load EMNIST 'balanced' and print the raw array shapes.
(x_train, y_train), (x_test, y_test) = emnist.load_data(type='balanced')
print("x train shape is: " , x_train.shape)
print("x test shape is: " , x_test.shape)
print("y train shape is: " , y_train.shape)
print("y test shape is: " , y_test.shape)

# Split the test set in half: first half validation, second half final test.
var = int(x_test.shape[0]/2)
print(var)
x_val = x_test[0:var,:,:]
y_val = y_test[0:var]

x_test2 = x_test[var:,:,:]
y_test2 = y_test[var:]

# Add a channel axis so the conv layers get 4-D input (avoids 2-D shape errors).
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_val  = x_val.reshape(x_val.shape[0], 28, 28, 1)
x_test2 = x_test2.reshape(x_test2.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)

# Set the CNN architecture.
# NOTE(review): this Sequential(...) call is truncated in the snippet.
model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(64, (3,3),padding="same",
        activation='relu', # stride defaults to (1, 1); padding set explicitly
        input_shape=(28,28,1)), 
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(
# Example #8
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from keras.optimizers import RMSprop
import tensorflow as tf
import extra_keras_datasets.emnist as emnist
# from extra_keras_datasets import emnist

import os
os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'

batch_size = 128  # split the training data into mini-batches of 128 (provisional)
num_classes = 26 # number of classes to predict; 26 because it is the alphabet
epochs = 300 # number of passes over the training data

# Fetch the training (train) and test (test) data
(t_images, t_labels), (v_images, v_labels) = emnist.load_data(type="letters")


# Reshape the raw data into one (28, 28, 1) array per image
# NOTE(review): sample counts 124800/20800 are hard-coded for the 'letters'
# split — confirm against the installed dataset version.
t_images = t_images.reshape(124800, 28, 28, 1)
v_images = v_images.reshape(20800, 28, 28, 1)
# Scale pixel values from [0, 255] to [0, 1].
t_images = t_images.astype('float32')
v_images = v_images.astype('float32')
t_images /= 255
v_images /= 255
print(t_images.shape[0], 'train samples')
print(v_images.shape[0], 'test samples')

# Convert labels into a form Keras can consume (one-hot); currently disabled.
# t_labels = keras.utils.to_categorical(t_labels, num_classes)
# v_labels = keras.utils.to_categorical(v_labels, num_classes)
# Example #9
import tensorflow as tf
from extra_keras_datasets import emnist
import matplotlib.pyplot as plt
import numpy
from random import *

# Load the EMNIST 'letters' dataset
(input_train, target_train), (input_test,
                              target_test) = emnist.load_data(type='letters')

# Normalize the training data
input_train = tf.keras.utils.normalize(input_train, axis=1)

# Build the model
model = tf.keras.models.Sequential()

# Flatten images => array
model.add(tf.keras.layers.Flatten())

# Add the first layer: relu, 128 neurons
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))

# Add the second layer: relu, 128 neurons
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))

# Add the output layer: softmax
# NOTE(review): the original comment said 26 output neurons but the layer
# has 32 — likely a mismatch for the 26-letter dataset; confirm.
model.add(tf.keras.layers.Dense(32, activation=tf.nn.softmax))

# Compile the model
# NOTE(review): this compile(...) call is truncated in the snippet.
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from extra_keras_datasets import emnist

# Normalize the working directory to .../handwritingAI/models no matter
# where the script was launched from.
if ((os.getcwd()).split(os.sep)[-1] == 'models'):
    pass
elif ((os.getcwd()).split(os.sep)[-1] == 'handwritingAI'):
    os.chdir(f'{os.getcwd()}//models')
else:
    os.chdir(f'{os.getcwd()}//handwritingAI//models')

# Get data
(x_train, y_train), (x_test, y_test) = emnist.load_data(
    type='digits')  # Loads data into training and test sets
# Dataset citation (stray string expression; has no runtime effect):
'Cohen, G., Afshar, S., Tapson, J., & van Schaik, A. (2017). EMNIST: an extension of MNIST to handwritten letters. Retrieved from http://arxiv.org/abs/1702.05373'

# Reshape data to fit the model.
# NOTE(review): sample counts 240000/40000 are hard-coded for the 'digits'
# split — confirm they match the installed dataset version.
x_train = x_train.reshape(240000, 28, 28, 1)
x_test = x_test.reshape(40000, 28, 28, 1)

# Categorize labels by one-hot encoding (Only used for digit classification)
y_test = tf.keras.utils.to_categorical(y_test)
y_train = tf.keras.utils.to_categorical(y_train)

# Define model
# NOTE(review): this Conv2D(...) call is truncated in the snippet.
model = tf.keras.models.Sequential()
model.add(
    tf.keras.layers.Conv2D(32,
                           3,