Example No. 1
              validation_data=(x_test, y_test),
              callbacks=[TuneReportCallback({"mean_accuracy": "accuracy"})])


if __name__ == "__main__":
    import ray
    from ray import tune
    from ray.tune.schedulers import ASHAScheduler
    from ray.tune.schedulers.pb2 import PB2
    import tensorflow as tf
    import numpy as np
    import random

    print('Is CUDA available for container:', tf.test.is_gpu_available())

    mnist.load_data()  # we do this on the driver because it's not threadsafe

    ray.init(
        num_cpus=8,
        num_gpus=1,
        include_dashboard=True,  # if you use docker: docker run -p 8265:8265 -p 6379:6379
        dashboard_host='0.0.0.0')

    sched_asha = ASHAScheduler(
        time_attr="training_iteration",
        max_t=100,
        grace_period=10,
        # mode='max',  # maximize the metric; don't set it here if you set it in tune.run
        reduction_factor=3,
        brackets=1)
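
    # A minimal sketch (not in the original snippet) of launching the search;
    # `train_mnist` is an assumed trainable that wraps the Keras training
    # code at the top of this example (PB2, imported above, could be swapped
    # in for sched_asha):
    analysis = tune.run(
        train_mnist,
        scheduler=sched_asha,
        metric="mean_accuracy",
        mode="max",
        num_samples=10,
        resources_per_trial={"cpu": 2, "gpu": 0.25})
    print("Best config:", analysis.best_config)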
Example No. 2
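import h5py
import numpy as np

# This fragment starts mid-function; a hedged sketch of the imports above and
# of the matching train-set loading that presumably precedes the test-set
# loading below (the function name is assumed):
def load_dataset():
    train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(
        train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(
        train_dataset["train_set_y"][:])  # your train set labels
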
    test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(
        test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(
        test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


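# This fragment also uses `mnist`, `plt`, `tensorflow`, and a `flatten`
# helper that the excerpt never defines; minimal assumed versions (the
# column-per-example layout matches the transposed one-hot labels below):
import tensorflow
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist


def flatten(images):
    # (n, 28, 28) -> (784, n): one flattened image per column
    return images.reshape(images.shape[0], -1).T
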
if __name__ == '__main__':
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    plt.imshow(X_train[0], cmap=plt.get_cmap('gray_r'))
    plt.show()

    X_train_flatten = flatten(X_train)
    X_test_flatten = flatten(X_test)

    X_train_flatten = X_train_flatten.astype('float32')
    X_test_flatten = X_test_flatten.astype('float32')

    X_train = X_train_flatten / 255
    X_test = X_test_flatten / 255

    y_train = tensorflow.keras.utils.to_categorical(y_train, 10).T
    y_test = tensorflow.keras.utils.to_categorical(y_test, 10).T
Example No. 3
from sklearn.metrics import classification_report, confusion_matrix
import os
from tensorflow.keras.datasets import mnist
import pickle
import numpy as np
from time import perf_counter
from sklearn import preprocessing
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from csv import DictWriter
from csv import writer
import itertools

current_dir = os.getcwd()
(X_orig, y_orig), (_,_) = mnist.load_data(path=current_dir+'/../../../Data/MNIST/mnist.npz')


#Parameters
dr = [1,2,3,4,5,6,7,8,9,10,50,100,200]

dataset = current_dir+"/../../../Data/MNIST/mnist_medium.npz"
data = np.load(dataset)
index = data['index']
index = np.append(index,np.array([i for i in range(60000) if i not in index]))

field_names = ["Dimension","Accuracy"]
with open("mppca.csv","w", newline='') as file:
    filewriter = writer(file, delimiter=',')
    filewriter.writerow(field_names)
                default="output.png",
                help="path to output visualization file")
ap.add_argument("-p",
                "--plot",
                type=str,
                default="plot.png",
                help="path to output plot file")
args = vars(ap.parse_args())

# Initialize the number of epochs to train for and batch size
EPOCHS = 25
BS = 32

# Load the MNIST dataset
print("[INFO] Loading the MNIST dataset...")
((trainX, _), (testX, _)) = mnist.load_data()

# Add a channel dimension to every image in the dataset, then scale the pixel intensities to the range [0, 1]
trainX = np.expand_dims(trainX, axis=-1)
testX = np.expand_dims(testX, axis=-1)
trainX = trainX.astype("float32") / 255.0
testX = testX.astype("float32") / 255.0

# Construct the convolutional autoencoder
print("[INFO] Building the autoencoder...")
(encoder, decoder, autoencoder) = ConvAutoEncoder.build(28, 28, 1)
opt = Adam(learning_rate=1e-3)
autoencoder.compile(loss="mse", optimizer=opt)

# Train the convolutional autoencoder
H = autoencoder.fit(trainX,
                    trainX,  # the autoencoder reconstructs its own input
                    validation_data=(testX, testX),
                    epochs=EPOCHS,
                    batch_size=BS)  # remaining arguments assumed; the call is cut off in the original
Example No. 5
def change_hidden_units():
    with tensorflow.device('/cpu:0'):
        
        for hidden_units in hidden_units_arr:
        
            print("\nNumber of Hidden Units", hidden_units)
            starttime = datetime.now()
            # Load mnist dataset
            print("\nLoading MNIST dataset.")
            (x_train, y_train), (x_test, y_test) = mnist.load_data()

       
            # Compute the number of labels
            num_labels = len(np.unique(y_train))

            # Convert to one-hot vectors
            y_train = to_categorical(y_train)
            y_test = to_categorical(y_test)

            # Get the image dimensions (assumed square)
            image_size = x_train.shape[1]
            input_size = image_size * image_size

            # Resize and normalize
            x_train = np.reshape(x_train, [-1, input_size])
            x_train = x_train.astype('float32') / 255
            x_test = np.reshape(x_test, [-1, input_size])
            x_test = x_test.astype('float32') / 255

            # Setup the network parameters
            # (note: do not overwrite the loop variable `hidden_units` here)
            batch_size = 128
            dropout = 0.45

            # model is a 3-layer MLP with ReLU and dropout after each layer
            model = Sequential()
            model.add(Dense(hidden_units, input_dim=input_size))
            model.add(Activation('relu'))
            model.add(Dropout(dropout))
            model.add(Dense(hidden_units))
            model.add(Activation('relu'))
            model.add(Dropout(dropout))
            model.add(Dense(num_labels))

            # this is the output for one-hot vector
            model.add(Activation('softmax'))

            # Print model summary and save the network image to the file specified
            model.summary()
            plot_model(model, to_file='mp1_nn1.png', show_shapes=True)

            # loss function for one-hot vector
            # use of adam optimizer
            # accuracy is good metric for classification tasks
            model.compile(loss='categorical_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])

            # Train the network
            model.fit(x_train, y_train, epochs=20, batch_size=batch_size)

            # Compute predictions (test mode) for training data
            y_pred = model.predict(x_train,
                                   batch_size=batch_size,
                                   verbose=0)

            # Convert one-hot tensors back to class labels (and make them numpy arrays)
            y_train_true_class = K.argmax(y_train).numpy()
            y_train_pred_class = K.argmax(y_pred).numpy()

            # Students, insert code here to create CM and print confusion matrix and stats
            cm_train, cm_train_acc = confusion_matrix(y_train_true_class, y_train_pred_class)
            print_confusion_matrix_stats(cm_train, 'Train')

            # Validate the model on test dataset to determine generalization
            y_pred = model.predict(x_test,
                                   batch_size=batch_size,
                                   verbose=0)

            # Convert one-hot tensors back to class labels (and make them numpy arrays)
            y_test_true_class = K.argmax(y_test).numpy()
            y_test_pred_class = K.argmax(y_pred).numpy()

            # Students, insert code here to create CM and print confusion matrix and stats
            cm_test, cm_test_acc = confusion_matrix(y_test_true_class, y_test_pred_class)
            print_confusion_matrix_stats(cm_test, 'Test')
            
            endtime = datetime.now()
            
            print("Elapsed time:", endtime - starttime)
    return
Example No. 6
def get_validation_data(dataset,
                        normalize=True,
                        sort_x=True,
                        binarize=True,
                        subtract_mean=True,
                        balance_traits=True,
                        input_shape=None):
    if dataset == 'mnist':
        ((_, _), (X, Y)) = mnist.load_data()
        if K.image_data_format() == "channels_first":
            X = X.reshape((X.shape[0], 1, 28, 28))
        else:
            X = X.reshape((X.shape[0], 28, 28, 1))
        if normalize:
            X = X.astype("float") / 255.0
            X = X - np.mean(X, axis=0)
    elif dataset == '3D-shapes':
        data_path = '../data/3dshapes.h5'
        parent_dir = str(pathlib.Path().absolute()).split('/')[-1]
        if parent_dir == 'SimilarityGames':
            data_path = data_path[3:]
        dataset = h5py.File(data_path, 'r')
        data = dataset['images'][:]
        full_labels = dataset['labels'][:]
        labels_reg, labels_relational, keeper_idxs = get_shape_color_labels(
            full_labels, balance_traits=balance_traits)
        if keeper_idxs is not None:
            data = np.array([data[idx] for idx in keeper_idxs])

        (train_data, test_data, train_labels,
         test_labels) = train_test_split(data,
                                         labels_reg,
                                         test_size=0.25,
                                         random_state=42)
        if K.image_data_format() == "channels_first":
            train_data = train_data.reshape(
                (train_data.shape[0], input_shape[2], input_shape[1],
                 input_shape[0]))
            test_data = test_data.reshape((test_data.shape[0], input_shape[2],
                                           input_shape[1], input_shape[0]))
        else:
            train_data = train_data.reshape(
                (train_data.shape[0], input_shape[0], input_shape[1],
                 input_shape[2]))
            test_data = test_data.reshape((test_data.shape[0], input_shape[0],
                                           input_shape[1], input_shape[2]))
        if normalize:
            train_data = train_data.astype("float32") / 255.0
            test_data = test_data.astype("float32") / 255.0
        if subtract_mean:
            if K.image_data_format() == "channels_first":
                tmp_data = train_data.reshape(train_data.shape[1], -1)
            else:
                tmp_data = train_data.reshape(train_data.shape[-1], -1)

            mean = np.mean(tmp_data, axis=1)
            # sanity check because np.mean over multiple axes seems to behave strangely some times,
            # may be worth diving into
            if abs(np.mean(mean) - np.mean(train_data)) > 1e-3:
                raise ValueError(
                    "results of mean calculation suspicious, please double check before continuing"
                )
            print('channel means = ' + str(mean) + ', data mean = ' +
                  str(np.mean(train_data)))
            test_data = test_data - mean
        X = test_data
        Y = test_labels

    if sort_x:
        X_sorted = sort_data(X, Y)
    if binarize:
        lb = LabelBinarizer()
        Y = lb.fit_transform(Y)

    if sort_x:
        return X, Y, X_sorted
    else:
        return X, Y
Example No. 7
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import SparseCategoricalAccuracy

from tensorflow.keras.datasets import mnist

(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_x, test_x = train_x / 255.0, test_x / 255.0

print('train data:', train_x.shape, train_y.shape)
print('test data: ', test_x.shape, test_y.shape)


# Custom network (subclassed Keras Model)
class MnistModel(Model):
    def __init__(self):
        super(MnistModel, self).__init__()
        self.flatten = Flatten()
        self.d1 = Dense(128,
                        activation='relu',
                        kernel_regularizer=tf.keras.regularizers.L2())
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):
        y = self.flatten(x)
        y = self.d1(y)
        y = self.d2(y)
        return y
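
# A minimal usage sketch (assumed; the original snippet ends at the class
# definition, but the otherwise-unused imports above suggest this
# continuation):
model = MnistModel()
model.compile(optimizer=Adam(),
              loss=SparseCategoricalCrossentropy(),
              metrics=[SparseCategoricalAccuracy()])
model.fit(train_x, train_y, epochs=5, batch_size=32,
          validation_data=(test_x, test_y))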
Example No. 8
        Flatten(input_shape=(28, 28)),
        Dense(128, activation=relu),
        Dense(64, activation=relu),
        Dense(n, activation=relu),
        Dense(64, activation=relu),
        Dense(128, activation=relu),
        Dense(28*28, activation='sigmoid'),  # sigmoid, not softmax: pixels are reconstructed independently for the binary_crossentropy loss below
        Reshape((28, 28)),
    ])

    model.compile(optimizer='adadelta', loss='binary_crossentropy')

    return model

if __name__ == "__main__":
    (train_in, _), (test_in, test_out) = mnist.load_data()
    train_in = train_in / 255.0
    test_in = test_in / 255.0
    train_in = train_in.reshape((train_in.shape[0], 28, 28, 1))
    test_in = test_in.reshape((test_in.shape[0], 28, 28, 1))

    n = 16
    model, _, _ = get_conv_ae(n)
    model.summary()

    x_size = 10
    y_size = 4
    x_epochs = 1
    indices = [np.where(test_out == i)[0][0] for i in range(x_size)]
    fig = plt.figure(figsize=(x_size, y_size))
    out_vis = []
Example No. 9
# based on MNIST images: 28x28 greyscale
num_rows = 28
num_cols = 28
num_channels = 1

# hyperparameters
latent_dim = 100
NUM_EPOCHS = 20000  # probably needs to be closer to 50k?
BATCH_SIZE = 64
DROPOUT_RATE = 0.3
LEAKY_RELU_ALPHA = 0.25

##################################################################################################
# load dataset
(train_images, train_labels), (_, _) = mnist.load_data()

# rescale -1 to 1 and normalize
# use in coordination with tanh activation
train_images = train_images.reshape(train_images.shape[0], num_rows, num_cols,
                                    num_channels)
train_images = train_images.astype(np.float32)
train_images = (train_images - 127.5) / 127.5

##################################################################################################
# GENERATOR
g = Sequential()

g.add(
    Dense(units=7 * 7 * 512,
          input_dim=latent_dim,
Example No. 10
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 08:16:56 2021

@author: justin
"""

import tensorflow
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import RMSprop

(mnist_train_images,
 mnist_train_labels), (mnist_test_images,
                       mnist_test_labels) = mnist.load_data()

from tensorflow.keras import backend as K

if K.image_data_format() == 'channels_first':
    train_images = mnist_train_images.reshape(mnist_train_images.shape[0], 1,
                                              28, 28)
    test_images = mnist_test_images.reshape(mnist_test_images.shape[0], 1, 28,
                                            28)
    input_shape = (1, 28, 28)
else:
    train_images = mnist_train_images.reshape(mnist_train_images.shape[0], 28,
                                              28, 1)
    test_images = mnist_test_images.reshape(mnist_test_images.shape[0], 28, 28,
                                            1)
    input_shape = (28, 28, 1)
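
# A hedged sketch of the normalization step that typically follows (the
# fragment ends here; the RMSprop import above hints at a later compile
# step, but the model itself is not shown):
train_images = train_images.astype('float32') / 255
test_images = test_images.astype('float32') / 255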
Example No. 11
def load_data(index):
    if (index == 1):
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        num_classes = 10
        # Convert class vectors to binary class matrices.
        y_train = keras.utils.to_categorical(y_train, num_classes)
        y_test = keras.utils.to_categorical(y_test, num_classes)
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255

    elif (index == 2):
        print("data set cifar100")
        (x_train, y_train), (x_test,
                             y_test) = cifar100.load_data(label_mode='fine')
        num_classes = 100
        y_train = keras.utils.to_categorical(y_train, num_classes)
        y_test = keras.utils.to_categorical(y_test, num_classes)
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255

    elif (index == 3):
        print("data set MNIST")
        # input image dimensions
        img_rows, img_cols = 28, 28

        # the data, split between train and test sets
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        num_classes = 10

        if K.image_data_format() == 'channels_first':
            x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
            x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
            input_shape = (1, img_rows, img_cols)
        else:
            x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
            x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
            input_shape = (img_rows, img_cols, 1)

        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        # print('x_train shape:', x_train.shape)
        # print(x_train.shape[0], 'train samples')
        # print(x_test.shape[0], 'test samples')

        # convert class vectors to binary class matrices
        y_train = keras.utils.to_categorical(y_train, num_classes)
        y_test = keras.utils.to_categorical(y_test, num_classes)

    elif (index == 4):
        print("data set MNIST-Fashion")
        (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
        num_classes = 10
        # Convert class vectors to binary class matrices.
        # if we are using "channels first" ordering, then reshape the design
        # matrix such that the matrix is:
        # 	num_samples x depth x rows x columns
        if K.image_data_format() == "channels_first":
            x_train = x_train.reshape((x_train.shape[0], 1, 28, 28))
            x_test = x_test.reshape((x_test.shape[0], 1, 28, 28))

        # otherwise, we are using "channels last" ordering, so the design
        # matrix shape should be: num_samples x rows x columns x depth
        else:
            x_train = x_train.reshape((x_train.shape[0], 28, 28, 1))
            x_test = x_test.reshape((x_test.shape[0], 28, 28, 1))

        y_train = keras.utils.to_categorical(y_train, num_classes)
        y_test = keras.utils.to_categorical(y_test, num_classes)
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255

    elif (index == 5):
        # print("probiere STL 10 data set")
        import os
        import urllib as urllib
        import tarfile
        import sys
        import numpy as np

        HEIGHT, WIDTH, DEPTH = 96, 96, 3
        num_classes = 10
        SIZE = HEIGHT * WIDTH * DEPTH
        DATA_DIR = './stl10_data'
        DATA_URL = 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz'
        TRAIN_DATA_PATH = DATA_DIR + '/stl10_binary/train_X.bin'
        TEST_DATA_PATH = DATA_DIR + '/stl10_binary/test_X.bin'
        TRAIN_LABELS_PATH = DATA_DIR + '/stl10_binary/train_y.bin'
        TEST_LABELS_PATH = DATA_DIR + '/stl10_binary/test_y.bin'
        CLASS_NAMES_PATH = DATA_DIR + '/stl10_binary/class_names.txt'

        def read_labels(path_to_labels):
            with open(path_to_labels, 'rb') as f:
                labels = np.fromfile(f, dtype=np.uint8)
                return labels

        def read_all_images(path_to_data):
            with open(path_to_data, 'rb') as f:
                # read whole file in uint8 chunks
                everything = np.fromfile(f, dtype=np.uint8)
                images = np.reshape(everything, (-1, DEPTH, WIDTH, HEIGHT))

                images = np.transpose(images, (0, 3, 2, 1))
                return images

        def download_and_extract():
            # if the dataset already exists locally, no need to download it again.
            if all((
                    os.path.exists(TRAIN_DATA_PATH),
                    os.path.exists(TRAIN_LABELS_PATH),
                    os.path.exists(TEST_DATA_PATH),
                    os.path.exists(TEST_LABELS_PATH),
            )):
                return

            dest_directory = DATA_DIR
            if not os.path.exists(dest_directory):
                os.makedirs(dest_directory)

            filename = DATA_URL.split('/')[-1]
            filepath = os.path.join(dest_directory, filename)
            if not os.path.exists(filepath):

                def _progress(count, block_size, total_size):
                    sys.stdout.write('\rDownloading %s %.2f%%' %
                                     (filename, float(count * block_size) /
                                      float(total_size) * 100.0))
                    sys.stdout.flush()

                filepath, _ = urlretrieve(DATA_URL, filepath)
                print('Downloaded', filename)
                tarfile.open(filepath, 'r:gz').extractall(dest_directory)

        def load_dataset():
            # download and extract the dataset.
            download_and_extract()

            # load the train and test data and labels.
            x_train = read_all_images(TRAIN_DATA_PATH)
            y_train = read_labels(TRAIN_LABELS_PATH)
            x_test = read_all_images(TEST_DATA_PATH)
            y_test = read_labels(TEST_LABELS_PATH)

            if K.image_data_format() == "channels_first":
                x_train = x_train.reshape(
                    (x_train.shape[0], DEPTH, HEIGHT, WIDTH))
                x_test = x_test.reshape(
                    (x_test.shape[0], DEPTH, HEIGHT, WIDTH))
            else:
                x_train = x_train.reshape(
                    (x_train.shape[0], HEIGHT, WIDTH, DEPTH))
                x_test = x_test.reshape(
                    (x_test.shape[0], HEIGHT, WIDTH, DEPTH))

            x_train = x_train.astype('float32')
            x_train = (x_train - 127.5) / 127.5
            x_test = x_test.astype('float32')
            x_test = (x_test - 127.5) / 127.5

            # convert the labels to be zero based.
            y_train -= 1
            y_test -= 1

            # convert labels to one-hot vectors.
            y_train = keras.utils.to_categorical(y_train, num_classes)
            y_test = keras.utils.to_categorical(y_test, num_classes)

            return (x_train, y_train), (x_test, y_test)

        (x_train, y_train), (x_test, y_test) = load_dataset()

    else:
        print("data set not found")

    # return the loaded datasets to the search algorithm
    return (x_train, y_train), (x_test, y_test)
Example No. 12
from tensorflow.keras.datasets import mnist
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras.utils import to_categorical

(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

train_images = train_images.reshape((60000, 28 * 28))
train_images = train_images.astype('float32') / 255

test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255

train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28, )))
network.add(layers.Dense(10, activation='softmax'))

network.compile(optimizer='rmsprop',
                loss='categorical_crossentropy',
                metrics=['accuracy'])

hist = network.fit(train_images, train_labels, epochs=5, batch_size=128)

result = network.evaluate(test_images, test_labels, verbose=2)
Example No. 13
    def build_dataset(self, type):
        if type == 'cifar10':
            # LOAD DATA
            from tensorflow.keras.datasets import cifar10
            (train_image, train_label), (test_image,
                                         test_label) = cifar10.load_data()

            # PREPARE IMAGE BATCH
            _image_batch = np.concatenate([train_image, test_image], axis=0)
            self.image_batch = image.scale_out(_image_batch / 255.0)

            # PREPARE LABEL BATCH
            _label_batch = np.concatenate([train_label, test_label], axis=0)
            self.label_batch = np.zeros(
                shape=[_label_batch.shape[0],
                       _label_batch.max() + 1])
            for i, label in enumerate(_label_batch):
                self.label_batch[i, label] = 1

            # PREPARE TRAIN INDEX
            self.train_index = np.arange(start=0,
                                         stop=self.image_batch.shape[0] -
                                         self.batch_size,
                                         step=self.batch_size,
                                         dtype=int)

        elif type == 'cifar100':
            # LOAD DATA
            from tensorflow.keras.datasets import cifar100
            (train_image, train_label), (test_image,
                                         test_label) = cifar100.load_data()

            # PREPARE IMAGE BATCH
            _image_batch = np.concatenate([train_image, test_image], axis=0)
            self.image_batch = image.scale_out(_image_batch / 255.0)

            # PREPARE LABEL BATCH
            _label_batch = np.concatenate([train_label, test_label], axis=0)
            self.label_batch = np.zeros(
                shape=[_label_batch.shape[0],
                       _label_batch.max() + 1])
            for i, label in enumerate(_label_batch):
                self.label_batch[i, label] = 1

            # PREPARE TRAIN INDEX
            self.train_index = np.arange(start=0,
                                         stop=self.image_batch.shape[0] -
                                         self.batch_size,
                                         step=self.batch_size,
                                         dtype=int)

        elif type == 'mnist':
            # LOAD DATA
            from tensorflow.keras.datasets import mnist
            (train_image, train_label), (test_image,
                                         test_label) = mnist.load_data()

            # PREPARE IMAGE BATCH
            _train_image, _test_image = np.pad(
                train_image, ((0, 0), (2, 2), (2, 2)),
                'edge'), np.pad(test_image, ((0, 0), (2, 2), (2, 2)), 'edge')
            _train_image, _test_image = _train_image[
                ..., np.newaxis], _test_image[..., np.newaxis]
            _image_batch = np.concatenate([_train_image, _test_image], axis=0)
            self.image_batch = image.scale_out(_image_batch / 255.0)

            # PREPARE LABEL BATCH
            _label_batch = np.concatenate([train_label, test_label], axis=0)
            self.label_batch = np.zeros(
                shape=[_label_batch.shape[0],
                       _label_batch.max() + 1])
            for i, label in enumerate(_label_batch):
                self.label_batch[i, label] = 1

            # PREPARE TRAIN INDEX
            self.train_index = np.arange(start=0,
                                         stop=self.image_batch.shape[0] -
                                         self.batch_size,
                                         step=self.batch_size,
                                         dtype=int)

        else:
            raise ValueError('unknown dataset type: {}'.format(type))
Example No. 14
def train(args):
    if args.d == "mnist":
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(-1, 28, 28, 1)
        x_test = x_test.reshape(-1, 28, 28, 1)

        layers = [
            Conv2D(24, (5, 5), padding="valid", input_shape=(28, 28, 1)),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2)),
            Conv2D(64, (5, 5), padding="valid"),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.5),
            Flatten(),
            Dense(1000),
            Activation("relu"),
            Dropout(0.5),
            Dense(10),
        ]

    elif args.d == "cifar":
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()

        layers = [
            Conv2D(32, (3, 3), padding="same", input_shape=(32, 32, 3)),
            Activation("relu"),
            Conv2D(32, (3, 3), padding="same"),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2)),
            Conv2D(64, (3, 3), padding="same"),
            Activation("relu"),
            Conv2D(64, (3, 3), padding="same"),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2)),
            Conv2D(128, (3, 3), padding="same"),
            Activation("relu"),
            Conv2D(128, (3, 3), padding="same"),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2)),
            Flatten(),
            Dropout(0.5),
            Dense(1024, kernel_regularizer=l2(0.01),
                  bias_regularizer=l2(0.01)),
            Activation("relu"),
            Dropout(0.5),
            Dense(512, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)),
            Activation("relu"),
            Dropout(0.5),
            Dense(10),
        ]

    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")
    x_train = (x_train / 255.0) - (1.0 - CLIP_MAX)
    x_test = (x_test / 255.0) - (1.0 - CLIP_MAX)

    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.add(Activation("softmax"))
    opt = tf.keras.optimizers.Adam(learning_rate=0.0002)

    print(model.summary())
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    model.fit(
        x_train,
        y_train,
        epochs=10,
        batch_size=128,
        shuffle=True,
        verbose=1,
        validation_data=(x_test, y_test),
    )

    model.save("./model/model_{}.h5".format(args.d))
Example No. 15
def mnist_train_and_evaluate(model, batch_size=32, epochs=10, model_name=''):
    # load the dataset using the builtin Keras method
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    # derive a validation set from the training set
    # the original training set is split into
    # new training set (90%) and a validation set (10%)
    X_train, X_val = train_test_split(X_train,
                                      test_size=0.10,
                                      random_state=101)
    y_train, y_val = train_test_split(y_train,
                                      test_size=0.10,
                                      random_state=101)

    # the shape of the data matrix is NxHxW, where
    # N is the number of images,
    # H and W are the height and width of the images.
    # Keras expects the data to have shape NxHxWxC, where
    # C is the channel dimension
    X_train = np.reshape(X_train, (-1, 28, 28, 1))
    X_val = np.reshape(X_val, (-1, 28, 28, 1))
    X_test = np.reshape(X_test, (-1, 28, 28, 1))

    # convert the datatype to float32
    X_train = X_train.astype('float32')
    X_val = X_val.astype('float32')
    X_test = X_test.astype('float32')

    # normalize our data values to the range [0,1]
    X_train /= 255
    X_val /= 255
    X_test /= 255

    # convert 1D class arrays to 10D class matrices
    y_train = to_categorical(y_train, 10)
    y_val = to_categorical(y_val, 10)
    y_test = to_categorical(y_test, 10)

    # compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # use this variable to name your model
    # model_name="my_first_model"

    # create a way to monitor our model in Tensorboard (disabled)
    # tensorboard = TensorBoard("logs/" + model_name)

    # train the model
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(X_val, y_val))  # , callbacks=[tensorboard])

    score = model.evaluate(X_test, y_test, verbose=0)

    #print("Model '{}' performance:".format(model_name))
    #print("Loss: ",score[0])
    #print("Accuracy: ",score[1])
    #print()

    return score[1]
Example No. 16
def train(choice, h_layers=None, neurons=None, b_size=None, eps=None):
    if (choice == 1):
        model = build_model(h_layers, neurons)
        print("\nLoading dataset...")
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        print("\nLoading dataset Successful...")
        print("\nNormalizing data...")
        X_train = X_train / 255.0
        X_test = X_test / 255.0
        print("\nNormalizing data Successful...")
        input("\nPress any key to start training your model...")
        os.system('cls')
        print("Training your model with : \nHidden Layers : ", h_layers,
              "\nNeurons : ,", neurons, "\nBatch Size : ", b_size,
              "\nEpochs : ", eps, "\n")
        model.fit(X_train,
                  y_train,
                  batch_size=b_size,
                  epochs=eps,
                  validation_data=(X_test, y_test))
        time.sleep(3)
        os.system('cls')
        print("Model training Successful,Your Model Details")
        print("\nModel trained on ", len(X_train), " Samples and tested on ",
              len(X_test), " Samples")
        print("\nModel Testing Accuracy : ", model.evaluate(X_test, y_test)[1])
        print("\nModel Summary :")
        print(model.summary())
        input(
            "\nPress any key to continue...(Your trained model will be saved automatically...)"
        )
        print("\nSaving your model...")
        model.save('DIGIT.model')
    else:
        model = build_cnn_model()
        print("\nLoading dataset...")
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        print("\nLoading dataset Successful...")
        print("\nNormalizing data...")
        X_train = X_train / 255.0
        X_test = X_test / 255.0
        print("\nNormalizing data Successful...")
        print("\nConverting the input Image into Volume...")
        X_train = X_train.reshape(-1, 28, 28, 1)
        X_test = X_test.reshape(-1, 28, 28, 1)
        print("\nConvertion Successful...")
        input("\nPress any key to start training your model...")
        os.system('cls')
        model.fit(X_train,
                  y_train,
                  batch_size=b_size,
                  epochs=eps,
                  validation_data=(X_test, y_test))
        time.sleep(3)
        os.system('cls')
        print("Model training Successful,Your Model Details")
        print("\nModel trained on ", len(X_train), " Samples and tested on ",
              len(X_test), " Samples")
        print("\nModel Testing Accuracy : ", model.evaluate(X_test, y_test)[1])
        print("\nModel Summary :")
        print(model.summary())
        input(
            "\nPress any key to continue...(Your trained model will be saved automatically...)"
        )
        print("\nSaving your model...")
        model.save('DIGIT_CNN.model')
Example No. 17
from pyimagesearch.nn.conv import LeNet
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.datasets import mnist
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from tensorflow.keras import backend as K
import matplotlib.pyplot as plt
import numpy as np

print("[INFO] accessing MNIST...")
((trainData, trainLabels), (testData, testLabels)) = mnist.load_data()

if K.image_data_format() == "channels_first":
    trainData = trainData.reshape((trainData.shape[0], 1, 28, 28))
    testData = testData.reshape((testData.shape[0], 1, 28, 28))
else:
    trainData = trainData.reshape((trainData.shape[0], 28, 28, 1))
    testData = testData.reshape((testData.shape[0], 28, 28, 1))

trainData = trainData.astype("float32") / 255.0
testData = testData.astype("float32") / 255.0

print(trainLabels)
le = LabelBinarizer()
trainLabels = le.fit_transform(trainLabels)
testLabels = le.transform(testLabels)

print("[INFO] compiling model...")
opt = SGD(learning_rate=0.01)
model = LeNet.build(width=28, height=28, depth=1, classes=10)
model.compile(loss="categorical_crossentropy",
Example No. 18
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(inputs=img, outputs=validity)


if __name__ == "__main__":
    # Load the dataset
    (X_train, _), (_, _) = mnist.load_data()
    wgan = WGAN(img_shape=(28, 28, 1))
    wgan.train(data=X_train, epochs=400, batch_size=32, sample_interval=400)
Example No. 19
def load_dataset(angle):
    # load dataset
    (trainX, trainY), (testX, testY) = mnist.load_data()
    for i in [9]:
        trainX = np.delete(trainX, np.where(trainY == i), axis=0)
        trainY = np.delete(trainY, np.where(trainY == i))
        testX = np.delete(testX, np.where(testY == i), axis=0)
        testY = np.delete(testY, np.where(testY == i))
    trainX = trainX.astype('float32')
    testX = testX.astype('float32')
    #Create two new datasets for the rotated digits
    train_rot_X = np.empty((len(trainX) * 2, 28, 28))
    train_rot_Y = np.empty((len(trainX) * 2, ))
    test_rot_X = np.empty((len(testX) * 2, 28, 28))
    test_rot_Y = np.empty((len(testY) * 2))
    for i in range(len(trainY)):
        train_rot_Y[i] = trainY[i]
    for i in range(len(trainY)):
        train_rot_Y[i + len(trainX)] = trainY[i]
    for i in range(len(testY)):
        test_rot_Y[i] = testY[i]
    for i in range(len(testY)):
        test_rot_Y[i + len(testY)] = testY[i]
    for i in range(len(trainX)):
        train_rot_X[i] = trainX[i]
        T = measure.moments_central(train_rot_X[i])
        im_2 = rotate(
            train_rot_X[i],
            180. -
            0.5 * np.arctan2(2. * T[1, 1], T[2, 0] - T[0, 2]) / math.pi * 180.,
            reshape=False,
            order=3)
        train_rot_X[i] = rotate(
            train_rot_X[i],
            -0.5 * np.arctan2(2. * T[1, 1], T[2, 0] - T[0, 2]) / math.pi *
            180.,
            reshape=False,
            order=3)
        train_rot_X[i + len(trainX)] = im_2
    for i in range(len(testX)):
        test_rot_X[i] = testX[i]
        T = measure.moments_central(test_rot_X[i])
        im_2 = rotate(
            test_rot_X[i],
            180. -
            0.5 * np.arctan2(2. * T[1, 1], T[2, 0] - T[0, 2]) / math.pi * 180.,
            reshape=False,
            order=3)
        test_rot_X[i] = rotate(
            test_rot_X[i],
            -0.5 * np.arctan2(2. * T[1, 1], T[2, 0] - T[0, 2]) / math.pi *
            180.,
            reshape=False,
            order=3)
        test_rot_X[i + len(testX)] = im_2
    #reshape dataset to have a single channel
    trainX = train_rot_X.reshape((train_rot_X.shape[0], 28, 28, 1))
    testX = test_rot_X.reshape((test_rot_X.shape[0], 28, 28, 1))
    # one hot encode target values (using the rotated test set built above,
    # which the original code left unused)
    trainY = to_categorical(train_rot_Y)
    testY = to_categorical(test_rot_Y)
    print(trainY.shape)
    return trainX, trainY, testX, testY
Example No. 20
# In[1]:

# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import mnist

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt

print(tf.__version__)

# In[ ]:

type(mnist.load_data()[0][0])

# ## Loading the MNIST data
#

# The cell below splits the MNIST database from TensorFlow into separate variables (train_data, train_labels, test_data, test_labels) using the mnist.load_data() function
#

# train_data contains 28x28-pixel images of handwritten digits that will be used to train the neural network

# In[ ]:

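# As described above, mnist.load_data() returns the train/test splits; this
# assignment is assumed (referenced but not shown in the fragment) so that
# train_data is defined before it is printed:
(train_data, train_labels), (test_data, test_labels) = mnist.load_data()
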
print(train_data)

# train_labels contains the digit corresponding to the expected answer during training
Example No. 21
import numpy as np
from tensorflow.keras.datasets import mnist

(x_train, _), (x_test, _) = mnist.load_data()

x_train = x_train.reshape(60000, 784).astype('float32')/255
x_test = x_test.reshape(10000, 784).astype('float32')/255

from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input

def autoencoder(hidden_layer_size):
    model = Sequential()
    model.add(Dense(units=hidden_layer_size, input_shape=(784,),
                    activation='relu'))
    model.add(Dense(units=784,activation='sigmoid'))
    return model
def deeplearning():
    model = Sequential()
    model.add(Dense(256, input_shape=(784,),activation='relu'))
    model.add(Dense(128,activation='relu'))
    model.add(Dense(64,activation='relu'))
    model.add(Dense(32,activation='relu'))
    model.add(Dense(64,activation='relu'))
    model.add(Dense(128,activation='relu'))
    model.add(Dense(256,activation='relu'))
    model.add(Dense(units=784,activation='sigmoid'))
    return model

# model = autoencoder(hidden_layer_size=154)
model = deeplearning()
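
# A minimal training sketch (assumed, not in the original fragment): the
# network reconstructs its own input, so x_train serves as both input and
# target.
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(x_train, x_train, epochs=10, batch_size=256,
          validation_data=(x_test, x_test))
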
Example No. 22
def main(args):
    # Horovod: initialize Horovod.
    hvd.init()

    if not args.use_only_cpu:
        # Horovod: pin GPU to be used to process local rank (one GPU per process)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = str(hvd.local_rank())
    else:
        config = None

    K.set_session(tf.Session(config=config))

    batch_size = 128
    num_classes = 10

    # Horovod: adjust number of epochs based on number of GPUs.
    epochs = int(math.ceil(args.num_epochs / hvd.size()))

    # Input image dimensions
    img_rows, img_cols = 28, 28

    # The data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    if K.image_data_format() == "channels_first":
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")
    x_train /= 255
    x_test /= 255
    print("x_train shape:", x_train.shape)
    print(x_train.shape[0], "train samples")
    print(x_test.shape[0], "test samples")

    # Convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation="softmax"))

    # Horovod: adjust learning rate based on number of GPUs.
    opt = keras.optimizers.Adadelta(1.0 * hvd.size())

    # Horovod: add Horovod Distributed Optimizer.
    opt = hvd.DistributedOptimizer(opt)

    ##### Enabling SageMaker Debugger ###########
    # creating hook
    smd_hook = smd.KerasHook(
        out_dir=args.out_dir,
        save_config=smd.SaveConfig(save_interval=args.save_interval),
        include_collections=["weights", "gradients"],
        include_workers=args.include_workers,
    )

    ##### Enabling SageMaker Debugger ###########
    # wrapping optimizer so hook can identify gradients
    opt = smd_hook.wrap_optimizer(opt)

    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=opt, metrics=["accuracy"])

    callbacks = [
        # Horovod: broadcast initial variable states from rank 0 to all other processes.
        # This is necessary to ensure consistent initialization of all workers when
        # training is started with random weights or restored from a checkpoint.
        hvd.callbacks.BroadcastGlobalVariablesCallback(0),
        ##### Enabling SageMaker Debugger ###########
        # adding smd hook as a callback
        smd_hook,
    ]

    # Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
    if hvd.rank() == 0:
        callbacks.append(
            keras.callbacks.ModelCheckpoint(os.path.join(args.model_dir, "checkpoint-{epoch}.h5"))
        )

    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        callbacks=callbacks,
        epochs=epochs,
        verbose=1 if hvd.rank() == 0 else 0,
        validation_data=(x_test, y_test),
    )
    score = model.evaluate(x_test, y_test, verbose=0)
    print("Test loss:", score[0])
    print("Test accuracy:", score[1])
Example No. 23
from tensorflow.keras.datasets import mnist  # library with the MNIST dataset
from tensorflow.keras.models import Sequential, model_from_json  # Sequential model class
from tensorflow.keras.layers import Dense  # Dense - a fully connected layer
from tensorflow.keras.optimizers import Adam  # the Adam optimizer
from tensorflow.keras import utils  # utilities for to_categorical
from tensorflow.keras.preprocessing import image  # for rendering images
import numpy as np  # the numpy library

(x_train_org,
 y_train_org), (x_test_org,
                y_test_org) = mnist.load_data()  # load the MNIST data

# Reshape the input images from 28x28 to a flat 784-element vector
x_train = x_train_org.reshape(60000, 784)
x_test = x_test_org.reshape(10000, 784)

# Normalize the input images
x_train = x_train.astype('float32')  # convert x_train to floating point
x_train = x_train / 255  # divide by 255 so the range is 0 to 1
x_test = x_test.astype('float32')  # convert x_test to floating point
x_test = x_test / 255  # divide by 255 so the range is 0 to 1

# Convert the labels to one-hot encoding
y_train = utils.to_categorical(y_train_org, 10)
y_test = utils.to_categorical(y_test_org, 10)

model = Sequential()  # create a feed-forward network
model.add(Dense(800, input_dim=784, activation="relu"))  # closing parenthesis added; the line was cut off in the original
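
# A hedged sketch of the usual continuation (the fragment is cut off above;
# the otherwise-unused Adam import suggests the optimizer):
model.add(Dense(10, activation="softmax"))  # output layer: 10 digit classes
model.compile(loss="categorical_crossentropy",
              optimizer=Adam(),
              metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=128, epochs=15, verbose=1)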
Example No. 24
# example of a cnn for image classification
from numpy import unique
from numpy import argmax
from tensorflow.keras.datasets.mnist import load_data
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
# load dataset
(x_train, y_train), (x_test, y_test) = load_data()
# reshape data to have a single channel
x_train = x_train.reshape(
    (x_train.shape[0], x_train.shape[1], x_train.shape[2], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], x_test.shape[2], 1))
# determine the shape of the input images
in_shape = x_train.shape[1:]
# determine the number of classes
n_classes = len(unique(y_train))
print(in_shape, n_classes)
# normalize pixel values
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
# define model
model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           activation='relu',
           kernel_initializer='he_uniform',
           input_shape=in_shape))
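# A hedged sketch of the usual continuation (the fragment is cut off here;
# the otherwise-unused MaxPool2D, Flatten, Dropout, Dense, and argmax
# imports suggest it):
model.add(MaxPool2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
# the labels here are plain integers, hence the sparse loss
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=128, verbose=2)
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print('Accuracy: %.3f' % acc)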
Example No. 25
def Train_model(model, json_file, bot, update):

    epochs = 3

    #telegram callback
    telegram_callback = TelegramBotCallback(bot, update)

    #loading config
    with open(json_file, 'r') as f:
        config = json.load(f)
    category = config['category']

    #for classification
    if category == 1:
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        X_train = tf.keras.utils.normalize(X_train, axis=1)
        X_test = tf.keras.utils.normalize(X_test, axis=1)

        model.fit(X_train, y_train,
                  epochs=epochs,
                  validation_data=(X_test, y_test),
                  verbose=1,
                  callbacks=[telegram_callback])

        score = model.evaluate(X_test, y_test, verbose=0)
        bot.send_message('Test loss:' + str(score[0]))
        bot.send_message('Test accuracy:' + str(score[1]))

    #for regression
    elif category == 2:
        (X_train, y_train), (X_test, y_test) = boston_housing.load_data()

        model.fit(X_train, y_train,
                  epochs=3,
                  validation_data=(X_test, y_test),
                  verbose=1,
                  callbacks=[telegram_callback])

        score = model.evaluate(X_test, y_test, verbose=0)
        bot.send_message('Test loss:' + str(score))
        # bot.send_message('Test accuracy:' + str(score[1]))

    elif category == 3:
        num_classes = 10
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        X_train = X_train[:1000]
        y_train = y_train[:1000]
        X_test = X_test[:200]
        y_test = y_test[:200]
        img_rows, img_cols = 28, 28
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255
        X_test /= 255

        y_train = tf.keras.utils.to_categorical(y_train, num_classes)
        y_test = tf.keras.utils.to_categorical(y_test, num_classes)
        #print(X_train.shape)
        model.fit(X_train, y_train,
                  epochs=3,
                  batch_size=32,
                  validation_data=(X_test, y_test),
                  verbose=1,
                  callbacks=[telegram_callback])
Example No. 26
from tensorflow.keras import backend as K
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Model

batch_size = 1000
num_classes = 10
epochs = 60
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)

img_rows, img_cols = 28, 28
(x_train, y_train_original), (x_test, y_test_original) = mnist.load_data()
y_train_original = y_train_original.astype(np.int16)
y_test_original = y_test_original.astype(np.int16)

x_train = x_train / 255.
x_test = x_test / 255.

from mnist_model import Model as MM
y_train = tf.keras.utils.to_categorical(y_train_original, num_classes)
y_test = tf.keras.utils.to_categorical(y_test_original, num_classes)

x_train = x_train.reshape((-1, 28, 28, 1))
x_test = x_test.reshape((-1, 28, 28, 1))

m = MM()
Example No. 27
import matplotlib.pyplot as plt
import numpy as np
import argparse

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o",
                "--output",
                required=True,
                help="path to the output loss/accuracy plot")
args = vars(ap.parse_args())

# grab the MNIST dataset (if this is your first time using this
# dataset then the 11MB download may take a minute)
print("[INFO] accessing MNIST...")
((trainX, trainY), (testX, testY)) = mnist.load_data()

# each image in the MNIST dataset is represented as a 28x28x1
# image, but in order to apply a standard neural network we must
# first "flatten" the image to be simple list of 28x28=784 pixels
trainX = trainX.reshape((trainX.shape[0], 28 * 28 * 1))
testX = testX.reshape((testX.shape[0], 28 * 28 * 1))

# scale data to the range of [0, 1]:
# change from uint8 to float and scale between 0 and 1
trainX = trainX.astype("float32") / 255.0
testX = testX.astype("float32") / 255.0

# convert the labels from integers to vectors
# one hot encoding applied
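# The comment above describes one-hot encoding, but the fragment is cut off;
# a hedged sketch of that step with scikit-learn's LabelBinarizer:
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)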
Example No. 28
"""
@author: USER
"""

#colab
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
# layers for building the training model
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.datasets import mnist
#https://ithelp.ithome.com.tw/articles/10191725
#https://medium.com/chiukevin0321/tensorflow%E8%88%87keras%E5%9F%BA%E6%9C%AC%E4%BB%8B%E7%B4%B9-621352fc7150
(x_train, y_train), (x_test, y_test) = mnist.load_data()
len(x_train)
len(x_test)
x_train.shape
x_train[0]
plt.imshow(x_train[0], cmap='binary')
y_train[0]
t1 = x_train.reshape(60000, 784)
x_train = x_train.reshape(60000, -1)  # flatten to 1-D
x_test = x_test.reshape(10000, -1)
x_train.shape
t1.shape
x_train = x_train / 255
x_test = x_test / 255
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)  # number of classes to recognize
youtube_video('YRhxdVk_sIs')

"""---

# Mnist Digits

Mnist is a dataset of 28x28-pixel black-and-white images. The images contain handwritten digits from 0 to 9. The goal is to analyze these images and predict which digit is shown.

Data preprocessing
"""

from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt

(train_data, train_labels), (test_data, test_labels) = mnist.load_data()
train_data = train_data.reshape((60000,784))
test_data = test_data.reshape((10000,784))

"""De data is al gesplitst in een train en test set. We bekijken een aantal afbeeldingen uit de dataset:"""

def show_digit(idx,network = None):
    print(train_data[idx].shape)
    img = train_data[idx].reshape((28,28))
    lbl = train_labels[idx]


    plt.imshow(img,cmap='gray')
    if network is not None:
        pred = network.predict(img.reshape(1,784))[0]
        plt.title('label: {}, Prediction: {}'.format(lbl,pred))
Example No. 30
#!/usr/bin/env python3
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import Flatten,  MaxPooling2D, Conv2D
from tensorflow.keras.callbacks import TensorBoard

(X_train,y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(60000,28,28,1).astype('float32')
X_test = X_test.reshape(10000,28,28,1).astype('float32')

X_train /= 255
X_test /= 255

n_classes = 10
y_train = keras.utils.to_categorical(y_train, n_classes)
y_test = keras.utils.to_categorical(y_test, n_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(28,28,1)) )
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
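
# A hedged sketch of the usual continuation (compile/train is not part of
# the original fragment; the otherwise-unused TensorBoard import above
# suggests the callback):
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=5,
          validation_data=(X_test, y_test),
          callbacks=[TensorBoard(log_dir='./logs')])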
Example No. 31
               kernel_size=(3, 3),
               activation='relu',
               padding='valid'))
    model.add(UpSampling2D((14, 14)))
    model.add(
        Conv2D(filters=1,
               kernel_size=(3, 3),
               activation='sigmoid',
               padding='same'))
    model.summary()
    return model


from tensorflow.keras.datasets import mnist

train_set, test_set = mnist.load_data()
x_train, y_train = train_set
x_test, y_test = test_set

x_train = x_train.reshape(-1, x_train.shape[1], x_train.shape[2],
                          1).astype('float32') / 255.
x_test = x_test.reshape(-1, x_test.shape[1], x_test.shape[2],
                        1).astype('float32') / 255.
print(x_train.shape, x_test.shape)

model = autoencoder()

# model.compile(optimizer='adam', loss='mse', metrics=['acc'])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit(x_train, x_train, epochs=10)