Code example #1
# coding: utf-8
# author: LiuChen

from neural_network import *
from mnist import MNIST
from data_tools import *

# Load the MNIST dataset
mnistdata = MNIST('./mnist_dataset')
mnistdata.gz = True
train_dev_set = mnistdata.load_training()
test_set = mnistdata.load_testing()

# Training set
train_x = train_dev_set[0][:55000]
train_y = train_dev_set[1][:55000]
train_y = one_hot(train_y)
# Validation set
dev_x = train_dev_set[0][55000:]
dev_y = train_dev_set[1][55000:]
dev_y = one_hot(dev_y)
# Test set
test_x = test_set[0]
test_y = test_set[1]
test_y = one_hot(test_y)

# Neural network architecture definition
network = FCNetwork(lmd=0.1)  # learning rate
act_fun = Relu  # hidden-layer activation function: Sigmoid, Tanh, or Relu

# Input layer
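The one_hot helper imported from data_tools is not shown in this excerpt; a minimal sketch of what it presumably does, assuming ten classes:

import numpy as np

def one_hot(labels, num_classes=10):
    # Map a sequence of integer labels to an (N, num_classes) one-hot matrix.
    labels = np.asarray(labels)
    encoded = np.zeros((len(labels), num_classes))
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded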
Code example #2
def get_testing_files(path):
    mndata = MNIST(path)
    mndata.gz = True
    return mndata.load_testing()
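All of these snippets rely on the python-mnist package: MNIST(path) points at a directory containing the standard IDX files, and setting gz = True makes the loader read the gzipped variants (train-images-idx3-ubyte.gz and so on) without unpacking them. A minimal end-to-end usage sketch (the directory name is an assumption):

from mnist import MNIST

mndata = MNIST('./mnist_dataset')        # directory with the (gzipped) IDX files
mndata.gz = True                         # read the .gz archives directly
images, labels = mndata.load_training()  # images: one 784-long pixel list per sample
print(len(images), len(labels))          # 60000 60000 for the standard training split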
Code example #3
File: knn_lda.py Project: johpetsc/machine-learning
import pylab as pl
import random as rand
import collections
import numpy as np
from mnist import MNIST
import joblib  # sklearn.externals.joblib was removed in modern scikit-learn
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

mndata = MNIST('data')
mndata.gz = True


def training(mndata):  # function that returns the train data
    images, labels = mndata.load_training()

    #index = rand.randrange(0, len(images))

    #print(mndata.display(images[index]))
    #print(labels[index])

    Train = collections.namedtuple('Train', ['images', 'labels'])

    return Train(images, labels)


def testing(mndata):  # function that returns the test data
    images, labels = mndata.load_testing()
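The snippet is truncated before the classifiers are applied; a hypothetical continuation that fits the imported KNeighborsClassifier on the data returned by the helpers above:

train = training(mndata)
test_images, test_labels = mndata.load_testing()

knn = KNeighborsClassifier(n_neighbors=3)  # n_neighbors chosen arbitrarily here
knn.fit(train.images, train.labels)

predictions = knn.predict(test_images)
print(accuracy_score(test_labels, predictions))
print(confusion_matrix(test_labels, predictions))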
Code example #4
                dJ_dZ = (self.layers[1].weights.T @ dJ_dZ) * self.act_prime(
                    self.layers[0].outputs)
                self.layers[0].dJ_dW = dJ_dZ @ inputs[i].reshape(1, -1)
                self.layers[0].dJ_dB = dJ_dZ

                # adjust all weights and biases
                for layer in self.layers:
                    layer.weights -= learn_rate * layer.dJ_dW
                    layer.biases -= learn_rate * layer.dJ_dB


if __name__ == "__main__":
    # ----------------------------------------Handwritten Digits----------------------------------------------- #
    # load MNIST dataset
    mn_data = MNIST('./MNIST')
    mn_data.gz = True
    mn_images, mn_labels = mn_data.load_training()

    # normalise image data from the 0-255 range to the range 0.01 to 1
    images_array = (np.array(mn_images) / 255.0 * 0.99) + 0.01

    # convert each label into a 10-element array filled with 0.01, with the labeled position set to 0.99
    labels_list = []
    for label in mn_labels:
        empty = np.full(10, 0.01)
        empty[label] = 0.99
        labels_list.append(empty)
    labels_array = np.array(labels_list)

    # instantiate Neural Network
    my_ANN = NeuralNetwork(input_nodes=784,
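The label-encoding loop above also has a compact vectorized equivalent; a sketch with the same 0.01/0.99 scaling:

labels_int = np.array(mn_labels)
labels_array = np.full((len(labels_int), 10), 0.01)
labels_array[np.arange(len(labels_int)), labels_int] = 0.99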
Code example #5
            [np.argmax(test_alpha[i, :]) for i in range(len(test_alpha))])
        return sum(int(i == j) for (i, j) in zip(test_result, y))

    def save_model(self, dstFile1, dstFile2):
        '''
        Save the model's weights and bias: dstFile1 stores the bias, dstFile2 stores the weights
        '''
        np.save(dstFile1, self.bias)
        np.save(dstFile2, self.weights)
        return None


if __name__ == '__main__':
    # Load the dataset
    mdata = MNIST()
    mdata.gz = True
    train_images, train_labels = mdata.load_training()
    test_images, test_labels = mdata.load_testing()
    '''
    As tuples:
    train_data[0] is the training images, shape (60000, 784)
    train_data[1] is the training labels, shape (60000,)
    test_data[0] is the test images, shape (10000, 784)
    test_data[1] is the test labels, shape (10000,)
    '''
    train_data = [np.array(train_images), np.array(train_labels)]
    test_data = [np.array(test_images), np.array(test_labels)]
    model = DRNN(np.array([784, 30, 10]))
    #model.SBGD(train_data,test_data,epochs=30,batch_size=5,eta=0.05,cost='cross_entropy')
    model.SBGD(train_data, test_data, epochs=50, batch_size=10, eta=0.05)
    #model.save_model('bias.npy','weights.npy')
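save_model has no counterpart in this excerpt; a hypothetical load_model method that mirrors it:

    def load_model(self, srcFile1, srcFile2):
        '''
        Load the model's bias from srcFile1 and weights from srcFile2,
        mirroring save_model above (hypothetical helper).
        '''
        self.bias = np.load(srcFile1, allow_pickle=True)
        self.weights = np.load(srcFile2, allow_pickle=True)
        return None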
Code example #6
def train(save_model_path, mnist_data_path='mnist_data', print_accuracy=False):

    # Dear course team,
    #
    # In this homework I used 2 models:
    # the first one is a Random Forest, trained on MNIST. It predicts the
    # probabilities for each digit in each sudoku square.
    # Then, the second model - another Random Forest - is trained using those probabilities as the features and
    # the right digits as the answers, on all train images.

    # The first model:
    fix_seed(SEED)

    mnist_data = MNIST(mnist_data_path)
    mnist_data.gz = True

    images_train, labels_train = mnist_data.load_training()
    images_test, labels_test = mnist_data.load_testing()

    images_train = np.uint8(
        [np.reshape(im, (MNIST_CELL_SIZE, ) * 2) for im in images_train])
    images_test = np.uint8(
        [np.reshape(im, (MNIST_CELL_SIZE, ) * 2) for im in images_test])
    labels_train, labels_test = np.int16(labels_train), np.int16(labels_test)

    hhog = lambda image: hog(image,
                             orientations=8,
                             pixels_per_cell=(8, 8),
                             cells_per_block=(3, 3),
                             visualize=False,
                             multichannel=False)

    features_train = np.array([hhog(im) for im in images_train])
    features_test = np.array([hhog(im) for im in images_test])
    # features_train = np.array([im.ravel() for im in images_train])
    # features_test = np.array([im.ravel() for im in images_test])

    #     rf = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=SEED)
    rf = XGBClassifier(n_estimators=100, n_jobs=-1, random_state=SEED)
    rf.fit(features_train, labels_train)

    model_fname = 'random_forest.joblib'
    joblib.dump(rf, Path(save_model_path) / model_fname)

    print_accuracy = True  # overrides the function argument so accuracy is always reported

    if print_accuracy:
        from sklearn.metrics import accuracy_score
        print(accuracy_score(labels_test, rf.predict(features_test)))

    # The second model:
    prefix = '/autograder/source/train/'
    #     prefix = ''

    images_arr = [
        prefix + 'train_0.jpg', prefix + 'train_2.jpg', prefix + 'train_3.jpg',
        prefix + 'train_4.jpg', prefix + 'train_6.jpg', prefix + 'train_7.jpg',
        prefix + 'train_8.jpg'
    ]

    truth_arr = [
        [
            5, 3, 6, 5, 1, 4, 3, 4, 8, 7, 2, 6, 8, 6, 2, 9, 5, 9, 3, 4, 2, 9,
            5, 6, 8, 1, 5, 6, 1, 8, 4, 7, 5
        ],  # 0
        [
            1, 7, 8, 6, 5, 1, 2, 6, 5, 2, 7, 3, 5, 9, 2, 8, 1, 4, 9, 7, 1, 6,
            8, 4, 7, 1, 5, 6, 9, 7, 6
        ],  # 2.1
        [
            8, 3, 4, 8, 9, 3, 7, 4, 2, 3, 4, 7, 5, 7, 6, 4, 7, 8, 6, 9, 5, 4,
            1, 5, 9, 5, 1, 6, 5, 9, 4, 8, 3
        ],  # 2.2
        [
            2, 9, 3, 5, 7, 4, 9, 5, 6, 2, 9, 3, 2, 1, 8, 1, 9, 4, 6, 3, 5, 2,
            4, 3, 7, 6, 8, 3, 7, 4, 1, 9
        ],  # 3
        [
            6, 5, 1, 6, 5, 8, 3, 5, 9, 4, 3, 6, 2, 1, 5, 7, 3, 9, 1, 5, 5, 8,
            6, 5, 9, 4, 2, 1, 5, 8, 6, 1, 6, 2, 5
        ],  # 4
        [
            7, 5, 9, 1, 3, 4, 2, 3, 8, 5, 2, 9, 3, 6, 7, 2, 9, 8, 1, 2, 4, 6,
            3, 3, 6, 5, 3, 1
        ],  # 6
        [
            9, 7, 8, 1, 1, 9, 8, 6, 5, 7, 2, 5, 3, 9, 7, 8, 6, 2, 6, 1, 9, 7,
            8, 4, 1, 5, 2, 4, 8, 7, 2, 9, 3, 9, 5, 4, 7, 2, 1, 7, 4
        ],  # 7
        [
            4, 5, 7, 6, 5, 6, 3, 2, 1, 5, 1, 9, 8, 6, 7, 4, 8, 4, 3, 6, 6, 4,
            7, 1, 2, 8, 3, 1, 5, 4, 7, 1, 8, 3, 4, 7
        ]
    ]  # 8

    truths, pred_probas = [], []

    for truth in truth_arr:
        truths.append(truth)
    truths = np.hstack(truths)

    for i, fname in enumerate(images_arr):
        image = cv2.imread(fname)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = io.imread(fname, as_gray=True)  # overrides the cv2 load above; only this grayscale read is used
        image_full = image

        mask, corners = mask_image(image_full)
        images_warped, tforms = normalize_image(image_full)

        sudoku_digits = []
        for image_warped in images_warped:
            pred_square, pred_proba, flag, cells, _ = detect_digits(
                image_warped, rf, get_templates=False)
            pred_probas.append(pred_proba)

    pred_probas = np.vstack(pred_probas)

    #     rf2 = RandomForestClassifier(n_estimators=50, n_jobs=-1, random_state=666)
    rf2 = XGBClassifier(n_estimators=50, n_jobs=-1, random_state=666)
    rf2.fit(pred_probas, truths)

    model_fname = 'random_forest2.joblib'
    joblib.dump(rf2, Path(save_model_path) / model_fname)
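The two saved models imply a two-stage inference path; a hypothetical sketch chaining them, reusing the project-local helpers (normalize_image, detect_digits) from the code above:

rf = joblib.load(Path(save_model_path) / 'random_forest.joblib')
rf2 = joblib.load(Path(save_model_path) / 'random_forest2.joblib')

image = io.imread('sudoku.jpg', as_gray=True)  # hypothetical input image
images_warped, tforms = normalize_image(image)
for image_warped in images_warped:
    _, pred_proba, flag, cells, _ = detect_digits(image_warped, rf, get_templates=False)
    digits = rf2.predict(pred_proba)  # stage 2: per-square probabilities -> final digits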
Code example #7
        parser.error("training size must be in domain (0,60000]!")
    if args.evaluate_size > 10000:
        parser.error("evaluate size must be in domain (0,10000]!")
    if args.gpu:
        tf.logging.info('Using GPU - setting threads to 1')
        args.num_thread = 1

    tf.logging.info('Running with threads({})'.format(args.num_thread))
    tf.logging.info(
        'Parameters: epoch({}), batch-size({}), training-rate({:f}), training-size({}), evaluate-size({})'
        .format(args.epoch, args.batch_size, args.training_rate,
                args.training_size, args.evaluate_size))

    # Get data
    mndata = MNIST(args.mnistpath, return_type='numpy')
    mndata.gz = args.mnist_gzip
    train_images, train_labels_raw = mndata.load_training()
    test_images, test_labels_raw = mndata.load_testing()
    train_labels = one_hot_encode(train_labels_raw)
    test_labels = one_hot_encode(test_labels_raw)
    tf.logging.info('Loaded data')

    # set TF settings
    sesscfg = tf.ConfigProto()
    sesscfg.log_device_placement = False
    sesscfg.intra_op_parallelism_threads = args.num_thread
    sesscfg.inter_op_parallelism_threads = args.num_thread
    sesscfg.device_count['GPU'] = args.gpu
    sesscfg.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1

    cnn_graph, img, lab, out, pred, loss, opt = cnn_train(args.training_rate)
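The argument parser itself is outside the excerpt; a plausible argparse setup consistent with the attributes referenced above (flag names inferred, defaults hypothetical):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--mnistpath', default='mnist_data')
parser.add_argument('--mnist-gzip', dest='mnist_gzip', action='store_true')
parser.add_argument('--epoch', type=int, default=10)
parser.add_argument('--batch-size', dest='batch_size', type=int, default=100)
parser.add_argument('--training-rate', dest='training_rate', type=float, default=0.001)
parser.add_argument('--training-size', dest='training_size', type=int, default=60000)
parser.add_argument('--evaluate-size', dest='evaluate_size', type=int, default=10000)
parser.add_argument('--num-thread', dest='num_thread', type=int, default=4)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()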
Code example #8
    def load_data(self, get_first: bool = False):
        """Load all the unpacked data into numpy arrays.

        Args:
            get_first (bool): Download and unpack data first.
        """
        if get_first:
            self.get_data()

        dtype = 'balanced' if self.balanced else 'byclass'

        idx_paths = [
            (
                os.path.join(self.data_path, 'gzip',
                             f'emnist-{dtype}-train-images-idx3-ubyte'),
                os.path.join(self.data_path, 'gzip',
                             f'emnist-{dtype}-train-labels-idx1-ubyte'),
            ),
            (
                os.path.join(self.data_path, 'gzip',
                             f'emnist-{dtype}-test-images-idx3-ubyte'),
                os.path.join(self.data_path, 'gzip',
                             f'emnist-{dtype}-test-labels-idx1-ubyte'),
            )
        ]
        mapping = []

        with open(os.path.join(self.data_path, 'gzip',
                               f'emnist-{dtype}-mapping.txt'),
                  mode='r') as lm:
            for line in lm:
                mapping.append(chr(int(line.split()[1])))

        mndata = MNIST(os.path.join(self.data_path, 'gzip'))
        mndata.gz = True

        print("\n[~] Loading dataset...")

        x_train, y_train = mndata.load(idx_paths[0][0], idx_paths[0][1])
        x_test, y_test = mndata.load(idx_paths[1][0], idx_paths[1][1])

        x_train = np.array(x_train)
        y_train = np.array(y_train)
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        # Normalize an array with data in an interval of [0, 255] to [0, 1]
        x_train = x_train.astype('float32') / 255
        x_test = x_test.astype('float32') / 255

        # Reshape the image to be used in a CNN
        x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
        x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)

        # Perform one-hot encoding on label array
        y_train = tf.keras.utils.to_categorical(y_train, len(mapping))
        y_test = tf.keras.utils.to_categorical(y_test, len(mapping))

        print("[!] Dataset loaded!")

        return x_train, y_train, x_test, y_test, mapping
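The enclosing class is not part of the excerpt; assuming it exposes data_path and balanced attributes, usage would look roughly like this (the class name is hypothetical):

loader = EmnistLoader(data_path='emnist', balanced=True)  # hypothetical class name
x_train, y_train, x_test, y_test, mapping = loader.load_data(get_first=True)
print(x_train.shape, len(mapping))  # e.g. (112800, 28, 28, 1) and 47 for the balanced split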
Code example #9
File: two2_2.py Project: SubhaniSk7/Machine-Learning
import copy

import pandas as pd
import math
import matplotlib.pyplot as mt
import numpy as np
import sklearn as sk
import sklearn.metrics
from sklearn.linear_model import LogisticRegression
from mnist import MNIST

mndata = MNIST('/home/subhani007/Desktop/ML Assignment/extracted')
mndata.gz = False
images_train, labels_train = mndata.load_training()

images_test, labels_test = mndata.load_testing()
print(len(images_train))
print(len(labels_train))

# for i in range(0, 10):
#     # print(np.array(images_test[i]).reshape(784, 1))
#     # logit.predict(testImages[i],testLabels[i])
#     print('-->', i)
#     print(logit.predict(np.array(images_test[i]).reshape(1, 784)))
#     score = logit.score(np.array(images_test[i]).reshape(1,784), np.array(labels_test[i]).reshape(1,1))
#     print('score:', i, ':', score)


# logitL2 = LogisticRegression(multi_class='ovr', solver='lbfgs',penalty='l1')
# logitL2.fit(images_train, labels_train)
# score = logitL2.score(np.array(images_test).reshape(10000, 784), np.array(labels_test).reshape(10000, 1))
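The fit and score calls are commented out above; a minimal sketch of actually training the imported LogisticRegression on the loaded data (note that penalty='l1' is incompatible with the lbfgs solver, so l2 is used here):

logit = LogisticRegression(multi_class='ovr', solver='lbfgs', penalty='l2', max_iter=100)
logit.fit(images_train, labels_train)
score = logit.score(images_test, labels_test)
print('test accuracy:', score)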
Code example #10
def main(_):
  # Import data
  # mnist = input_data.read_data_sets(FLAGS.data_dir)
  mndata = MNIST('data/fashion')
  mndata.gz = True
  x_train, y_train = mndata.load_training()
  x_test, y_test = mndata.load_testing()

  num_classes = 10

#   a = np.array(y_train)
#   b = np.zeros((len(y_train), num_classes))
#   b[np.arange(len(y_train)), a] = 1
#   y_train = b

#   a = np.array(y_test)
#   b = np.zeros((len(y_test), num_classes))
#   b[np.arange(len(y_test)), a] = 1
#   y_test = b

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784], name='x')
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, W) + b

  # Define loss and optimizer
  y_ = tf.placeholder(tf.int64, [None], name='y_')

  # The raw formulation of cross-entropy,
  #
  #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
  #                                 reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.losses.sparse_softmax_cross_entropy on the raw
  # outputs of 'y', and then average across the batch.
  cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y)
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()
  # Train
  for _ in range(1000):
    # batch_xs, batch_ys = mnist.train.next_batch(100)
    batch_xs, batch_ys = next_batch(100, x_train, y_train)
    # print(batch_xs.shape, batch_ys.shape)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), y_)
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(
      accuracy, feed_dict={
        #   x: mnist.test.images,
        #   y_: mnist.test.labels
        x: x_test,
        y_: y_test
      }))

  saver = tf.train.Saver()
  saver.save(sess, 'model/model.ckpt')
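next_batch is not defined in the snippet; a plausible implementation that samples a random mini-batch (assumes numpy is imported as np):

def next_batch(batch_size, images, labels):
    # Draw batch_size random samples; images and labels are parallel sequences.
    idx = np.random.choice(len(images), batch_size, replace=False)
    batch_xs = np.array([images[i] for i in idx], dtype=np.float32)
    batch_ys = np.array([labels[i] for i in idx], dtype=np.int64)
    return batch_xs, batch_ys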
Code example #11
import random
import numpy as np
from mnist import MNIST
from NeuralNetwork import NeuralNetwork

# parameters
learning_rate = 3
num_training_examples_per_epoch = 100
num_epochs = 100000
num_reports = 20
network = NeuralNetwork([784, 80, 20, 10])

# setup
mnist_data = MNIST('mnist_data')
mnist_data.gz = True
training_images, training_labels = mnist_data.load_training()
testing_images, testing_labels = mnist_data.load_testing()
training_images = np.array(
    training_images) / 255  # normalization to prevent calculation overflow
testing_images = np.array(testing_images) / 255


# trains a neural network to identify handwritten digits
def main():
    print(evaluate_network())
    for i in range(num_epochs):
        images, labels = get_random_training_examples()
        network.train_on_minibatch(images,
                                   [one_hot(label) for label in labels],
                                   learning_rate)
        if i % (num_epochs // num_reports) == 0:
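get_random_training_examples, one_hot, and evaluate_network are defined elsewhere in the project; plausible sketches of the first two:

def get_random_training_examples():
    # Sample a random mini-batch of normalized images with matching labels.
    idx = np.random.choice(len(training_images),
                           num_training_examples_per_epoch, replace=False)
    return training_images[idx], [training_labels[i] for i in idx]

def one_hot(label, num_classes=10):
    # Turn an integer label into the target vector the network trains against.
    target = np.zeros(num_classes)
    target[label] = 1.0
    return target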
Code example #12
File: ga_tanh.py Project: Galit1321/neuroevolution
def setup(init_pop):
    population = []
    mndata = MNIST('./mnist_data')
    mndata.gz = True
    gen = 10000
    mutation_rate = 0  # dynamic
    elitism = int(init_pop * .1)
    sel = int(init_pop * .33)
    train_x, train_y = mndata.load_training()
    train_x = np.array(train_x) / 255.0
    test_x, test_labels = mndata.load_testing()
    test_x = np.array(test_x) / 255.0
    all_data = list(zip(train_x, train_y))
    valid_data = list(zip(test_x, test_labels))
    fitness = []
    for j in range(0, init_pop):
        population.append(create_crom(128))
    indices = list(range(len(all_data)))
    size_sample = 50
    for i in range(0, gen):
        fitness.clear()
        validation_idx = np.random.choice(indices,
                                          size=size_sample,
                                          replace=False)
        sub_set = np.array(all_data)[validation_idx]
        for crom in population:
            loss, acc = check_validation(crom, sub_set, np.tanh)
            fitness.append((loss, crom, acc))
        fitness = sorted(fitness, key=lambda tup: tup[0])
        best = fitness[0]
        if i % 100 == 0:
            print(i, " best loss:", best[0], "best acc", best[2])
            if i % 1000 == 0:
                with open('weights_tanh/weights_save' + str(i) + '.pkl',
                          'wb') as f:
                    pickle.dump(best[1], f, pickle.HIGHEST_PROTOCOL)
                size_sample = int(1.5 * size_sample) % 6000
                mutation_rate = (0.5 * (float(i + 1) / float(gen)))
                print(mutation_rate)
        chosen = selection(fitness, sel)
        children = [elem[1] for elem in fitness[:elitism]]
        for elem in chosen:
            if len(children) == init_pop:
                break
            mom, pop = elem
            child1 = crossover(mom, pop)
            children.append(mutate(child1, mutation_rate))
        population = children
    best = fitness[0]
    print(gen, " best loss:", best[0], "best acc", best[2])
    loss, acc, pred = check_test(best[1], valid_data, np.tanh)
    print("test loss:", loss, "test  acc", acc)
    with open('weights_tanh/weights_save' + '.pkl', 'wb') as f:
        pickle.dump(best[1], f, pickle.HIGHEST_PROTOCOL)
    with open("weights_tanh/weight.txt", 'w') as f:
        for key, value in best[1].items():
            f.write(key)
            for elem in value:
                f.write(str(elem) + ',\n')
    f = open("weights_tanh/test_tanh.pred", "w")
    f.write(pred[:-1])
    f.close()
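create_crom and the other GA helpers (check_validation, selection, crossover, mutate, check_test) are project-local; a hypothetical create_crom consistent with the create_crom(128) call above, encoding a 784-128-10 network as a dict of parameter arrays:

def create_crom(hidden_size):
    # Randomly initialized chromosome: one entry per weight/bias tensor.
    return {
        'W1': np.random.randn(hidden_size, 784) * 0.1,
        'b1': np.zeros(hidden_size),
        'W2': np.random.randn(10, hidden_size) * 0.1,
        'b2': np.zeros(10),
    }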
Code example #13
def get_data(dataset='digits'):
    mndata = MNIST(abspath('./emnist_data'))
    mndata.gz = True
    mndata.select_emnist(dataset)
    return mndata.load_training()
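select_emnist switches the loader between the EMNIST splits; usage of the helper above, with split names taken from the EMNIST dataset:

digit_images, digit_labels = get_data('digits')
letter_images, letter_labels = get_data('letters')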
Code example #14

import os
import numpy as np
import matplotlib.pyplot as plt
import joblib  # sklearn.externals.joblib was removed in modern scikit-learn
from skimage.feature import hog

# fix random seed for reproducibility
seed_value = 123
np.random.seed(seed_value)
# set_random_seed(seed_value)  # Keras uses its own source of randomness regardless of Theano or TensorFlow.
# In addition, TensorFlow has its own random number generator that must also be seeded by calling
# set_random_seed() immediately after seeding the NumPy random number generator.

exec(open(os.path.abspath('image_common_utils.py')).read())

#%% Source reference: http://hanzratech.in/2015/02/24/handwritten-digit-recognition-using-opencv-sklearn-and-python.html

################### Get data to Build the model #####################
# Import the modules
from mnist import MNIST

# Load the dataset
mnist_data = MNIST('./image_data/mnist_yann_lecun')
mnist_data.gz = True
images, labels = mnist_data.load_training()

# Extract the features and labels
ar_features = np.array(images, 'int16')
ar_labels = np.array(labels, 'int')
ar_features.shape, ar_labels.shape # (60000, 784), (60000,)

# View images
# View one image at a particular index
index = 1
x_image = np.reshape(ar_features[index], [28, 28])
plt.imshow(x_image)
plt.show()

# see the label
ar_labels[index]
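The hog import at the top is what this snippet builds toward; a sketch that extracts a HOG descriptor for every image (parameter values follow the referenced tutorial and are an assumption):

list_hog_fd = []
for feature in ar_features:
    fd = hog(feature.reshape((28, 28)),
             orientations=9,
             pixels_per_cell=(14, 14),
             cells_per_block=(1, 1))
    list_hog_fd.append(fd)
hog_features = np.array(list_hog_fd, 'float64')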
Code example #15
# Assumed imports for this fragment; the project-local helpers
# (one_hot, L2Reg, Network, Layer, sigmoid, softmax) are defined elsewhere.
from numpy import array, reshape
from scipy.ndimage import rotate
from mnist import MNIST


def augment(images, labels, start=10, end=20, inc=10):
    aug_images = []
    aug_labels = []
    for image, label in zip(images, labels):
        aug_images.append(image)
        aug_labels.append(label)
        for i in range(start, end, inc):
            rotated = rotate(reshape(image, (28, 28)), i, reshape=False)
            aug_images.append(reshape(rotated, (784)))
            aug_labels.append(label)
    return (array(aug_images), array(aug_labels))


mnist = MNIST("./mnist")
mnist.gz = True
images, labels = mnist.load_training()
images_test, labels_test = mnist.load_testing()

images = array(images) / 255.
images_test = array(images_test) / 255.
labels = one_hot(labels)
labels_test = one_hot(labels_test)

colours = ["b", "g", "r", "c", "m"]

l2 = L2Reg(5.0)
net1 = Network([Layer(784, 100, sigmoid, l2), Layer(100, 10, softmax, l2)])
net1.save('./weights/tmp.pkl')

net2 = Network.load('./weights/tmp.pkl')
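With the default start=10, end=20, inc=10, augment adds exactly one copy of each image rotated by 10 degrees; a usage sketch on the normalized training data:

aug_images, aug_labels = augment(images, labels)
print(aug_images.shape)  # (120000, 784): the originals plus one rotated copy each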
Code example #16
def get_train_images():
    mndata = MNIST('resources')
    mndata.gz = True
    images, labels = mndata.load_training()
    return images, labels