Example #1
def main():
    mnist = Mnist(os.path.join(os.getcwd(), "datasets/train.csv"), rows=100)
    for dataset in mnist.random()[:1]:
        # reshape from 1x784 to 28x28 and scale to 0-1
        array = numpy.asarray(dataset.data, dtype=float).reshape((28, 28)) / 255
        fig, ax = plt.subplots()
        im = ax.imshow(array, cmap='Greys', interpolation='none')
        fig.colorbar(im)
        ax.set_title(dataset.label)
        plt.show()
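Example #1 leans on a small Mnist helper that reads label,pixel rows from a CSV file. A minimal stand-in, consistent with how the class is used here and in the tests of Example #6, might look like the sketch below; the body is an assumption, not the project's actual implementation:

import csv
import random as rnd
from collections import namedtuple

Record = namedtuple("Record", ["label", "data"])

class Mnist:
    """Hypothetical loader: each CSV row is 'label,pixel0,...,pixel783'."""

    def __init__(self, path, rows=None):
        with open(path) as fh:
            records = [Record(int(row[0]), [int(p) for p in row[1:]])
                       for row in csv.reader(fh)]
        self._records = records if rows is None else records[:rows]

    def get(self):
        return self._records

    def random(self):
        return rnd.sample(self._records, len(self._records))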
Example #2
def main():
    """ EntryPoint """

    # Set Data ...
    df = pd.read_csv('mnist.csv', delimiter=',')
    dataset = df.values.tolist()
    # print(dataset)

    s = pd.Series(list(range(10)))
    dummy = pd.get_dummies(s)
    one_hot = dummy.values.tolist()
    # print(one_hot[0])

    mnist = Mnist(dataset, one_hot)
    train_inputs, validation_inputs, test_inputs = mnist.load_inputs()
    train_targets, validation_targets, test_targets = mnist.load_targets()
    print("Data loaded ...")
    # print(train_inputs[0])

    input_size = train_inputs.shape[0]  # 784
    output_size = train_targets.shape[0]  # 10
    hidden_layer_size = 50

    ann = ANN([input_size, hidden_layer_size, hidden_layer_size, output_size])

    n_epochs = 150

    # Learn ...
    validation_accuracy, epochs = ann.learning(train_inputs, train_targets,
                                               validation_inputs,
                                               validation_targets, n_epochs)

    # Test ...
    test_accuracy, classes_accuracy = ann.evaluate(test_inputs, test_targets,
                                                   mnist)

    print('Test accuracy:', test_accuracy)

    plt.figure(1)
    plt.plot(epochs, validation_accuracy)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.grid()
    plt.title('Validation Accuracy')

    plt.figure(2)
    y_pos = np.arange(10)
    plt.bar(y_pos, classes_accuracy)
    plt.xticks(y_pos, range(10))
    plt.ylabel('Accuracy')
    plt.title('Accuracy for each Class')
    plt.show()
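For reference, the pd.get_dummies table built above is just the 10x10 identity, so one_hot[label] is the target vector for that digit (newer pandas versions return booleans instead of 0/1):

import pandas as pd

one_hot = pd.get_dummies(pd.Series(range(10))).values.tolist()
print(one_hot[3])  # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0] (True/False on newer pandas)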
Example #3
File: main.py Project: VamosC/MNIST
def train():
    images, labels = process_data('./data/train-images-idx3-ubyte',
                                  './data/train-labels-idx1-ubyte')
    train_set = Mnist(images, labels)
    # train_loader = DataLoader(train_set, batch_size=64,
    #                           shuffle=True, num_workers=8, pin_memory=True)
    train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
    model = Convnet()

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=20,
                                                   gamma=0.5)
    for epoch in range(1, 11):
        aver = Averager()  # reset the running accuracy for this epoch
        model.train()
        for i, batch in enumerate(train_loader, 1):
            # image, label = [_.cuda() for _ in batch]
            image, label = batch
            score = model(image)
            loss = F.cross_entropy(score, label.long())
            acc = count_acc(score, label, aver)
            print('epoch %d batch %d acc: %f' % (epoch, i, acc))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        lr_scheduler.step()  # step the schedule after the epoch's optimizer updates (PyTorch >= 1.1 order)
        print('epoch %d acc: %f' % (epoch, aver.item()))
    save_model(model, 'model-1')
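train() calls two helpers, Averager and count_acc, that the snippet does not show. A plausible sketch consistent with how they are called (a running mean of per-batch accuracy); these definitions are assumptions, not the project's code:

import torch

class Averager:
    """Running mean; item() returns the current average."""

    def __init__(self):
        self.n = 0
        self.v = 0.0

    def add(self, x):
        self.v = (self.v * self.n + x) / (self.n + 1)
        self.n += 1

    def item(self):
        return self.v


def count_acc(score, label, aver):
    """Accuracy of one batch from raw scores, folded into the running mean."""
    pred = torch.argmax(score, dim=1)
    acc = (pred == label).float().mean().item()
    aver.add(acc)
    return acc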
Example #4
File: main.py Project: AndRossi/mnist
def main(unused_argv):

    tf.logging.set_verbosity(tf.logging.DEBUG)

    mnist = Mnist("model")

    # download training and eval data
    mnist_data = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist_data.train.images  # Returns np.array
    train_labels = np.asarray(mnist_data.train.labels, dtype=np.int32)
    eval_data = mnist_data.test.images  # Returns np.array
    eval_labels = np.asarray(mnist_data.test.labels, dtype=np.int32)

    mnist.train(train_data, train_labels)

    results = mnist.evaluate(eval_data, eval_labels)
    print(results)
Example #5
 def __init__(self, mnist_type, is_train, max_data_num, transform):
     self.is_train = is_train
     if mnist_type == "MNIST":
         self.data_type = Mnist(is_train, max_data_num)
     elif mnist_type == "MNIST_M":
         self.data_type = MnistM(is_train, max_data_num)
     elif mnist_type == "SVHN":
         self.data_type = Svhn(is_train, max_data_num)
     elif mnist_type == "SYNTHDIGITS":
         self.data_type = SynthDigits(is_train, max_data_num)
     else:
         raise ValueError("unknown mnist_type: %s" % mnist_type)
     #self.data_type = np.swapaxes(self.data_type, 0,2)
     self.file_size = self.data_type.file_size
     self.transform = transform
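To be usable with the DataLoader seen in Examples #3 and #8, this wrapper also needs __len__ and __getitem__. A hedged sketch, assuming data_type supports integer indexing and yields (image, label) pairs:

 def __len__(self):
     return self.file_size

 def __getitem__(self, idx):
     image, label = self.data_type[idx]  # assumption: data_type is indexable
     if self.transform is not None:
         image = self.transform(image)
     return image, label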
Example #6
File: test_mnist.py Project: benkeil/mnist
class TestMnist(unittest.TestCase):
    """ Tests ... """
    def setUp(self):
        self._mnist = Mnist(
            os.path.join(os.path.dirname(__file__), 'resources/mnist.csv'))

    def tearDown(self):
        pass

    def test_load_data(self):
        """
        Test case for ...
        """
        self.assertEqual(len(self._mnist.get()), 10)
        self.assertEqual(self._mnist.get()[0].label, 7)

    def test_rand(self):
        """
        Test case for ...
        """
        self.assertEqual(len(self._mnist.random()[:5]), 5)
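The class follows the standard unittest layout, so the usual entry point (not shown in the snippet) lets the file be executed directly:

if __name__ == '__main__':
    unittest.main()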
Example #7
def main():
    latent_dim = 32
    original_mnist = Mnist()
    ld_mnist = LdMnist("ld_mnist_dataset_v3")
    class_decoder = tf.keras.models.load_model(
        os.path.join("saved_models_v3", "class_decoder"))

    linear_model = create_and_train_linear_model(latent_dim, ld_mnist)
    print("Linear model evaluation:")
    evaluate(linear_model, ld_mnist, original_mnist, class_decoder)
    del linear_model

    middle_layers_model = create_and_train_middle_layers(latent_dim, ld_mnist)
    print("Middle layers trained from scratch evaluation:")
    evaluate(middle_layers_model, ld_mnist, original_mnist, class_decoder)
    del middle_layers_model
Example #8
File: main.py Project: VamosC/MNIST
def test():
    images, labels = process_data('./data/t10k-images-idx3-ubyte',
                                  './data/t10k-labels-idx1-ubyte')
    test_set = Mnist(images, labels)
    # test_loader = DataLoader(test_set, batch_size=64,
    #                          shuffle=False, num_workers=8, pin_memory=True)
    test_loader = DataLoader(test_set, batch_size=64, shuffle=False)
    model = Convnet()
    model.load_state_dict(torch.load('./model/model-1.pth'))
    model.eval()
    aver = Averager()
    with torch.no_grad():  # no gradients needed during evaluation
        for i, batch in enumerate(test_loader, 1):
            # image, label = [_.cuda() for _ in batch]
            image, label = batch
            score = model(image)
            count_acc(score, label, aver)
    print('test acc: %f' % aver.item())
Example #9
import numpy as np
import image_viewer
from fc import FullyConnected
from mnist import Mnist

mnist = Mnist()
train_x, train_y = mnist.get_train_data()
test_x, test_y = mnist.get_test_data()

fc = FullyConnected(train_x, train_y, test_x, test_y)
fc.train()

Example #10
    one_hot_label[one_hot_label == 1] = 0.99
    return one_hot_label
  
def evaluate(mnist_data, network):
    # use the mnist_data parameter instead of the module-level mnist global
    #corrects, wrongs = network.evaluate(mnist_data.trainImages, mnist_data.trainLabels)
    #logger.info("{:.2f}% Correct in training data".format((corrects / (corrects + wrongs)) * 100))
    #trainingPercent.append("{:.2f}%".format((corrects / (corrects + wrongs)) * 100))

    corrects, wrongs = network.evaluate(mnist_data.testImages, mnist_data.testLabels)
    #logger.info("{:.2f}% Correct in test data".format((corrects / (corrects + wrongs)) * 100))

    return "{:.2f}%".format((corrects / (corrects + wrongs)) * 100)


logger.debug("START")
mnist = Mnist("train-images.idx3-ubyte", "train-labels.idx1-ubyte",
               "t10k-images.idx3-ubyte",  "t10k-labels.idx1-ubyte")
shuffledList = list(range(len(mnist.trainImages)))
random.shuffle(shuffledList)

epochs = 2 
 
testPercent = []
trainingPercent = []

from network import Network

nn_batch = Network([28 * 28, 100, 10], 0.2)
nn_single = Network([28 * 28, 100, 10], 0.2)

for epoch in range(epochs):
    logger.info("epoch {}".format(epoch))
Example #11
batch_size = 32  # batch size
cat_dim = 10  # total categorical factor
con_dim = 2  # total continuous factor
rand_dim = 38
num_epochs = 30
debug_max_steps = 1000
save_epoch = 5
max_epochs = 50

#
# inputs
#

# MNIST input tensor ( with QueueRunner )
data = Mnist(batch_size=batch_size, num_epochs=num_epochs)
num_batch_per_epoch = data.train.num_batch

# input images and labels
x = data.train.image
y = data.train.label

# labels for discriminator
y_real = tf.ones(batch_size)
y_fake = tf.zeros(batch_size)

# discriminator labels ( half 1s, half 0s )
y_disc = tf.concat(axis=0, values=[y, y * 0])

#
# create generator
Example #12
    config = OrderedDict([('model_name', args.model_name),
                          ('path_dir', args.path_dir), ('i_type', args.i_type),
                          ('o_type', args.o_type), ('datasets', args.datasets),
                          ('epochs', args.epochs),
                          ('batch_size', args.batch_size),
                          ('lr_set', args.lr_set), ('ep_set', args.ep_set),
                          ('archi', args.archi)])

    return config


config = parse_args()

config['io_type'] = config['i_type'] + '2' + config['o_type']

mnist = Mnist()

config['n_output'] = mnist.out_shape

n_samples = mnist.num_examples
iter_per_epoch = int(n_samples / config['batch_size'])

train_x, train_y = mnist.train_images, mnist.train_labels
test_x, test_y = mnist.test_images, mnist.test_labels

if config['model_name'] == 'AE':
    print('Run AE')
    model = AE(config['n_output'], config['archi'])
elif config['model_name'] == 'VAE':
    print('Run VAE')
    model = VAE(config['n_output'], config['archi'])
Example #13
from flask import Flask, request, jsonify
import tensorflow as tf
from mnist import Mnist

app = Flask(__name__)

mnist = Mnist()
mnist.restore("/model.ckpt")

@app.route("/", methods=['POST'])
def what_number():

    json = request.json
    if json is None or "image" not in json or len(json["image"]) != 784:
        return jsonify(error="JSON body must include an 'image' property: an array of 784 (28 * 28) floats in [0, 1.0]")
    else:
        result = list(mnist.what_number([json["image"]]))
        return jsonify(result=result[0])

if __name__ == "__main__":
    app.run(port=3000, host='0.0.0.0')
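A hypothetical client for this service, assuming it is running locally on port 3000; the blank image below is only a placeholder:

import requests

image = [0.0] * 784  # a blank 28x28 image, pixels scaled to [0, 1]
resp = requests.post("http://localhost:3000/", json={"image": image})
print(resp.json())  # e.g. {"result": 0}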
Example #14
from mnist import Mnist

mnist = Mnist()
mnist.train(20000)
mnist.save("model.ckpt")
mnist.close()
print("done")
Example #15
    args = parser.parse_args()

    config = OrderedDict([('model_name', args.model_name),
                          ('datasets', args.datasets), ('epochs', args.epochs),
                          ('batch_size', args.batch_size),
                          ('base_lr', args.base_lr), ('n_input', args.n_input),
                          ('n_output', args.n_output), ('archi', args.archi)])

    return config


config = parse_args()

### call data ###
mnist = Mnist()
n_samples = mnist.num_examples

### call models ###
if config['model_name'] == 'AE':
    print('Run AE')
    model = AE(config['n_input'], config['n_output'], config['archi'])
elif config['model_name'] == 'VAE':
    print('Run VAE')
    model = VAE(config['n_input'], config['n_output'], config['archi'])

### make folder ###
mother_folder = config['model_name']
try:
    os.mkdir(mother_folder)
except OSError:
    pass  # assume the folder already exists
Example #16
# -*- coding:utf-8 -*-
from PIL import Image
from flask import Flask, request
from flask_cors import CORS
from mnist import Mnist
import json
from io import BytesIO

app = Flask(__name__)
CORS(app)

mn = Mnist()


@app.route("/", methods=['POST'])
def index():
    img = request.files.get("img_photo")
    byte_io = BytesIO(img.read())
    img = Image.open(byte_io)
    reco_result = mn.get_pic_number(img)
    del byte_io
    del img
    return json.dumps({"result": str(reco_result[0])})


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
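A hypothetical client that posts an image file to this endpoint, assuming the server is running locally on port 5000 and that a digit.png sample exists:

import requests

with open("digit.png", "rb") as fh:  # hypothetical sample image
    resp = requests.post("http://localhost:5000/", files={"img_photo": fh})
print(resp.json())  # e.g. {"result": "7"}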
Example #17
import time

import numpy as np
import torch

from mnist import Mnist
from mnist_cnn_pytorch import Net
from operatorDemo import *

mnist = Mnist("./data/mnist/")
n_epoch = 1


def test_pytorch():
    x_train, y_train, x_valid, y_valid = map(
        torch.tensor, (mnist.tr_x, mnist.tr_y, mnist.te_x, mnist.te_y))
    model = Net()
    print(model)
    time_start = time.time()
    x_valid = x_valid.to(torch.float32)  # avoid torch.tensor() on an existing tensor, which warns
    for i in range(n_epoch):
        model(x_valid)  # calling the module invokes forward()

    time_end = time.time()
    time_cost = time_end - time_start
    print("totally cost %.3f sec" % time_cost)


def test_numpy_serial():
    np.random.seed(1)
    test_x = mnist.te_x[:, ].reshape(10000, 28, 28, 1)
Example #18
cat_dim = 10  # total categorical factor
con_dim = 2  # total continuous factor
rand_dim = 38
debug_max_steps = 1000
# save a checkpoint every 5 epochs
save_epoch = 5
# maximum number of training epochs
# (one epoch is one full pass over the dataset)
max_epochs = 50

#
# inputs
#

# MNIST input tensor ( with QueueRunner )
data = Mnist(batch_size=batch_size)
# number of batches per epoch
num_batch_per_epoch = data.train.num_batch

# input images and labels
# (training data and their labels)
x = data.train.image
y = data.train.label

# labels for discriminator
# (assume all the real images are genuine)
y_real = tf.ones(batch_size)
y_fake = tf.zeros(batch_size)

# discriminator labels ( half 1s, half 0s )
# (concatenated into one 1-D tensor: 1s in the first part, 0s in the second)
Example #19
from collections import defaultdict
import numpy as np
from PIL import Image
from mnist import Mnist

mnist_model = Mnist()


def resolve(file_path):
    img = Image.open(file_path, 'r')
    # img.show()
    img_array = np.array(img)

    def color_to_coordinates(image):
        d = defaultdict(list)  # maps each color to its list of coordinates
        for i, u in enumerate(image):  # use the parameter rather than the enclosing img_array
            for j, v in enumerate(u):
                t = tuple(v)
                d[t].append((i, j))
        return d

    def count_to_color(clr_to_coor):
        d2 = defaultdict(list)
        for x, y in clr_to_coor.items():
            d2[len(y)].append(x)
        return d2

    def max_color(cnt_to_clr):
        k = iter(sorted(cnt_to_clr.keys(), reverse=True))
        while True:
            kk = next(k)
Example #20
File: mnistset.py Project: zenna/asl
 def refresh_inputs(x):
     mnist_dl, randomthing = x
     a = asl.refresh_iter(mnist_dl, lambda dl: Mnist(asl.util.image_data(dl)))
     return [a, random.Random(0)]
Example #21
        for graph in graphs:
            LOG.info("%40s %s", graph,
                     results.get(graph, _EMPTY_RESULT)['score'])

        # Save the best one, if we have a place to put it
        if len(graphs) > 0 and best_dir is not None:
            try:
                fn = os.path.join(best_dir, "best_%08d" % round)
                LOG.info("Writing best graph as %s", fn)
                with open(fn, "w") as fh:
                    fh.write(graphs[-1].to_json())
            except Exception as e:
                LOG.warning("Failed to write out graph as %s: %s", fn, e)

        # Now move on to the next generation
        biome.step_generation(
            cull_fraction, dict((g, r['score']) for (g, r) in results.items()))

        # Now remove any graph no longer in the biome from the results
        for graph in tuple(results.keys()):
            if graph not in biome.graphs:
                del results[graph]


# ------------------------------------------------------------------------------

if __name__ == "__main__":
    run(Mnist,
        Mnist.create_graph('seed_graph', num_mid=100),
        best_dir='/var/tmp')