import datetime
import os
import time

import torch

# The data helper matches the other examples below; the remaining project-local
# helpers (get_command_line_parser, ConvNet, BaseLine, train, evaluate) are
# assumed to be importable from this repository and are not shown in this snippet.
from prepare_data import get_mnist_data


def main():
    parser = get_command_line_parser()
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_gpu = torch.cuda.is_available()

    if use_gpu:
        print("Currently using GPU: {}".format(args.gpu))
        torch.backends.cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU")

    trainloader, testloader = get_mnist_data(train_batch_size=args.batch_size,
                                             workers=args.workers)

    print("Creating model: {}".format(args.model))
    feature_extractor = ConvNet(depth=6, input_channel=1)
    model = BaseLine(feature_extractor=feature_extractor,
                     num_base_class=10,
                     embed_size=2)

    if use_gpu:
        model = model.cuda()

    # optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=5e-04, momentum=0.9)
    optimizer_model = torch.optim.Adam(model.parameters(), lr=args.lr_model)

    if args.stepsize > 0:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer_model,
                                                    step_size=args.stepsize,
                                                    gamma=args.gamma)

    start_time = time.time()

    for epoch in range(args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))
        train(model, optimizer_model, trainloader, use_gpu, 10, epoch, args)  # 10 output classes (MNIST)

        if args.stepsize > 0:
            scheduler.step()

        if (args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0) or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            acc, err = evaluate(model,
                                testloader,
                                use_gpu,
                                10,
                                epoch,
                                args=args)
            print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #2
"""
    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""

from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
import pdb

# Import MNIST data
from prepare_data import get_mnist_data

mnist = get_mnist_data()

'''
To classify images using a bidirectional recurrent neural network, we consider
every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
we will then handle 28 sequences of 28 steps for every sample.
'''
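
# Minimal sketch of the row-as-sequence idea described above (added for
# illustration, not part of the original example): a flattened 784-pixel MNIST
# image is reshaped into 28 timesteps of 28 features each, which is the input
# shape the recurrent cells consume. The variable names here are hypothetical.
_example_flat = np.zeros((4, 28 * 28), dtype=np.float32)   # hypothetical flattened mini-batch
_example_seq = _example_flat.reshape((-1, 28, 28))         # (batch, timesteps, num_input)
assert _example_seq.shape == (4, 28, 28)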

# Training Parameters
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200

# Network Parameters
num_input = 28 # MNIST data input (img shape: 28*28)
Example #3
import os
import sys

import numpy as np

from prepare_data import get_mnist_data
from prepare_nn import create_dense_autoencoder
from misc_tools import tools_save_model, tools_load_model
from make_imgs import get_image_from_one_sample, get_images_from_dataset, del_all_images, make_html_page

(x_train, y_train), (x_test, y_test) = get_mnist_data()

filenames = ["nnets/autoencoder.json", "nnets/autoencoder.bin"]

# Load a previously trained autoencoder (architecture + weights files above).
autoencoder = tools_load_model(filenames[0], filenames[1])

encoder = autoencoder.get_layer("encoder")
decoder = autoencoder.get_layer("decoder")

autoencoder.compile(optimizer="adam", loss="binary_crossentropy")

part_size = 20
num_of_parts = 3
batch_size = part_size
row_heads = []

del_all_images()

# Process the test set in num_of_parts chunks of part_size samples each.
for part_index in range(num_of_parts):
    x_test_part = x_test[part_index * part_size:(part_index + 1) * part_size]