Code Example #1
import loader  # e.g. python-mnist's mnist.loader module

def main():
    # load the raw MNIST training images and labels from the 'all_Data' folder
    mnData = loader.MNIST('all_Data')
    images, labels = mnData.load_training()
    cost(images, labels)
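The cost function called above is not defined in this snippet. Purely as a hedged illustration, here is a minimal sketch of what such a routine might compute, assuming a softmax cross-entropy over a simple linear model; the weights parameter and everything inside are assumptions, not the original code.

import numpy as np

def cost(images, labels, weights=None):
    # Hypothetical sketch: average softmax cross-entropy of a linear model.
    # `weights` (784 x 10) is an illustrative assumption, zero-initialized
    # here purely for demonstration.
    x = np.asarray(images, dtype=np.float64) / 255.0   # scale pixels to [0, 1]
    y = np.asarray(labels, dtype=np.int64)
    if weights is None:
        weights = np.zeros((x.shape[1], 10))
    logits = x @ weights
    logits -= logits.max(axis=1, keepdims=True)        # numerical stability
    log_probs = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
    return -log_probs[np.arange(len(y)), y].mean()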
Code Example #2
import os

import torch
import torch.utils.data
from torch.optim import lr_scheduler

# project-local helper modules used below (not shown here):
# misc, loader, models, criteria, train, test, logger

def main():
    # fix the global RNG seed for reproducibility
    torch.manual_seed(0)

    # create the results folder if it does not already exist
    output_directory = misc.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')
    
    print("=> creating data loaders ...")
    if args.data == 'MNIST':
        datadir = './data/'
        all_dataset = loader.MNIST(datadir)
        # split roughly 80% / 10% / 10% into train / test / validation
        train_size = len(all_dataset) // 5 * 4
        test_size = len(all_dataset) // 10
        val_size = len(all_dataset) - (train_size + test_size)
        train_dataset, test_dataset, val_dataset = torch.utils.data.random_split(
            all_dataset, [train_size, test_size, val_size])
    else:
        raise RuntimeError('Dataset not found. '
                           'Only MNIST is supported in this example.')

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, sampler=None)
    # use a batch size of 1 for evaluation on the test set
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=1, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    print("=> data loaders created.")

    # optionally resume from a checkpoint
    if args.start_epoch != 0:
        assert os.path.isfile(args.resume), \
            "=> no checkpoint found at '{}'".format(args.resume)
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))

    # create new model
    else:
        # define model
        print("=> creating Model ({}) ...".format(args.arch))

        if args.arch == 'resnet50':
            model = models.ResNet(50)
        else:
            raise RuntimeError("model not found")

        print("=> model created.")

        
    # define loss function (criterion) and optimizer
    if args.criterion == 'cce':
        criterion = criteria.CrossEntropyLoss().cuda()
    else:
        raise RuntimeError("criterion not found")

    # note: this replaces any optimizer state restored from a checkpoint above
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                     weight_decay=args.weight_decay)
    elif args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        raise RuntimeError("optimizer not defined")

    # decay the learning rate once every third of the training run
    optimizer_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.epochs // 3)

    model = model.cuda()
    print(model)
    print("=> model transferred to GPU.")

    train_logger, test_logger = None, None

    for epoch in range(args.start_epoch, args.epochs):
        train_result = train.train(train_loader, model, criterion, optimizer)

        if train_logger is None:
            train_logger = logger.Logger(train_result, output_directory, train_csv)
        else:
            train_logger.append(train_result)

        optimizer_scheduler.step()

        # evaluate on the held-out test set
        test_result = test.validate(test_loader, model, criterion, optimizer)

        if test_logger is None:
            test_logger = logger.Logger(test_result, output_directory, test_csv)
        else:
            test_logger.append(test_result)

        # track the best validation result so far (assumes smaller is better)
        is_best = best_result is None or test_result < best_result
        if is_best:
            best_result = test_result

        misc.save_checkpoint({
            'args': args,
            'epoch': epoch,
            'arch': args.arch,
            'model': model,
            'best_result': best_result,
            'optimizer': optimizer,
        }, is_best, epoch, output_directory)

    train_logger.write_into_file('train')
    test_logger.write_into_file('test')
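Example #2 refers throughout to a module-level args object that the snippet never defines. Below is a minimal sketch of an argument parser that would supply every attribute the code uses; the attribute names are taken from the snippet, while the default values are illustrative assumptions.

import argparse

parser = argparse.ArgumentParser(description='MNIST training example')
parser.add_argument('--data', default='MNIST')
parser.add_argument('--arch', default='resnet50')
parser.add_argument('--criterion', default='cce')
parser.add_argument('--optimizer', default='SGD', choices=['Adam', 'SGD'])
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=1e-4)
parser.add_argument('--epochs', type=int, default=30)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--start-epoch', type=int, default=0)
parser.add_argument('--resume', default='', help='path to a checkpoint file')
args = parser.parse_args()

argparse converts the dashes to underscores, so args.batch_size, args.start_epoch, and args.weight_decay resolve exactly as the snippet expects.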
Code Example #3
'''
File Name: main
Author:    Shiming Luo
Date:      2018.05.06
'''

import numpy as np
import loader
from preprocessing import *
from architecture import *

if __name__ == '__main__':
    
    ### load data
    mnist = loader.MNIST('MNIST/raw/')
    train_images, train_labels = mnist.load_training()
    test_images, test_labels = mnist.load_testing()


    train_images, train_labels = preprocess(train_images, train_labels, 60000)

    #### set up train, validation, test sets
    x_train = np.matrix(train_images[:50000]) ## N1*785
    x_val = np.matrix(train_images[50000:])  ## N2*785
    t_train = np.matrix(train_labels[:50000]).T  ## N1*1
    t_val = np.matrix(train_labels[50000:]).T  ## N2*1

    #### pre-processing
    x_train = z_score(x_train) ## N1*785
    x_val = z_score(x_val) ## N2*785
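The preprocessing module imported above is not shown. As a rough illustration, z_score presumably standardizes each feature to zero mean and unit variance; this sketch is an assumption about its behavior, not the original implementation (the 785th column suggested by the shape comments is presumably an appended bias term).

import numpy as np

def z_score(x, eps=1e-8):
    # Hypothetical stand-in for preprocessing.z_score: standardize each
    # column to zero mean and unit variance; eps guards against division
    # by zero for constant features.
    x = np.asarray(x, dtype=np.float64)
    return (x - x.mean(axis=0)) / (x.std(axis=0) + eps)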
Code Example #4
import loader  # e.g. python-mnist's mnist.loader module

def main():
    # load the MNIST test set as NumPy arrays
    data_load = loader.MNIST(return_type='numpy')
    testing_im, testing_lb = data_load.load_testing()
    print(testing_lb)
Code Example #5
import numpy as np
import pylab  # matplotlib's pylab interface
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.neighbors import KNeighborsClassifier

import loader  # e.g. python-mnist's mnist.loader module

def main():
    # hard-coded results, presumably recorded from a previous run
    average_error = [
        0.118402352118, 0.106677842987, 0.113631954756, 0.124001470498,
        0.107012356003, 0.14040359204, 0.101725128759
    ]
    axis = [0, 10, 0, 1]
    pylab.plot(average_error, 'b')
    pylab.axis(axis)
    pylab.title("Error measure ")
    pylab.xlabel("average error: number of points 5000 + i * 750")
    pylab.ylabel("error measure")
    pylab.show()
    print("Retriving data set")
    data_load = loader.MNIST(return_type='numpy')
    training_im, training_lb = data_load.load_training()
    testing_im, testing_lb = data_load.load_testing()
    average_error = []

    for i in range(10):
        final_error = []
        num_points = 5000 + i * 750

        print "starting TSNE n_components run"
        # this inner loop subsamples the data so the program fits in memory;
        # it could be removed on a machine with enough RAM

        for t in range(10):
            training = []
            label = []
            testing = []
            test_lb = []
            error_rate = 0.0  # reset the running count for each repetition

            idx = np.random.randint(len(training_im), size=len(training_im))
            idxt = np.random.randint(len(testing_im), size=len(testing_im))
            # cap the test sample at the size of the test set
            n_test = min(num_points, len(testing_im))
            for j in range(num_points):
                training.append(training_im[idx[j]])
                label.append(training_lb[idx[j]])
            for j in range(n_test):
                testing.append(testing_im[idxt[j]])
                test_lb.append(testing_lb[idxt[j]])
            # reduce to 50 dimensions with PCA before running t-SNE
            pca = PCA(n_components=50)
            out_pca = pca.fit_transform(training)
            print("Start TSNE")
            model = TSNE(n_components=2)
            out_train = model.fit_transform(out_pca)
            print("Model fit")
            # t-SNE has no transform() for unseen data, so the test sample
            # is embedded separately from its own PCA projection
            out_test = model.fit_transform(pca.transform(testing))
            print("Model fit_transform")
            neigh = KNeighborsClassifier(n_neighbors=5)
            neigh.fit(out_train, label)
            predictions = neigh.predict(out_test)
            for k in range(len(predictions)):
                if predictions[k] == test_lb[k]:
                    error_rate = error_rate + 1
            # note: this counts correct predictions, so the "error" here
            # is really an accuracy fraction
            error_rate = error_rate / len(test_lb)
            print(error_rate)
            final_error.append(error_rate)
        average_final = sum(final_error) / 10.0
        print(average_final)
        average_error.append(average_final)
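Because t-SNE has no transform() for unseen data, the example embeds its train and test samples in two independent runs, which places them in unrelated coordinate systems, so nearest-neighbor comparisons across them are essentially meaningless. A common workaround, sketched below under that assumption (the function name is hypothetical), is to embed both sets jointly in one fit_transform call and split the result afterwards.

import numpy as np
from sklearn.manifold import TSNE

def joint_tsne_embed(train_pca, test_pca, n_components=2):
    # Embed train and test points in one shared t-SNE space. Both inputs
    # are assumed to be PCA-reduced arrays; the combined embedding is
    # split back into its train and test halves.
    combined = np.vstack([train_pca, test_pca])
    embedded = TSNE(n_components=n_components).fit_transform(combined)
    return embedded[:len(train_pca)], embedded[len(train_pca):]

The k-nearest-neighbors classifier can then be fit on the train half and scored on the test half of the same embedding.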