Code Example #1
import pickle

import torch


# Logger, load_data, load_model, and eval_model are project-level helpers;
# their import paths are not shown in this excerpt.
def start_evaluating(run_config, train_config, data_config, model_config):
    # hack to prevent the data loader from going on GPU 0
    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(run_config['cuda_device'])
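    # with CUDA_VISIBLE_DEVICES set, the chosen GPU is re-indexed as device 0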
    # torch.cuda.set_device(run_config['cuda_device'])
    torch.cuda.set_device(0)

    logger = Logger(run_config)

    # load the data
    train_data, val_data, test_data = load_data(data_config,
                                                train_config['batch_size'])
    if test_data is None:
        test_data = val_data

    # load the model
    print('Loading model...')
    model = load_model(model_config)

    assert run_config['resume_path'] is not None, \
        'Run path must be set for evaluation.'
    print('Loading checkpoint ' + run_config['resume_path'])
    # model = logger.load_best(model)
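    # restore the parameters saved at epoch 500 rather than the best checkpoint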
    model = logger.load_epoch(model, 500)

    print('Putting the model on the GPU...')
    model.cuda()

    model.eval()

    output = eval_model(test_data, model, train_config)
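    # pickle the evaluation output to the run's log path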
    path = os.path.join(run_config['log_root_path'], run_config['log_dir'])
    with open(path, 'wb') as f:
        pickle.dump(output, f)
Code Example #2
import torch


def start_training(run_config, train_config, data_config, model_config):
    # hack to prevent the data loader from going on GPU 0
    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(run_config['cuda_device'])
    # torch.cuda.set_device(run_config['cuda_device'])
    torch.cuda.set_device(0)

    # initialize logging and plotting
    logger = Logger(run_config)
    plotter = Plotter(logger.log_dir, run_config, train_config, model_config,
                      data_config)

    # load the data
    train_data, val_data, test_data = load_data(data_config,
                                                train_config['batch_size'])
    if val_data is None:
        val_data = test_data

    # load the model, optimizers
    print('Loading model...')
    model = load_model(model_config)
    print('Loading optimizers...')
    optimizers, schedulers = load_opt_sched(train_config, model)

    if run_config['resume_path']:
        print('Resuming checkpoint ' + run_config['resume_path'])
        model, optimizers, schedulers = logger.load_checkpoint(
            model, optimizers)

    print('Putting the model on the GPU...')
    model.cuda()

    while True:
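        # one pass: train, optionally validate, log/plot, and periodically checkpoint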
        # training
        out = train(train_data, model, optimizers, train_config, data_config)
        logger.log(out, 'Train')
        plotter.plot(out, 'Train')
        if val_data:
            # validation
            out = validate(val_data, model, train_config, data_config)
            logger.log(out, 'Val')
            plotter.plot(out, 'Val')
        if logger.save_epoch():
            logger.save_checkpoint(model, optimizers)
        logger.step()
        plotter.step()
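        # step both learning-rate schedulers after each pass over the training data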
        schedulers[0].step()
        schedulers[1].step()
        plotter.save()
Code Example #3
import matplotlib.pyplot as plt
import numpy as np
import pickle  # the original used Python 2's cPickle
import torch
from util.plotting.audio_util import write_audio

import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(run_config['cuda_device'])
# torch.cuda.set_device(run_config['cuda_device'])
torch.cuda.set_device(0)

# initialize logging
logger = Logger(run_config)

# load the data
train_data, val_data, test_data = load_data(data_config, train_config['batch_size'])
if val_data is None:
    val_data = test_data

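# use the validation split for the analysis in the rest of the script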
data = val_data

# load the model, optimizers
print('Loading model...')
model = load_model(model_config)

assert run_config['resume_path'] is not None, 'Model must be resumed from checkpoint.'

if run_config['resume_path']:
    print('Resuming checkpoint ' + run_config['resume_path'])
    model = logger.load_best(model)
Code Example #4
train_config['data_path'] = args.data_path
train_config['log_root'] = args.log_path

log_root = train_config['log_root']
log_path, log_dir = init_log(log_root, train_config)
print('Experiment: ' + log_dir)

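# the plotting handle is kept global so later code can update the same plots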
global vis
vis, handle_dict = init_plot(train_config, arch, env=log_dir)

# load data, labels
data_path = train_config['data_path']
train_loader, val_loader, label_names = load_data(
    train_config['dataset'],
    data_path,
    train_config['batch_size'],
    cuda_device=train_config['cuda_device'])

# construct model
model = get_model(train_config, arch, train_loader)

# get optimizers
(enc_opt, enc_scheduler), (dec_opt, dec_scheduler), start_epoch = get_optimizers(
    train_config, arch, model)

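# main training loop, resuming from the checkpoint's start_epoch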
for epoch in range(start_epoch + 1, 2000):
    print('Epoch: ' + str(epoch + 1))
    # train
    tic = time.time()