Example #1
import glob  # glob re-exports os, hence the glob.os.* calls below
import importlib.util
from shutil import copyfile

import torch

import lib  # project module providing FluidNetDataset

# `conf` and `resume` come from the surrounding script (not shown here).
print('Active CUDA Device: GPU', torch.cuda.current_device())

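# Rebuild the saved model's source-file name from the model directory path,
# then copy it into lib/ so it can be imported below.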
path = conf['modelDir']
path_list = path.split(glob.os.sep)
saved_model_name = glob.os.path.join(*path_list[:-1],
                                     path_list[-2] + '_saved.py')
temp_model = glob.os.path.join('lib', path_list[-2] + '_saved_print.py')
copyfile(saved_model_name, temp_model)

assert glob.os.path.isfile(temp_model), temp_model + ' does not exist!'
spec = importlib.util.spec_from_file_location('model_saved', temp_model)
model_saved = importlib.util.module_from_spec(spec)
spec.loader.exec_module(model_saved)
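# Note: spec_from_file_location / module_from_spec / exec_module is the
# documented importlib recipe for importing a module directly from a file
# path; model_saved behaves like a normally imported module afterwards.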

try:
    te = lib.FluidNetDataset(conf, 'te', save_dt=4,
                             resume=resume)  # Test instance of custom Dataset

    conf, mconf = te.createConfDict()

    print('==> overwriting conf and file_mconf')
    cpath = glob.os.path.join(conf['modelDir'],
                              conf['modelFilename'] + '_conf.pth')
    mcpath = glob.os.path.join(conf['modelDir'],
                               conf['modelFilename'] + '_mconf.pth')
    assert glob.os.path.isfile(cpath), cpath + ' does not exist!'
    assert glob.os.path.isfile(mcpath), mcpath + ' does not exist!'
    conf = torch.load(cpath)
    mconf = torch.load(mcpath)
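    # torch.load restores the pickled conf/mconf dicts saved next to the
    # model checkpoint, replacing the defaults built by createConfDict().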

    test_loader = torch.utils.data.DataLoader(te, batch_size=1,
                                              num_workers=0, shuffle=False,
                                              pin_memory=True)
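Example #1 breaks off here, inside the try block (its matching except clause is not shown). A minimal sketch of consuming such a loader, assuming only the standard DataLoader iteration protocol (the per-batch structure that FluidNetDataset yields is not specified in the excerpt):

    # Smoke-test: pull a single batch to confirm the loader works end to end.
    for batch_idx, batch in enumerate(test_loader):
        print('batch', batch_idx, 'loaded OK')
        break  # one batch is enough for a sanity check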
Example #2
    # (Excerpt begins inside an if/else that selects the shuffle setting.)
    shuffle_training = conf['shuffleTraining']
else:
    shuffle_training = not arguments.noShuffle

conf['shuffleTraining'] = not arguments.noShuffle

# Preprocessing-only mode: the script exits right after preprocessing.
if conf['preprocOriginalFluidNetDataOnly']:
    print('Running preprocessing only')
    resume = False

print('Active CUDA Device: GPU', torch.cuda.current_device())
cuda0 = torch.device('cuda:0')
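# cuda0 is a device handle for the first GPU; tensors and models can be
# placed on it with .to(cuda0).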

# Define training and test datasets
tr = lib.FluidNetDataset(conf, 'tr', save_dt=4, resume=resume)
te = lib.FluidNetDataset(conf, 'te', save_dt=4, resume=resume)

if conf['preprocOriginalFluidNetDataOnly']:
    sys.exit()

# We create two conf dicts: general params (conf) and model params (mconf).
conf, mconf = tr.createConfDict()

# Separate some variables from the conf dict. When resuming training, these
# will overwrite the saved conf (stored alongside the model).
# The user can modify them in the YAML config file or on the command line.
num_workers = arguments.numWorkers or conf['numWorkers']
batch_size = arguments.bsz or conf['batchSize']
max_epochs = arguments.maxEpochs or conf['maxEpochs']
print_training = output_mode == 'show' or output_mode == 'save'
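One subtlety in the `x or y` fallback used for num_workers, batch_size, and max_epochs above: `or` returns its right operand for any falsy left operand, so an explicit command-line value of 0 or an empty string is silently replaced by the config default. A minimal illustration:

    bsz = 0 or 16   # -> 16: an explicit 0 is overridden by the fallback
    bsz = 8 or 16   # -> 8: truthy values are kept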