Example #1
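The snippets in these examples omit their imports. A minimal import block along the following lines is assumed (a sketch only: exact paths vary with the pytorch_lightning version, and vae_models / VAEXperiment are the project's own model registry and LightningModule wrapper):

import os
import glob
import json
import yaml

import numpy as np
import torch
import torch.backends.cudnn as cudnn
from shutil import copytree, ignore_patterns

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TestTubeLogger  # older releases: pytorch_lightning.logging

from models import vae_models        # assumed project registry: name -> model class
from experiment import VAEXperiment  # assumed project LightningModule wrapper
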
def run_func(config, **kwargs):
    config = yaml.safe_load(config)

    # merge the keyword-argument overrides into the parsed config
    # (project-specific helper, presumably a recursive dict update)
    insert(config, kwargs)

    # For reproducibility
    torch.manual_seed(config['logging_params']['manual_seed'])
    np.random.seed(config['logging_params']['manual_seed'])
    cudnn.deterministic = True
    cudnn.benchmark = False

    model = vae_models[config['model_params']['name']](
        **config['model_params'])
    experiment = VAEXperiment(model, config['exp_params'])

    # note: these Trainer argument names come from an old pytorch_lightning release
    runner = Trainer(min_nb_epochs=1,
                     train_percent_check=config['exp_params']['fraction'],
                     val_percent_check=1.,
                     num_sanity_val_steps=0,
                     early_stop_callback=False,
                     checkpoint_callback=False,
                     logger=False,
                     weights_summary=None,
                     **config['trainer_params'])
    runner.fit(experiment)

    print("validation loss = ", experiment.val_loss.item())
    return experiment.val_loss.item()
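
Note that run_func takes the raw YAML text rather than a parsed dict, plus keyword overrides that insert() merges into the config. A hypothetical call (the config path and the max_epochs override are made up for illustration):

with open('configs/vae.yaml', 'r') as f:
    raw_config = f.read()

# hypothetical override, merged into the parsed config by insert()
val_loss = run_func(raw_config, max_epochs=5)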
Example #2
tt_logger = TestTubeLogger(
    save_dir=config['logging_params']['save_dir'],
    name=config['logging_params']['name'],
    debug=False,
    create_git_tag=False,
)

# For reproducibility
torch.manual_seed(config['logging_params']['manual_seed'])
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False

model = vae_models[config['model_params']['name']](**config['model_params'])
experiment = VAEXperiment(model, config['exp_params'])

runner = Trainer(default_save_path=f"{tt_logger.save_dir}",
                 min_nb_epochs=1,
                 logger=tt_logger,
                 log_save_interval=100,
                 train_percent_check=1.,
                 val_percent_check=1.,
                 num_sanity_val_steps=5,
                 early_stop_callback=False,
                 **config['trainer_params'])

print(f"======= Training {config['model_params']['name']} =======")
load_dict = torch.load(config['ckpt_path'])  # config is a plain dict, so index it
experiment.load_state_dict(load_dict['state_dict'])
experiment.cuda()
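
The snippet stops right after restoring the weights; given the "Training" banner and the pattern in the other examples, the continuation is presumably a training or evaluation call such as:

runner.fit(experiment)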
Example #3
tt_logger = TestTubeLogger(
    save_dir=config['logging_params']['save_dir'],
    name=config['logging_params']['name'],
    debug=False,
    create_git_tag=False,
)

# For reproducibility
torch.manual_seed(config['logging_params']['manual_seed'])
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False

model = vae_models[config['model_params']['name']](**config['model_params'])
experiment = VAEXperiment(model, config['exp_params'])

runner = Trainer(default_root_dir=f"{tt_logger.save_dir}",
                 min_epochs=1,
                 logger=tt_logger,
                 log_every_n_steps=100,
                 limit_train_batches=1.,
                 val_check_interval=1.,
                 num_sanity_val_steps=5,
                 **config['trainer_params'])

print(f"======= Training {config['model_params']['name']} =======")
runner.fit(experiment)

runner.save_checkpoint(
    f'{tt_logger.save_dir}{tt_logger.name}/version_{tt_logger.version}/checkpoints/manual.ckpt')
Example #4
args = parser.parse_args()

if args.gpu:
    device = 'cuda'
else:
    device = 'cpu'
with open(args.filename, 'r') as file:
    try:
        config = yaml.safe_load(file)
    except yaml.YAMLError as exc:
        print(exc)

with torch.no_grad():
    model = vae_models[config['model_params']['name']](
        **config['model_params'])
    test = VAEXperiment(model, config['exp_params'])
    # map_location keeps the weights on CPU so loading works without a GPU
    checkpoint = torch.load(args.ckpt,
                            map_location=lambda storage, loc: storage)
    test.load_state_dict(checkpoint['state_dict'])
    test = test.model
    if args.gpu:
        test = test.cuda()
    if args.eval:
        test.eval()
    if args.parallel:
        test = torch.nn.DataParallel(test)
        test = test.module  # note: this immediately unwraps, leaving the bare model

xsize = args.xsize
ysize = args.ysize
zsize = args.zsize
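
A plausible continuation (hypothetical, not recoverable from the snippet): decode an xsize-by-ysize grid of random latent vectors of dimension zsize, assuming the model exposes a decode(z) method as the PyTorch-VAE models do:

with torch.no_grad():
    # sample a batch of latent codes and decode them into images
    z = torch.randn(xsize * ysize, zsize, device=device)
    samples = test.decode(z)  # assumed model API
print(samples.shape)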
Example #5
copytree(rootdir,
         model_save_path,
         ignore=ignore_patterns('*.pyc', 'tmp*', 'logs*', 'data*'))

with open(model_save_path + 'hyperparameters.txt', 'w') as f:
    json.dump(args.__dict__, f, indent=2)

# For reproducibility
torch.manual_seed(config['logging_params']['manual_seed'])
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False
print(config['model_params'])
model = vae_models[config['model_params']['name']](
    imsize=config['exp_params']['img_size'], **config['model_params'])
experiment = VAEXperiment(model, config['exp_params'])

model_path = None
if config['model_params']['only_auxillary_training'] or config['model_params'][
        'memory_leak_training'] or resume:
    weights = [
        os.path.join(model_save_path, x) for x in os.listdir(model_save_path)
        if '.ckpt' in x
    ]
    weights.sort(key=lambda x: os.path.getmtime(x))
    if len(weights) > 0:
        model_path = weights[-1]
        print('loading: ', weights[-1])
        if config['model_params']['only_auxillary_training']:
            checkpoint = torch.load(model_path)
            experiment.load_state_dict(checkpoint['state_dict'])
Example #6
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False

# previously: '{}/{}/version_{}'.format(config['logging_params']['save_dir'],
#             config['logging_params']['name'], tt_logger.version)
model_save_path = os.getcwd()
parent = '/'.join(model_save_path.split('/')[:-3])
config['logging_params']['save_dir'] = os.path.join(
    parent, config['logging_params']['save_dir'])
config['exp_params']['data_path'] = os.path.join(
    parent, config['exp_params']['data_path'])
print(parent, config['exp_params']['data_path'])

model = vae_models[config['model_params']['name']](
    imsize=config['exp_params']['img_size'], **config['model_params'])
experiment = VAEXperiment(model, config['exp_params'])

weights = [x for x in os.listdir(model_save_path) if '.ckpt' in x]
weights.sort(key=lambda x: os.path.getmtime(x))
load_weight = weights[-1]
print('loading: ', load_weight)

checkpoint = torch.load(load_weight)
experiment.load_state_dict(checkpoint['state_dict'])
_ = experiment.train_dataloader()
experiment.eval()
experiment.freeze()
experiment.sample_interpolate(
    save_dir=config['logging_params']['save_dir'],
    name=config['logging_params']['name'],
    version=config['logging_params']['version'],
)
Example #7
tt_logger = TestTubeLogger(
    save_dir=config['logging_params']['save_dir'],
    name=config['logging_params']['name'],
    debug=False,
    create_git_tag=False,
    version=config['logging_params']['version'],
)

# For reproducibility
torch.manual_seed(config['logging_params']['manual_seed'])
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False

model = vae_models[config['model_params']['name']](
    imsize=config['exp_params']['img_size'], **config['model_params'])
experiment = VAEXperiment(model, config['exp_params'])
model_save_path = '{}/{}/version_{}'.format(
    config['logging_params']['save_dir'], config['logging_params']['name'],
    tt_logger.version)

if config['logging_params']['resume'] is None:
    weights = [
        os.path.join(model_save_path, x) for x in os.listdir(model_save_path)
        if '.ckpt' in x
    ]
    weights.sort(key=lambda x: os.path.getmtime(x))
    model_path = weights[-1]
    print('loading: ', model_path)
    experiment = VAEXperiment.load_from_checkpoint(model_path,
                                                   vae_model=model,
                                                   params=config['exp_params'])
Example #8
args = parser.parse_args()

if args.gpu:
    device = 'cuda'
else:
    device = 'cpu'
with open(args.filename, 'r') as file:
    try:
        config = yaml.safe_load(file)
    except yaml.YAMLError as exc:
        print(exc)

with torch.no_grad():
    model = vae_models[config['model_params']['name']](
        **config['model_params'])
    test = VAEXperiment(model, config['exp_params'])
    checkpoint = torch.load(args.ckpt,
                            map_location=lambda storage, loc: storage)
    test.load_state_dict(checkpoint['state_dict'])
    test = test.model
    if args.gpu:
        test = test.cuda()
    if args.eval:
        test.eval()

height = args.height
width = args.width
size = args.size
eps = args.epsilon
array = np.fromfile(args.input, dtype=np.float32).reshape((height, width))
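
One hypothetical way the loaded array might then be used, assuming a single-channel model whose forward() returns [reconstruction, input, mu, log_var] as in PyTorch-VAE (how args.epsilon enters is not recoverable from the snippet):

# reshape the flat array into a (batch, channel, H, W) tensor
x = torch.from_numpy(array).view(1, 1, height, width)
if args.gpu:
    x = x.cuda()
with torch.no_grad():
    recons = test(x)[0]  # assumed forward() return layout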
Example #9
tt_logger = TestTubeLogger(
    save_dir=config['logging_params']['save_dir'],
    name=config['logging_params']['name'],
    version=config['logging_params']['version'],
    debug=False,
    create_git_tag=False,
    prefix='vae_',
)

# For reproducibility
torch.manual_seed(config['logging_params']['manual_seed'])
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False

model = vae_models[config['model_params']['name']](**config['model_params'])
experiment = VAEXperiment(model, config['exp_params'])

runner = Trainer(default_root_dir=f"{tt_logger.save_dir}",
                 logger=tt_logger,
                 val_check_interval=1.,
                 num_sanity_val_steps=5,
                 **config['trainer_params'])

print(f"======= Training {config['model_params']['name']} =======")
if not args.reg_only:
    runner.fit(experiment)

# NN_Reg part starts here...

dir_path = f"{config['logging_params']['save_dir']}/{config['logging_params']['name']}/version_{config['logging_params']['version']}"
ckpt_path = glob.glob(dir_path + '/checkpoints/*')[0]
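
A plausible next step, mirroring the loading pattern in Example #7, is to restore the trained weights from that checkpoint before the NN_Reg stage:

experiment = VAEXperiment.load_from_checkpoint(ckpt_path,
                                               vae_model=model,
                                               params=config['exp_params'])
experiment.eval()
experiment.freeze()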
Example #10
tt_logger = TestTubeLogger(
    save_dir=config['logging_params']['save_dir'],
    name=config['logging_params']['name'],
    debug=False,
    create_git_tag=False,
)

# For reproducibility
torch.manual_seed(config['logging_params']['manual_seed'])
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False

model = vae_models[config['model_params']['name']](**config['model_params'])
experiment = VAEXperiment(model, config['exp_params'])

# Han: above mostly unchanged, now substituting the trainer with influence analysis

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# currently not working for model loading, possibly due to a pytorch_lightning version mismatch
if args.model_ckpt_dir:
    checkpoints = sorted(
        glob.glob(os.path.join(args.model_ckpt_dir, "*.ckpt"),
                  recursive=True))
    #     trained_ckpt = pl_load(trained_ckpt_file, map_location=lambda storage, loc: storage)
    #     model.load_state_dict(trained_ckpt['state_dict'])
    trained_ckpt = torch.load(checkpoints[-1])
    experiment.load_state_dict(trained_ckpt['state_dict'])