def optimize_parses(model, parse_list, target_imgs, iterations=1500):
    """Jointly optimize a list of parses against target images.

    Collects render-level and stroke-level parameters from every parse,
    builds a two-group Adam optimizer (separate learning rates per group),
    and runs the shared parse-list optimization loop.

    Parameters
    ----------
    model : opt.FullModel
        Supplies `losses_fn` (optimization objective) and
        `likelihood_losses_fn` (used when tuning blur).
    parse_list : list
        Parse modules exposing `render_params` and `stroke_params`.
    target_imgs : torch.Tensor
        Target images the parses are fit to.
    iterations : int
        Number of optimization steps.

    Returns
    -------
    tuple
        `(parse_list, parse_scores)` — the parses (optimized in place)
        and the negated final losses, one score per parse.
    """
    # Gather parameters across all parses into the two LR groups.
    render_params, stroke_params = [], []
    for parse in parse_list:
        render_params.extend(parse.render_params)
        stroke_params.extend(parse.stroke_params)
    optimizer = torch.optim.Adam([
        {'params': render_params, 'lr': 0.306983},
        {'params': stroke_params, 'lr': 0.044114},
    ])

    # Run and time the optimization loop.
    t0 = time.time()
    losses, states = opt.optimize_parselist(
        parse_list, target_imgs,
        loss_fn=model.losses_fn,
        iterations=iterations,
        optimizer=optimizer,
        tune_blur=True,
        tune_fn=model.likelihood_losses_fn
    )
    elapsed = time.time() - t0
    # Brief pause before reporting (kept from original; presumably lets
    # asynchronous work/printing settle — confirm if still needed).
    time.sleep(0.5)
    print('Took %s' % time_string(elapsed))

    # Score is the negated final loss: higher score = better fit.
    parse_scores = -losses[-1]
    return parse_list, parse_scores
def optimize_parses(run_id, iterations=1500, reverse=False, dry_run=False):
    """Load base parses for one classification run, optimize them with
    the full model, and save the tuned parses to disk.

    NOTE(review): this re-defines `optimize_parses` with a different
    signature; if the (model, parse_list, ...) variant lives in the same
    module, this definition shadows it — confirm that is intended.

    Parameters
    ----------
    run_id : int
        Zero-based run index; run NN on disk is `run_id + 1`.
    iterations : int
        Number of optimization steps.
    reverse : bool
        Swap train/test image roles when loading parses.
    dry_run : bool
        If True, run the optimization but skip creating/saving outputs.
    """
    run_dir = './results/run%0.2i' % (run_id+1)
    load_dir = os.path.join(run_dir, 'base_parses')
    save_dir = os.path.join(run_dir, 'tuned_parses')
    assert os.path.exists(run_dir)
    assert os.path.exists(load_dir)
    if not dry_run:
        mkdir(save_dir)

    print('Loading model and data...')
    type_model = TypeModel().eval()
    token_model = TokenModel()
    renderer = Renderer()
    # Move everything to GPU when one is available.
    if torch.cuda.is_available():
        torch.backends.cudnn.enabled = False
        type_model = type_model.cuda()
        token_model = token_model.cuda()
        renderer = renderer.cuda()
    # Assemble the full model from its three components.
    model = opt.FullModel(
        renderer=renderer, type_model=type_model,
        token_model=token_model, denormalize=True)

    print('Loading data...')
    # Select the classification run and load its images + base parses.
    dataset = ClassificationDataset(osc_folder='./one-shot-classification')
    run = dataset.runs[run_id]
    base_parses, images, K_per_img = load_parses(run, load_dir, reverse)
    assert len(base_parses) == len(images)
    print('total # parses: %i' % len(images))

    print('Optimizing parses...')
    # Wrap each base parse in a token-level Parse module, then build a
    # two-group Adam optimizer over all parses' parameters.
    parse_list = [opt.ParseWithToken(p) for p in base_parses]
    render_params, stroke_params = [], []
    for parse in parse_list:
        render_params.extend(parse.render_params)
        stroke_params.extend(parse.stroke_params)
    optimizer = torch.optim.Adam([
        {'params': render_params, 'lr': 0.306983},
        {'params': stroke_params, 'lr': 0.044114},
    ])

    # Run and time the optimization loop.
    t0 = time.time()
    losses, states = opt.optimize_parselist(
        parse_list, images,
        loss_fn=model.losses_fn,
        iterations=iterations,
        optimizer=optimizer,
        tune_blur=True,
        tune_fn=model.likelihood_losses_fn
    )
    elapsed = time.time() - t0
    time.sleep(0.5)
    print('Took %s' % time_string(elapsed))
    if dry_run:
        return

    # Higher score = lower final loss; persist results for refitting.
    parse_scores = -losses[-1]
    save_new_parses(parse_list, parse_scores, save_dir, K_per_img, reverse)
def refit_parses_single(run_id, test_id, iterations=1500, reverse=False,
                        run=None, dry_run=False):
    """Refit the tuned parses of one run against a single test image.

    Loads the tuned parses produced by `optimize_parses`, expands the
    chosen test image across all parses, and re-optimizes token-level
    parameters (only those with `requires_grad`) plus render parameters.

    Parameters
    ----------
    run_id : int
        Zero-based run index; run NN on disk is `run_id + 1`.
    test_id : int
        Index of the test image (train image when `reverse=True`).
    iterations : int
        Number of optimization steps.
    reverse : bool
        Swap train/test image roles.
    run : optional
        Pre-loaded classification run; loaded from disk when None.
    dry_run : bool
        If True, run the optimization but skip creating/saving outputs.
    """
    # BUGFIX: was './run%0.2i', which never matches where optimize_parses
    # writes 'tuned_parses' ('./results/runNN/'); the os.path.exists
    # asserts below would always fail on the pipeline's own output.
    run_dir = './results/run%0.2i' % (run_id+1)
    load_dir = os.path.join(run_dir, 'tuned_parses')
    save_dir = os.path.join(run_dir, 'refitted_parses')
    assert os.path.exists(run_dir)
    assert os.path.exists(load_dir)
    if not dry_run:
        mkdir(save_dir)

    print('Loading model...')
    # Token-level refit only: no type model here, larger blur kernel.
    token_model = TokenModel()
    renderer = Renderer(blur_fsize=21)
    if torch.cuda.is_available():
        token_model = token_model.cuda()
        renderer = renderer.cuda()
    model = opt.FullModel(renderer=renderer, token_model=token_model)

    print('Loading parses...')
    # Load the classification run (unless supplied) and pick the image
    # to refit against, honoring the reverse (train<->test) flag.
    if run is None:
        dataset = ClassificationDataset(osc_folder='./one-shot-classification')
        run = dataset.runs[run_id]
    if reverse:
        ntrain = len(run.test_imgs)
        test_img = torch.from_numpy(run.train_imgs[test_id]).float()
    else:
        ntrain = len(run.train_imgs)
        test_img = torch.from_numpy(run.test_imgs[test_id]).float()
    if torch.cuda.is_available():
        test_img = test_img.cuda()

    # Load tuned parses and broadcast the single 105x105 target image
    # across all of them.
    parse_list, K_per_img = load_tuned_parses(load_dir, ntrain, reverse)
    images = test_img.expand(len(parse_list), 105, 105)
    print('total # parses: %i' % len(images))

    print('Optimizing parses...')
    # Two LR groups; stroke params are filtered to the trainable subset
    # (frozen params stay fixed during the token-level refit).
    render_params = [p for parse in parse_list for p in parse.render_params]
    stroke_params = [p for parse in parse_list for p in parse.stroke_params
                     if p.requires_grad]
    param_groups = [
        {'params': render_params, 'lr': 0.087992},
        {'params': stroke_params, 'lr': 0.166810}
    ]
    optimizer = torch.optim.Adam(param_groups)

    # Run and time the optimization loop.
    start_time = time.time()
    losses, states = opt.optimize_parselist(
        parse_list, images,
        loss_fn=model.losses_fn,
        iterations=iterations,
        optimizer=optimizer,
        tune_blur=True,
        tune_fn=model.likelihood_losses_fn
    )
    total_time = time.time() - start_time
    time.sleep(0.5)
    print('Took %s' % time_string(total_time))
    if dry_run:
        return

    # Higher score = lower final loss; persist per-test-image results.
    parse_scores = -losses[-1]
    save_new_parses(parse_list, parse_scores, save_dir,
                    K_per_img, test_id, reverse)