def load_model(self, logdir, step):
    '''Load a model (identified by the config used for construction) and return it.'''
    # 1. Construct the model
    model = registry.construct('model', self.config['model'],
                               preproc=self.model_preproc, device=self.device)
    model.to(self.device)
    model.eval()

    # 2. Restore its parameters
    saver = saver_mod.Saver({"model": model})
    last_step = saver.restore(logdir, step=step, map_location=self.device,
                              item_keys=["model"])
    if not last_step:
        raise Exception(f"Attempting to infer on untrained model in {logdir}, step={step}")
    return model
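# A minimal usage sketch for load_model (the run directory, step, and items below are
# illustrative placeholders, not paths or data from this repo):
#
#     model = self.load_model('logdir/my_run', step=40000)
#     with torch.no_grad():
#         beams = self._infer_one(model, orig_item, preproc_item,
#                                 beam_size=1, output_history=False, use_heuristic=True)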
def train(self, config, modeldir):
    # Slight difference here vs. the unrefactored train: init_random starts over here.
    # Could be fixed, if it were important, by saving the random state at the end of init.
    with self.init_random:
        # We may be able to move optimizer and lr_scheduler to __init__ instead.
        # Empirically it works fine; I think that's because saver.restore resets the
        # state by calling optimizer.load_state_dict. But if there is no saved file yet,
        # that is probably not true, so the optimizer might need to be reset manually.
        # For now, creating it from scratch each time is safer and appears to be the same
        # speed, but it also means the config has to be passed into train(), which is ugly.
        # TODO: not nice
        if config["optimizer"].get("name", None) == 'bertAdamw':
            bert_params = list(self.model.encoder.bert_model.parameters())
            assert len(bert_params) > 0
            non_bert_params = []
            for name, _param in self.model.named_parameters():
                if "bert" not in name:
                    non_bert_params.append(_param)
            assert len(non_bert_params) + len(bert_params) == len(list(self.model.parameters()))

            optimizer = registry.construct('optimizer', config['optimizer'],
                                           non_bert_params=non_bert_params,
                                           bert_params=bert_params)
            lr_scheduler = registry.construct(
                'lr_scheduler',
                config.get('lr_scheduler', {'name': 'noop'}),
                param_groups=[optimizer.non_bert_param_group,
                              optimizer.bert_param_group])
        else:
            optimizer = registry.construct('optimizer', config['optimizer'],
                                           params=self.model.parameters())
            lr_scheduler = registry.construct(
                'lr_scheduler',
                config.get('lr_scheduler', {'name': 'noop'}),
                param_groups=optimizer.param_groups)

    # 2. Restore model parameters
    saver = saver_mod.Saver({
        "model": self.model,
        "optimizer": optimizer
    }, keep_every_n=self.train_config.keep_every_n)
    last_step = saver.restore(modeldir, map_location=self.device)

    if "pretrain" in config and last_step == 0:
        pretrain_config = config["pretrain"]
        _path = pretrain_config["pretrained_path"]
        _step = pretrain_config["checkpoint_step"]
        pretrain_step = saver.restore(_path, step=_step, map_location=self.device,
                                      item_keys=["model"])
        saver.save(modeldir, pretrain_step)  # for evaluating pretrained models
        last_step = pretrain_step

    # 3. Get training data somewhere
    with self.data_random:
        train_data = self.model_preproc.dataset('train')
        train_data_loader = self._yield_batches_from_epochs(
            torch.utils.data.DataLoader(
                train_data,
                batch_size=self.train_config.batch_size,
                shuffle=True,
                drop_last=True,
                collate_fn=lambda x: x))
    train_eval_data_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=self.train_config.eval_batch_size,
        collate_fn=lambda x: x)

    val_data = self.model_preproc.dataset('val')
    val_data_loader = torch.utils.data.DataLoader(
        val_data,
        batch_size=self.train_config.eval_batch_size,
        collate_fn=lambda x: x)

    # 4. Start training loop
    with self.data_random:
        for batch in train_data_loader:
            # Quit if too long
            if last_step >= self.train_config.max_steps:
                break

            # Evaluate model
            if last_step % self.train_config.eval_every_n == 0:
                if self.train_config.eval_on_train:
                    self._eval_model(
                        self.logger, self.model, last_step, train_eval_data_loader, 'train',
                        num_eval_items=self.train_config.num_eval_items)
                if self.train_config.eval_on_val:
                    self._eval_model(
                        self.logger, self.model, last_step, val_data_loader, 'val',
                        num_eval_items=self.train_config.num_eval_items)

            # Compute and apply gradient
            with self.model_random:
                for _i in range(self.train_config.num_batch_accumulated):
                    if _i > 0:
                        batch = next(train_data_loader)
                    loss = self.model.compute_loss(batch)
                    norm_loss = loss / self.train_config.num_batch_accumulated
                    norm_loss.backward()

                if self.train_config.clip_grad:
                    # NOTE: only valid with the bertAdamw optimizer, which exposes bert_param_group.
                    torch.nn.utils.clip_grad_norm_(optimizer.bert_param_group["params"],
                                                   self.train_config.clip_grad)
                optimizer.step()
                lr_scheduler.update_lr(last_step)
                optimizer.zero_grad()

            # Report metrics
            if last_step % self.train_config.report_every_n == 0:
                self.logger.log(f'Step {last_step}: loss={loss.item():.4f}')

            last_step += 1
            # Run saver
            if last_step == 1 or last_step % self.train_config.save_every_n == 0:
                saver.save(modeldir, last_step)

    # Save final model
    saver.save(modeldir, last_step)
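# Both train() variants consume train_data_loader with `for batch in ...` and, inside the
# gradient-accumulation loop, with `next(...)`. That only works if
# _yield_batches_from_epochs wraps the DataLoader in an endless generator that keeps
# starting fresh (reshuffled) epochs. A minimal sketch under that assumption; the repo's
# actual implementation is not shown in this excerpt:
@staticmethod
def _yield_batches_from_epochs(loader):
    while True:
        for batch in loader:
            yield batch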
def train(self, config, modeldir, trainset, valset):
    # Slight difference here vs. the unrefactored train: init_random starts over here.
    # Could be fixed, if it were important, by saving the random state at the end of init.
    with self.init_random:
        # See the note in the train() variant above on why the optimizer is constructed
        # from scratch here rather than in __init__.
        # TODO: not nice
        if config["optimizer"].get("name", None) == 'bertAdamw':
            bert_params = list(self.model.encoder.bert_model.parameters())
            assert len(bert_params) > 0
            non_bert_params = []
            for name, _param in self.model.named_parameters():
                if "bert" not in name:
                    non_bert_params.append(_param)
            assert len(non_bert_params) + len(bert_params) == len(list(self.model.parameters()))

            optimizer = registry.construct('optimizer', config['optimizer'],
                                           non_bert_params=non_bert_params,
                                           bert_params=bert_params)
        else:
            optimizer = registry.construct('optimizer', config['optimizer'],
                                           params=self.model.parameters())

    # 2. Restore model parameters
    saver = saver_mod.Saver({
        "model": self.model,
        "optimizer": optimizer
    }, keep_every_n=self.train_config.keep_every_n)
    last_step = saver.restore(modeldir, map_location=self.device)

    if "pretrain" in config and last_step == 0:
        pretrain_config = config["pretrain"]
        _path = pretrain_config["pretrained_path"]
        _step = pretrain_config["checkpoint_step"]
        pretrain_step = saver.restore(_path, step=_step, map_location=self.device,
                                      item_keys=["model"])
        print("pretrain restored! pretrain step: %d" % pretrain_step)
        saver.save(modeldir, pretrain_step)  # for evaluating pretrained models
        # last_step = pretrain_step

    # 3. Get training data somewhere
    with self.data_random:
        train_data = self.model_preproc.dataset(trainset)
        train_data_loader = self._yield_batches_from_epochs(
            torch.utils.data.DataLoader(
                train_data,
                batch_size=self.train_config.batch_size,
                shuffle=True,
                drop_last=True,
                collate_fn=lambda x: x))
    train_eval_data_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=self.train_config.eval_batch_size,
        collate_fn=lambda x: x)

    val_data = self.model_preproc.dataset(valset)
    # Report preprocessed items whose target tree is missing, and drop such items from
    # the val set so evaluation does not crash on them.
    print("train: ")
    for _item in train_data.components[1]:
        if _item.tree is None:
            print("?")
    dev_badidxs = []
    print("val: ")
    for _idx, _item in enumerate(val_data.components[1]):
        if _item.tree is None:
            dev_badidxs.append(_idx)
            print("!")
    assert len(val_data.components[0]) == len(val_data.components[1])
    new_first = []
    new_second = []
    for _idx in range(len(val_data.components[1])):
        if _idx not in dev_badidxs:
            new_first.append(val_data.components[0][_idx])
            new_second.append(val_data.components[1][_idx])
    val_data.components = copy.deepcopy((new_first, new_second))

    val_data_loader = torch.utils.data.DataLoader(
        val_data,
        batch_size=self.train_config.eval_batch_size,
        collate_fn=lambda x: x)

    # 4. Start training loop
    with self.data_random:
        last_val_loss = None
        lr_decay_countdown = MAX_LRDECAY_COUNTDOWN
        for batch in train_data_loader:
            # Quit if too long
            if last_step >= self.train_config.max_steps:
                break

            # Evaluate model
            if last_step % self.train_config.eval_every_n == 0:
                if self.train_config.eval_on_train:
                    train_loss = self._eval_model(
                        self.logger, self.model, last_step, train_eval_data_loader, 'train',
                        num_eval_items=self.train_config.num_eval_items)
                if self.train_config.eval_on_val:
                    eval_loss = self._eval_model(
                        self.logger, self.model, last_step, val_data_loader, 'val',
                        num_eval_items=self.train_config.num_eval_items)
                    # Decay the learning rate once the val loss has failed to improve for
                    # MAX_LRDECAY_COUNTDOWN consecutive evaluations.
                    if last_val_loss is None or eval_loss < last_val_loss:
                        last_val_loss = eval_loss
                        lr_decay_countdown = MAX_LRDECAY_COUNTDOWN
                    elif lr_decay_countdown > 0:
                        lr_decay_countdown -= 1
                    else:
                        current_lr = None
                        for p in optimizer.param_groups:
                            p['lr'] *= LR_DECAY_RATE
                            current_lr = p['lr']
                        self.logger.log(f'LR decay: down to {current_lr}')
                if DEBUG:
                    # Force a decay step at every evaluation when debugging.
                    current_lr = None
                    for p in optimizer.param_groups:
                        p['lr'] *= LR_DECAY_RATE
                        current_lr = p['lr']
                    self.logger.log(f'LR decay: down to {current_lr}')

            # Compute and apply gradient
            with self.model_random:
                for _i in range(self.train_config.num_batch_accumulated):
                    if _i > 0:
                        batch = next(train_data_loader)
                    loss = self.model.compute_loss(batch)
                    norm_loss = loss / self.train_config.num_batch_accumulated
                    norm_loss.backward()

                if self.train_config.clip_grad:
                    # NOTE: only valid with the bertAdamw optimizer, which exposes bert_param_group.
                    torch.nn.utils.clip_grad_norm_(optimizer.bert_param_group["params"],
                                                   self.train_config.clip_grad)
                optimizer.step()
                optimizer.zero_grad()

            # Report metrics
            if last_step % self.train_config.report_every_n == 0:
                self.logger.log(f'Step {last_step}: loss={loss.item():.4f}')

            last_step += 1
            # Run saver
            if last_step == 1 or last_step % self.train_config.save_every_n == 0:
                saver.save(modeldir, last_step)
                print("model saved at %d step" % last_step)

    # Save final model
    saver.save(modeldir, last_step)
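# The plateau-based decay above relies on three module-level constants that are not
# defined in this excerpt. The values below are illustrative placeholders (assumptions),
# not the repo's actual settings:
MAX_LRDECAY_COUNTDOWN = 3  # evaluations without val-loss improvement before decaying
LR_DECAY_RATE = 0.5        # multiplicative factor applied to each param group's lr
DEBUG = False              # when True, forces a decay step at every evaluation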
def finetune_on_database(self, infer_output_path, database, config, model_load_dir,
                         beam_size, output_history, use_heuristic, metrics_list, scores,
                         take_grad_steps=True, batch_size="1"):
    if database:
        current_infer_output_path = infer_output_path + "/" + database
    else:
        current_infer_output_path = infer_output_path + "/" + "entire_val"
    os.makedirs(os.path.dirname(current_infer_output_path), exist_ok=True)
    infer_output = open(current_infer_output_path, 'w')

    spider_data = registry.construct('dataset', self.config['data']['val'], database=database)
    val_data = self.model_preproc.dataset('val', database=database)
    # val_data_loader = self._yield_batches_from_epochs(
    #     torch.utils.data.DataLoader(val_data, batch_size=1, collate_fn=lambda x: x,
    #                                 shuffle=False))
    assert len(val_data) == len(spider_data)
    if len(val_data) == 0:
        return
    if batch_size == "32":
        if len(val_data) < 32:
            return

    print("database:", database)
    if batch_size == "n^2":
        indices = np.random.permutation(self.get_no_repeat_data_indices(spider_data))
        print("length of data:", len(val_data))
        print("length of data after removing repeat entries:", len(indices))
    else:
        indices = np.random.permutation(len(val_data))

    # TODO: RANDOMIZE DATA
    optimizer, lr_scheduler = self.construct_optimizer_and_lr_scheduler(config)
    saver = saver_mod.Saver({
        "model": self.model,
        "optimizer": optimizer
    }, keep_every_n=self.finetune_config.keep_every_n)
    last_step = saver.restore(model_load_dir, map_location=self.device)
    self.logger.log(f"Loaded trained model; last_step:{last_step}")

    current_batch = []
    clear_batch = False
    current_number = 0
    for i in tqdm.tqdm(indices):
        current_number += 1
        orig_item, preproc_item = spider_data[i], val_data[i]
        with torch.no_grad():
            decoded = self._infer_one(self.model, orig_item, preproc_item,
                                      beam_size, output_history, use_heuristic)
        infer_output.write(
            json.dumps({
                'index': int(i),
                'beams': decoded,
            }) + '\n')
        infer_output.flush()

        if take_grad_steps:
            if batch_size == "1":
                current_batch = [preproc_item]
            elif batch_size == "32":
                if current_number % 32 != 0:
                    current_batch.append(preproc_item)
                    clear_batch = False
                    continue
                else:
                    clear_batch = True
            else:
                current_batch.append(preproc_item)

            try:
                with self.model_random:
                    loss = self.model.compute_loss(current_batch)
                    norm_loss = loss / self.finetune_config.num_batch_accumulated
                    norm_loss.backward()

                    if self.finetune_config.clip_grad:
                        torch.nn.utils.clip_grad_norm_(optimizer.bert_param_group["params"],
                                                       self.finetune_config.clip_grad)
                    optimizer.step()
                    lr_scheduler.update_lr(last_step)
                    optimizer.zero_grad()
                    if clear_batch:
                        current_batch = []
                    # stats = self._eval_model(self.logger, self.model, last_step, batch, 'val',
                    #                          self.finetune_config.report_every_n)
                    # val_losses.append(stats['loss'])
            except KeyError:
                self.logger.log("keyError")
                current_batch = []
                continue
            # except AssertionError:
            #     self.logger.log("AssertionError")
            #     continue

    inferred = open(current_infer_output_path)
    metrics = spider_data.Metrics(spider_data)
    inferred_lines = list(inferred)
    # if len(inferred_lines) < len(spider_data):
    #     raise Exception(f'Not enough inferred: {len(inferred_lines)} vs {len(spider_data)}')
    for line in inferred_lines:
        infer_results = json.loads(line)
        if infer_results['beams']:
            inferred_code = infer_results['beams'][0]['inferred_code']
        else:
            inferred_code = None
        if 'index' in infer_results:
            metrics.add(spider_data[infer_results['index']], inferred_code)
        else:
            metrics.add(None, inferred_code, obsolete_gold_code=infer_results['gold_code'])
    final_metrics = metrics.finalize()

    metrics_list.append(final_metrics)
    # print(final_metrics['total_scores']['all']['exact'])
    scores.append((database, final_metrics['total_scores']['all']['exact'], len(indices)))

    # if last_step % self.finetune_config.save_every_n == 0:
    #     saver.save(model_save_dir + '/seed_' + seed, last_step)
    # print('scores', scores)
    # print("average score:", self.aggregate_score(scores))
    return scores
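# scores collects (database, exact-match score, number of examples) tuples, and the
# commented-out reporting above refers to an aggregate_score helper that is not shown in
# this excerpt. A minimal sketch, assuming a size-weighted average is the intended
# aggregation (an assumption, not the repo's definition):
def aggregate_score(self, scores):
    total_examples = sum(n for _db, _score, n in scores)
    if total_examples == 0:
        return 0.0
    return sum(score * n for _db, score, n in scores) / total_examples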