def test_update(nlp: TorchLanguage):
    texts = ["This is a test sentence to check\u3000model.update!"]
    labels = [{}]
    pipe: TrfModel = nlp.get_pipe(TRANSFORMERS_MODEL)
    optimizer = nlp.resume_training()
    eps = 1e-5

    def sum_param(params):
        return sum(p.sum().item() for p in params)

    def train():
        # Run one update and return how much the pipe's parameters moved.
        docs, golds = nlp._format_docs_and_golds(texts, labels)
        before = sum_param(pipe.optim_parameters())
        nlp._update_pipes(docs, golds)
        h = get_last_hidden_state_from_docs(docs)
        loss = h.sum() + torch.tensor(0.0, requires_grad=True)
        add_loss_to_docs(docs, loss)
        nlp._update_params(docs, optimizer)
        return abs(before - sum_param(pipe.optim_parameters()))

    assert train() > eps
    # freeze model
    pipe.cfg["freeze"] = True
    assert train() < eps
    # restore freeze state
    pipe.cfg["freeze"] = False
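
# The test above relies on parameter freezing stopping gradient updates. Below is
# a minimal, self-contained sketch of that behaviour in plain PyTorch (not
# camphr's API); `toy_step`, the Linear toy model, and `test_toy_freeze_sketch`
# are illustrative assumptions. An optimizer step moves the summed parameters,
# while freezing them via requires_grad_(False) leaves the sum unchanged.
import torch


def toy_step(model: torch.nn.Module, freeze: bool) -> float:
    """Run one update and return the absolute change in the parameter sum."""
    for p in model.parameters():
        p.requires_grad_(not freeze)
    optim = torch.optim.SGD(model.parameters(), lr=0.1)
    before = sum(p.sum().item() for p in model.parameters())
    loss = model(torch.randn(4, 8)).sum()
    if loss.requires_grad:  # a fully frozen model builds no autograd graph
        loss.backward()
        optim.step()
    return abs(before - sum(p.sum().item() for p in model.parameters()))


def test_toy_freeze_sketch():
    eps = 1e-5
    assert toy_step(torch.nn.Linear(8, 2), freeze=False) > eps  # parameters move
    assert toy_step(torch.nn.Linear(8, 2), freeze=True) < eps  # frozen parameters stay put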
def train(
    cfg: Config,
    nlp: TorchLanguage,
    train_data: InputData,
    val_data: InputData,
    savedir: Path,
) -> None:
    eval_fn = EVAL_FN_MAP[cfg.task]
    optim = nlp.resume_training()
    scheduler = load_scheduler(cfg, optim)
    for i in range(cfg.niter):
        random.shuffle(train_data)
        train_epoch(cfg, nlp, optim, train_data, val_data, i, eval_fn)
        scheduler.step()  # type: ignore # (https://github.com/pytorch/pytorch/pull/26531)
        scores = eval_fn(cfg, nlp, val_data)
        nlp.meta.update({"score": scores, "config": OmegaConf.to_container(cfg)})
        save_model(nlp, savedir / str(i))
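
# A minimal, self-contained sketch of the same epoch loop in plain PyTorch
# (helper names `run_epoch`, `evaluate`, and `toy_training_loop` are
# illustrative assumptions, not camphr's API): shuffle the data, train one
# epoch, advance the LR scheduler once per epoch after the optimizer has
# stepped, then evaluate and checkpoint.
import random

import torch
import torch.nn.functional as F


def run_epoch(model, optim, data):
    for x, y in data:
        optim.zero_grad()
        F.mse_loss(model(x), y).backward()
        optim.step()


def evaluate(model, data):
    with torch.no_grad():
        return sum(F.mse_loss(model(x), y).item() for x, y in data) / len(data)


def toy_training_loop(niter: int = 3):
    model = torch.nn.Linear(8, 1)
    optim = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=1, gamma=0.9)
    train_data = [(torch.randn(4, 8), torch.randn(4, 1)) for _ in range(10)]
    val_data = [(torch.randn(4, 8), torch.randn(4, 1)) for _ in range(2)]
    for i in range(niter):
        random.shuffle(train_data)
        run_epoch(model, optim, train_data)
        scheduler.step()  # once per epoch, after the optimizer steps
        score = evaluate(model, val_data)
        torch.save({"epoch": i, "score": score, "model": model.state_dict()}, f"checkpoint_{i}.pt")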