def do_train(cfg, train_loader, valid_dict, tr_comp: TrainComponent, saver):
    """Run standard supervised training of ``tr_comp.model``.

    Builds a supervised trainer (with apex mixed precision when
    ``cfg.APEX.IF_ON`` is set), attaches an evaluator over ``valid_dict``,
    and delegates the actual epoch loop to ``run``.
    """
    supervised_trainer = create_supervised_trainer(
        tr_comp.model,
        tr_comp.optimizer,
        tr_comp.loss,
        device=cfg.MODEL.DEVICE,
        apex=cfg.APEX.IF_ON,
    )
    evaluator = Eval(valid_dict, cfg.MODEL.DEVICE)
    evaluator.get_valid_eval_map(cfg, tr_comp.model)
    run(cfg, train_loader, tr_comp, saver, supervised_trainer, evaluator)
def fine_tune_current_model(cfg, train_loader, valid_dict, tr_comp, saver, *, max_epochs=60):
    """Fine-tune only the head of ``tr_comp.model`` (backbone frozen).

    Freezes every parameter of ``tr_comp.model.base`` so that gradients flow
    only through the remaining (head) layers, then trains for ``max_epochs``
    epochs on a deep copy of ``cfg`` (the caller's config is left untouched).

    Args:
        cfg: project config node; ``cfg.MODEL.DEVICE`` and ``cfg.APEX.IF_ON``
            are read here, and ``TRAIN.MAX_EPOCHS`` is overridden on a copy.
        train_loader: training data loader passed through to ``run``.
        valid_dict: validation datasets for the evaluator.
        tr_comp: TrainComponent holding model/optimizer/loss.
        saver: checkpoint saver passed through to ``run``.
        max_epochs: number of fine-tuning epochs (keyword-only; default 60,
            previously a hard-coded constant).
    """
    # Freeze the backbone so fine-tuning updates only the head.
    for param in tr_comp.model.base.parameters():
        param.requires_grad = False
    trainer = create_supervised_trainer(
        tr_comp.model,
        tr_comp.optimizer,
        tr_comp.loss,
        device=cfg.MODEL.DEVICE,
        apex=cfg.APEX.IF_ON,
    )
    evaler = Eval(valid_dict, cfg.MODEL.DEVICE)
    evaler.get_valid_eval_map(cfg, tr_comp.model)
    # Mutate only a deep copy so the caller's cfg keeps its epoch budget.
    copy_cfg = copy.deepcopy(cfg)
    copy_cfg["TRAIN"]["MAX_EPOCHS"] = max_epochs
    run(copy_cfg, train_loader, tr_comp, saver, trainer, evaler)
def train_autoencoder(cfg, train_loader, valid_dict, source_tr_comp: TrainComponent, current_tr_comp: TrainComponent, saver, *, max_epochs=90):
    """Train ``current_tr_comp``'s autoencoder against the source model.

    Builds an autoencoder trainer that feeds the (fixed) source model's
    features into the current autoencoder, then runs for ``max_epochs``
    epochs on a deep copy of ``cfg`` (the caller's config is left untouched).

    Args:
        cfg: project config node; ``cfg.MODEL.DEVICE`` and ``cfg.APEX.IF_ON``
            are read here, and ``TRAIN.MAX_EPOCHS`` is overridden on a copy.
        train_loader: training data loader passed through to ``run``.
        valid_dict: validation datasets for the evaluator.
        source_tr_comp: TrainComponent providing the source (feature) model.
        current_tr_comp: TrainComponent holding the autoencoder being trained;
            its model/optimizer/loss drive the update step.
        saver: checkpoint saver passed through to ``run``.
        max_epochs: number of autoencoder training epochs (keyword-only;
            default 90, previously a hard-coded constant).
    """
    trainer = create_autoencoder_trainer(
        source_tr_comp.model,
        current_tr_comp.model,
        current_tr_comp.optimizer,
        current_tr_comp.loss,
        device=cfg.MODEL.DEVICE,
        apex=cfg.APEX.IF_ON,
    )
    evaler = Eval(valid_dict, cfg.MODEL.DEVICE)
    evaler.get_valid_eval_map_autoencoder(cfg, source_tr_comp.model, current_tr_comp.model)
    # Mutate only a deep copy so the caller's cfg keeps its epoch budget.
    copy_cfg = copy.deepcopy(cfg)
    copy_cfg["TRAIN"]["MAX_EPOCHS"] = max_epochs
    run(copy_cfg, train_loader, current_tr_comp, saver, trainer, evaler)
def ebll_train(cfg, train_loader, valid_dict, source_tr_comp, current_tr_comp, autoencoder_tr, saver):
    """Run the EBLL (Encoder-Based Lifelong Learning) training stage.

    Unfreezes the current model's backbone, builds an EBLL trainer that
    combines the source model, the trained autoencoder, and the current
    model, then delegates the training loop to ``run``.
    """
    # Re-enable gradients on the backbone (it may have been frozen earlier).
    for p in current_tr_comp.model.base.parameters():
        p.requires_grad = True

    ebll_trainer = create_ebll_trainer(
        source_tr_comp.model,
        autoencoder_tr.model,
        current_tr_comp.model,
        current_tr_comp.optimizer,
        current_tr_comp.loss,
        apex=cfg.APEX.IF_ON,
        device=cfg.MODEL.DEVICE,
    )

    evaluator = Eval(valid_dict, cfg.MODEL.DEVICE)
    evaluator.get_valid_eval_map_ebll(cfg, source_tr_comp.model, current_tr_comp.model)

    run(cfg, train_loader, current_tr_comp, saver, ebll_trainer, evaluator)