import math


@classmethod
def build_lr_scheduler(cls, cfg, optimizer, iters_per_epoch):
    """
    It now calls :func:`fastreid.solver.build_lr_scheduler`.
    Overwrite it if you'd like a different scheduler.
    """
    cfg = cfg.clone()
    cfg.defrost()
    # The main scheduler only starts stepping once warmup is over *and*
    # DELAY_EPOCHS have passed (see do_train below), so shorten its horizon
    # by whichever of the two lasts longer, measured in epochs.
    cfg.SOLVER.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH - max(
        math.ceil(cfg.SOLVER.WARMUP_ITERS / iters_per_epoch),
        cfg.SOLVER.DELAY_EPOCHS,
    )
    # This is a method of a trainer class, so the call below resolves to the
    # module-level fastreid.solver.build_lr_scheduler, not to this method.
    return build_lr_scheduler(cfg, optimizer)
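# A quick numeric illustration of the MAX_EPOCH adjustment above, using
# hypothetical values (none of these numbers come from a real config):
# warmup spans ceil(2000 / 500) = 4 epochs and the delay is 30 epochs, so
# the main schedule runs over the remaining 60 - max(4, 30) = 30 epochs.
import math

max_epoch, warmup_iters, delay_epochs = 60, 2000, 30
iters_per_epoch = 500

warmup_epochs = math.ceil(warmup_iters / iters_per_epoch)  # 4
effective_epochs = max_epoch - max(warmup_epochs, delay_epochs)
assert effective_epochs == 30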
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
    """
    It now calls :func:`detectron2.solver.build_lr_scheduler`.
    Overwrite it if you'd like a different scheduler.
    """
    return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer, iters_per_epoch):
    """
    It now calls :func:`fastreid.solver.build_lr_scheduler`.
    Overwrite it if you'd like a different scheduler.
    """
    return build_lr_scheduler(cfg, optimizer, iters_per_epoch)
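# The fastreid-flavoured variants above return whatever
# fastreid.solver.build_lr_scheduler returns; do_train below indexes it as
# scheduler["warmup_sched"] and scheduler["lr_sched"], i.e. a dict holding
# two torch schedulers. A minimal self-contained sketch of that contract,
# with hypothetical scheduler choices (LinearLR warmup, cosine annealing),
# not fastreid's actual implementation:
import torch


def sketch_build_lr_scheduler(optimizer, warmup_iters, max_epoch):
    return {
        # stepped once per *iteration* while iteration <= warmup_iters
        "warmup_sched": torch.optim.lr_scheduler.LinearLR(
            optimizer, start_factor=0.01, total_iters=warmup_iters
        ),
        # stepped once per *epoch* after warmup and DELAY_EPOCHS have elapsed
        "lr_sched": torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=max_epoch
        ),
    }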
import logging
import os

import torch

# Import paths below follow fastreid's layout (which mirrors detectron2's);
# adjust them if your checkout differs.
from fastreid.data import build_reid_train_loader
from fastreid.solver import build_lr_scheduler, build_optimizer
from fastreid.utils import comm
from fastreid.utils.checkpoint import Checkpointer, PeriodicCheckpointer
from fastreid.utils.events import (
    CommonMetricPrinter,
    EventStorage,
    JSONWriter,
    TensorboardXWriter,
)

logger = logging.getLogger(__name__)


def do_train(cfg, model, resume=False):
    data_loader = build_reid_train_loader(cfg)

    model.train()
    optimizer = build_optimizer(cfg, model)

    iters_per_epoch = len(data_loader.dataset) // cfg.SOLVER.IMS_PER_BATCH
    scheduler = build_lr_scheduler(cfg, optimizer, iters_per_epoch)

    # build_lr_scheduler returns a dict ("warmup_sched" / "lr_sched");
    # unpack it so the checkpointer saves and restores both schedulers.
    checkpointer = Checkpointer(
        model,
        cfg.OUTPUT_DIR,
        save_to_disk=comm.is_main_process(),
        optimizer=optimizer,
        **scheduler,
    )

    start_epoch = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("epoch", -1) + 1
    )
    iteration = start_iter = start_epoch * iters_per_epoch

    max_epoch = cfg.SOLVER.MAX_EPOCH
    max_iter = max_epoch * iters_per_epoch
    warmup_iters = cfg.SOLVER.WARMUP_ITERS
    delay_epochs = cfg.SOLVER.DELAY_EPOCHS

    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_epoch
    )

    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    # compared to "train_net.py", we do not support some hooks, such as
    # accurate timing, FP16 training and precise BN here,
    # because they are not trivial to implement in a small training loop
    logger.info("Start training from epoch {}".format(start_epoch))
    with EventStorage(start_iter) as storage:
        for epoch in range(start_epoch, max_epoch):
            storage.epoch = epoch
            for data, _ in zip(data_loader, range(iters_per_epoch)):
                storage.iter = iteration

                loss_dict = model(data)
                losses = sum(loss_dict.values())
                assert torch.isfinite(losses).all(), loss_dict

                loss_dict_reduced = {
                    k: v.item() for k, v in comm.reduce_dict(loss_dict).items()
                }
                losses_reduced = sum(loss for loss in loss_dict_reduced.values())
                if comm.is_main_process():
                    storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

                optimizer.zero_grad()
                losses.backward()
                optimizer.step()
                storage.put_scalar(
                    "lr", optimizer.param_groups[0]["lr"], smoothing_hint=False
                )

                if iteration - start_iter > 5 and (
                    (iteration + 1) % 200 == 0 or iteration == max_iter - 1
                ):
                    for writer in writers:
                        writer.write()

                iteration += 1

                # Warmup is stepped per iteration; the main scheduler per epoch.
                if iteration <= warmup_iters:
                    scheduler["warmup_sched"].step()

            # Write metrics after each epoch
            for writer in writers:
                writer.write()

            if iteration > warmup_iters and (epoch + 1) >= delay_epochs:
                scheduler["lr_sched"].step()

            if (
                cfg.TEST.EVAL_PERIOD > 0
                and (epoch + 1) % cfg.TEST.EVAL_PERIOD == 0
                and epoch != max_epoch - 1
            ):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage

            periodic_checkpointer.step(epoch)
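# A minimal, hypothetical driver for do_train, following the usual
# detectron2/fastreid "plain_train_net" pattern. setup() is a hypothetical
# helper (typically get_cfg() + merge_from_file(args.config_file) +
# default_setup()); default_argument_parser, launch and build_model are
# assumed importable from fastreid as in the real tools.
from fastreid.engine import default_argument_parser, launch
from fastreid.modeling import build_model


def main(args):
    cfg = setup(args)  # hypothetical config-building helper, see note above
    model = build_model(cfg)
    do_train(cfg, model, resume=args.resume)


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )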