def benchmark_train(args):
    cfg = setup(args)
    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )
    optimizer = build_optimizer(cfg, model)
    checkpointer = DetectionCheckpointer(model, optimizer=optimizer)
    checkpointer.load(cfg.MODEL.WEIGHTS)
    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 2
    data_loader = build_detection_train_loader(cfg)
    dummy_data = list(itertools.islice(data_loader, 100))

    def f():
        data = DatasetFromList(dummy_data, copy=False, serialize=False)
        while True:
            yield from data

    max_iter = 400
    trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(model, f(), optimizer)
    trainer.register_hooks([
        hooks.IterationTimer(),
        hooks.PeriodicWriter([CommonMetricPrinter(max_iter)]),
        hooks.TorchProfiler(
            lambda trainer: trainer.iter == max_iter - 1, cfg.OUTPUT_DIR, save_tensorboard=True
        ),
    ])
    trainer.train(1, max_iter)
def build_optimizer(cls, cfg, model):
    if "Detr" not in cfg.MODEL.META_ARCHITECTURE:
        return build_optimizer(cfg, model)
    params: List[Dict[str, Any]] = []
    memo: Set[torch.nn.parameter.Parameter] = set()
    for key, value in model.named_parameters(recurse=True):
        if not value.requires_grad:
            continue
        # Avoid duplicating parameters
        if value in memo:
            continue
        memo.add(value)
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if "backbone" in key:
            lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]

    optimizer_type = cfg.SOLVER.OPTIMIZER
    if optimizer_type == "SGD":
        optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)
    elif optimizer_type == "ADAMW":
        optimizer = torch.optim.AdamW(params, cfg.SOLVER.BASE_LR)
    else:
        raise NotImplementedError(f"no optimizer type {optimizer_type}")
    if cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE != "full_model":
        optimizer = maybe_add_gradient_clipping(cfg, optimizer)
    return optimizer
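# A minimal, self-contained sketch (plain PyTorch; the module and hyperparameter
# names below are illustrative, not taken from the snippet above) of the
# per-parameter-group pattern used there: parameters whose names contain
# "backbone" get BASE_LR scaled by a multiplier, everything else gets BASE_LR.
import torch
import torch.nn as nn

toy_model = nn.ModuleDict({"backbone": nn.Linear(8, 8), "head": nn.Linear(8, 2)})
base_lr, backbone_multiplier, weight_decay = 1e-4, 0.1, 1e-4
param_groups = []
for name, p in toy_model.named_parameters():
    if not p.requires_grad:
        continue
    lr = base_lr * backbone_multiplier if "backbone" in name else base_lr
    param_groups.append({"params": [p], "lr": lr, "weight_decay": weight_decay})
toy_optimizer = torch.optim.AdamW(param_groups, base_lr)
# toy_optimizer.param_groups[i]["lr"] now differs per group; LR schedulers
# scale each group's lr independently.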
def benchmark_train(args):
    cfg = setup(args)
    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model,
            device_ids=[comm.get_local_rank()],
            broadcast_buffers=False,
            find_unused_parameters=True,
        )
    optimizer = build_optimizer(cfg, model)
    checkpointer = DetectionCheckpointer(model, optimizer=optimizer)
    checkpointer.load(cfg.MODEL.WEIGHTS)
    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 0
    data_loader = build_detection_train_loader(cfg)
    dummy_data = list(itertools.islice(data_loader, 100))

    def f():
        data = DatasetFromList(dummy_data, copy=False)
        while True:
            yield from data

    max_iter = 400
    trainer = SimpleTrainer(model, f(), optimizer)
    trainer.register_hooks([
        hooks.IterationTimer(),
        hooks.PeriodicWriter([CommonMetricPrinter(max_iter)]),
    ])
    trainer.train(1, max_iter)
def build_optimizer(cls, cfg, model):
    """
    Returns:
        torch.optim.Optimizer:

    It now calls :func:`detectron2.solver.build_optimizer`.
    Overwrite it if you'd like a different optimizer.
    """
    return build_optimizer(cfg, model)
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER

    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )

    writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement in a small training loop
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            storage.iter = iteration

            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (
                cfg.TEST.EVAL_PERIOD > 0
                and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
                and iteration != max_iter - 1
            ):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            if iteration - start_iter > 5 and (
                (iteration + 1) % 20 == 0 or iteration == max_iter - 1
            ):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
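# A minimal, self-contained sketch of the core loop above, stripped of the
# detectron2 plumbing (checkpointing, event storage, distributed reduction).
# All names here are illustrative; only the loss-summing / backward /
# scheduler.step() order mirrors the code above.
import torch
import torch.nn as nn

toy_model = nn.Linear(4, 1)
toy_optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.01)
toy_scheduler = torch.optim.lr_scheduler.StepLR(toy_optimizer, step_size=100, gamma=0.1)
for iteration in range(300):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    # Detection models return a dict of named losses; emulate that shape here.
    loss_dict = {"loss_reg": nn.functional.mse_loss(toy_model(x), y)}
    losses = sum(loss_dict.values())
    assert torch.isfinite(losses).all(), loss_dict
    toy_optimizer.zero_grad()
    losses.backward()
    toy_optimizer.step()
    toy_scheduler.step()  # step the LR schedule once per iteration, after the optimizer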
def __init__(self, context: PyTorchTrialContext):
    self.context = context
    self.cfg = self.setup_cfg()

    model = build_model(self.cfg)
    checkpointer = DetectionCheckpointer(model, self.cfg.OUTPUT_DIR)
    checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=False)
    self.model = self.context.wrap_model(checkpointer.model)

    optimizer = build_optimizer(self.cfg, self.model)
    self.optimizer = self.context.wrap_optimizer(optimizer)

    self.scheduler = build_lr_scheduler(self.cfg, self.optimizer)
    self.scheduler = self.context.wrap_lr_scheduler(
        self.scheduler, LRScheduler.StepMode.STEP_EVERY_BATCH
    )

    self.dataset_name = self.cfg.DATASETS.TEST[0]
    self.evaluators = get_evaluator(
        self.cfg,
        self.dataset_name,
        self.context.get_hparam("output_dir"),
        self.context.get_hparam("fake_data"),
    )
    self.val_reducer = self.context.wrap_reducer(
        EvaluatorReducer(self.evaluators), for_training=False
    )
    self.context.experimental.disable_dataset_reproducibility_checks()
def do_train(cfg, model, cat_heatmap_file, resume=False):
    model.train()
    # select optimizer and learning rate scheduler based on the config
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    # create checkpointer
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )

    # create output writers. Separate TensorBoard writers are created
    # for train and validation sets. This allows easy overlaying of graphs
    # in TensorBoard.
    train_tb_writer = os.path.join(cfg.OUTPUT_DIR, 'train')
    val_tb_writer = os.path.join(cfg.OUTPUT_DIR, 'val')
    train_writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(train_tb_writer),
        ]
        if comm.is_main_process()
        else []
    )
    val_writers = [TensorboardXWriter(val_tb_writer)]

    train_dataset_name = cfg.DATASETS.TRAIN[0]
    train_data_loader = build_detection_train_loader(cfg)
    train_eval_data_loader = build_detection_test_loader(cfg, train_dataset_name)
    val_dataset_name = cfg.DATASETS.TEST[0]
    val_eval_data_loader = build_detection_test_loader(
        cfg, val_dataset_name, DatasetMapper(cfg, True)
    )

    logger.info("Starting training from iteration {}".format(start_iter))
    train_storage = EventStorage(start_iter)
    val_storage = EventStorage(start_iter)

    # Create the training and validation evaluator objects.
    train_evaluator = get_evaluator(
        cfg,
        train_dataset_name,
        os.path.join(cfg.OUTPUT_DIR, "train_inference", train_dataset_name),
        cat_heatmap_file,
    )
    val_evaluator = get_evaluator(
        cfg,
        val_dataset_name,
        os.path.join(cfg.OUTPUT_DIR, "val_inference", val_dataset_name),
        cat_heatmap_file,
    )

    # initialize the best AP50 value
    best_AP50 = 0
    start_time = time.time()
    for train_data, iteration in zip(train_data_loader, range(start_iter, max_iter)):
        # stop if the file stop_running exists in the running directory
        if os.path.isfile('stop_running'):
            os.remove('stop_running')
            break
        iteration = iteration + 1

        # run a step with the training data
        with train_storage as storage:
            model.train()
            storage.step()
            loss_dict = model(train_data)
            losses = sum(loss for loss in loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            # periodically evaluate the training set and write the results
            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                train_eval_results = inference_on_dataset(
                    model, train_eval_data_loader, train_evaluator)
                flat_results = flatten_results(train_eval_results)
                storage.put_scalars(**flat_results)
                comm.synchronize()
            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in train_writers:
                    writer.write()
            periodic_checkpointer.step(iteration)

        # run a step with the validation set
        with val_storage as storage:
            storage.step()
            # every 20 iterations evaluate the dataset to collect the loss
            if iteration % 20 == 0 or iteration == max_iter:
                with torch.set_grad_enabled(False):
                    for input, i in zip(val_eval_data_loader, range(1)):
                        loss_dict = model(input)
                        losses = sum(loss for loss in loss_dict.values())
                        assert torch.isfinite(losses).all(), loss_dict
                        loss_dict_reduced = {
                            k: v.item() for k, v in comm.reduce_dict(loss_dict).items()
                        }
                        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
                        if comm.is_main_process():
                            storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            # periodically evaluate the validation set and write the results.
            # check the results against the best results seen and save the parameters for
            # the best result
            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    or iteration == max_iter):
                val_eval_results = inference_on_dataset(
                    model, val_eval_data_loader, val_evaluator)
                logger.info('val_eval_results {}'.format(val_eval_results))
                results = val_eval_results.get('segm', None)
                if results is None:
                    results = val_eval_results.get('bbox', None)
                if results is not None and results.get('AP50', -1) > best_AP50:
                    best_AP50 = results['AP50']
                    logger.info('saving best results ({}), iter {}'.format(best_AP50, iteration))
                    checkpointer.save("best_AP50")
                flat_results = flatten_results(val_eval_results)
                storage.put_scalars(**flat_results)
                comm.synchronize()
            if iteration - start_iter > 5 and (iteration % 20 == 0):
                for writer in val_writers:
                    writer.write()

        elapsed = time.time() - start_time
        time_per_iter = elapsed / (iteration - start_iter)
        time_left = time_per_iter * (max_iter - iteration)
        logger.info("ETA: {}".format(str(datetime.timedelta(seconds=time_left))))
def do_train(cfg_source, cfg_target, model, resume=False):
    model.train()
    print(model)
    optimizer = build_optimizer(cfg_source, model)
    scheduler = build_lr_scheduler(cfg_source, optimizer)
    checkpointer = DetectionCheckpointer(
        model, cfg_source.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg_source.MODEL.WEIGHTS, resume=resume).get("iteration", -1)
        + 1
    )
    max_iter = cfg_source.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg_source.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg_source.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg_source.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    i = 1
    max_epoch = 41.27  # max_iter / min(data_len(data_source, data_target))
    current_epoch = 0
    data_len = 1502
    alpha3 = 0
    alpha4 = 0
    alpha5 = 0
    data_loader_source = build_detection_train_loader(cfg_source)
    data_loader_target = build_detection_train_loader(cfg_target)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data_source, data_target, iteration in zip(
                data_loader_source, data_loader_target, range(start_iter, max_iter)):
            iteration = iteration + 1
            storage.step()

            if (iteration % data_len) == 0:
                current_epoch += 1
                i = 1
            p = float(i + current_epoch * data_len) / max_epoch / data_len
            alpha = 2. / (1. + np.exp(-10 * p)) - 1
            i += 1
            alpha3 = alpha
            alpha4 = alpha
            alpha5 = alpha
            if alpha3 > 0.5:
                alpha3 = 0.5
            if alpha4 > 0.5:
                alpha4 = 0.5
            if alpha5 > 0.1:
                alpha5 = 0.1

            loss_dict = model(data_source, False, alpha3, alpha4, alpha5)
            loss_dict_target = model(data_target, True, alpha3, alpha4, alpha5)
            loss_dict["loss_r3"] += loss_dict_target["loss_r3"]
            loss_dict["loss_r4"] += loss_dict_target["loss_r4"]
            loss_dict["loss_r5"] += loss_dict_target["loss_r5"]
            loss_dict["loss_r3"] *= 0.5
            loss_dict["loss_r4"] *= 0.5
            loss_dict["loss_r5"] *= 0.5
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
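# The alpha ramp above matches the gradient-reversal schedule used in DANN-style
# domain adaptation: alpha = 2 / (1 + exp(-10 p)) - 1 rises smoothly from 0
# toward 1 as training progress p goes from 0 to 1, and the code then clamps it
# per loss head. A minimal sketch (numpy only; values are illustrative):
import numpy as np

def grl_alpha(p, gamma=10.0):
    """Smoothly ramp the adversarial loss weight from 0 toward 1 over training."""
    return 2.0 / (1.0 + np.exp(-gamma * p)) - 1.0

for p in (0.0, 0.1, 0.5, 1.0):
    print(p, round(float(grl_alpha(p)), 3))  # 0.0, ~0.462, ~0.987, ~1.0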
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    # dataset | mapper | augs | sampler are set up while building the data_loader
    atoms = generate_atom_list(cfg, True)
    black_magic_mapper = BlackMagicMapper(cfg, is_train=True, augmentations=atoms)
    data_loader = build_detection_train_loader(cfg, black_magic_mapper)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            if cfg.DATALOADER.SAVE_BLACK_MAGIC_PATH != "":
                save_data_to_disk(cfg, data)
            iteration = iteration + 1
            storage.step()

            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
def _build_optimizer(self, cfg, model):
    return build_optimizer(cfg, model)
def do_relation_train(cfg, model, resume=False):
    model.train()
    for param in model.named_parameters():
        param[1].requires_grad = False
    for param in model.named_parameters():
        for trainable in cfg.MODEL.TRAINABLE:
            if param[0].startswith(trainable):
                param[1].requires_grad = True
                break
        if param[0] == "relation_heads.instance_head.semantic_embed.weight" or \
                param[0] == "relation_heads.pair_head.semantic_embed.weight" or \
                param[0] == "relation_heads.predicate_head.semantic_embed.weight" or \
                param[0] == "relation_heads.triplet_head.ins_embed.weight" or \
                param[0] == "relation_heads.triplet_head.pred_embed.weight" or \
                param[0] == "relation_heads.subpred_head.sub_embed.weight" or \
                param[0] == "relation_heads.subpred_head.pred_embed.weight" or \
                param[0] == "relation_heads.predobj_head.pred_embed.weight" or \
                param[0] == "relation_heads.predobj_head.obj_embed.weight" or \
                param[0].startswith("relation_heads.predicate_head.freq_bias.obj_baseline.weight"):
            param[1].requires_grad = False

    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    metrics_sum_dict = {
        'relation_cls_tp_sum': 0, 'relation_cls_p_sum': 0.00001,
        'pred_class_tp_sum': 0, 'pred_class_p_sum': 0.00001,
        'gt_class_tp_sum': 0, 'gt_class_p_sum': 0.00001,
        'raw_pred_class_tp_sum': 0, 'raw_pred_class_p_sum': 0.00001,
        'instance_tp_sum': 0, 'instance_p_sum': 0.00001, 'instance_g_sum': 0.00001,
        'subpred_tp_sum': 0, 'subpred_p_sum': 0.00001, 'subpred_g_sum': 0.00001,
        'predobj_tp_sum': 0, 'predobj_p_sum': 0.00001, 'predobj_g_sum': 0.00001,
        'pair_tp_sum': 0, 'pair_p_sum': 0.00001, 'pair_g_sum': 0.00001,
        'confidence_tp_sum': 0, 'confidence_p_sum': 0.00001, 'confidence_g_sum': 0.00001,
        'predicate_tp_sum': 0, 'predicate_tp20_sum': 0,
        'predicate_tp50_sum': 0, 'predicate_tp100_sum': 0,
        'predicate_p_sum': 0.00001, 'predicate_p20_sum': 0.00001,
        'predicate_p50_sum': 0.00001, 'predicate_p100_sum': 0.00001,
        'predicate_g_sum': 0.00001,
        'triplet_tp_sum': 0, 'triplet_tp20_sum': 0,
        'triplet_tp50_sum': 0, 'triplet_tp100_sum': 0,
        'triplet_p_sum': 0.00001, 'triplet_p20_sum': 0.00001,
        'triplet_p50_sum': 0.00001, 'triplet_p100_sum': 0.00001,
        'triplet_g_sum': 0.00001,
    }
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler,
        metrics_sum_dict=metrics_sum_dict
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    # state_dict = torch.load(cfg.MODEL.WEIGHTS).pop("model")
    # model.load_state_dict(state_dict, strict=False)
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    # relation_cls_state_dict = torch.load(cfg.MODEL.WEIGHTS).pop("model")
    # for param in model.named_parameters():
    #     if param[0] not in relation_cls_state_dict:
    #         print(param[0])
    # model.load_state_dict(relation_cls_state_dict, strict=False)
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )
    metrics_pr_dict = {}
    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    acumulate_losses = 0
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            print(iteration)
            iteration = iteration + 1
            storage.step()
            if True:  # try:
                pred_instances, results_dict, losses_dict, metrics_dict = model(
                    data, iteration, mode="relation", training=True)
                losses = sum(loss for loss in losses_dict.values())
                assert torch.isfinite(losses).all(), losses_dict
                # print(losses_dict)
                loss_dict_reduced = {
                    k: v.item() for k, v in comm.reduce_dict(losses_dict).items()
                }
                losses_reduced = sum(loss for loss in loss_dict_reduced.values())
                acumulate_losses += losses_reduced
                if comm.is_main_process():
                    storage.put_scalars(
                        acumulate_losses=acumulate_losses / (iteration - start_iter),
                        total_loss=losses_reduced, **loss_dict_reduced)
                    if 'relation_cls_tp' in metrics_dict:
                        metrics_sum_dict['relation_cls_tp_sum'] += metrics_dict['relation_cls_tp']
                        metrics_sum_dict['relation_cls_p_sum'] += metrics_dict['relation_cls_p']
                        metrics_pr_dict['relation_cls_precision'] = (
                            metrics_sum_dict['relation_cls_tp_sum']
                            / metrics_sum_dict['relation_cls_p_sum'])
                    if 'pred_class_tp' in metrics_dict:
                        metrics_sum_dict['pred_class_tp_sum'] += metrics_dict['pred_class_tp']
                        metrics_sum_dict['pred_class_p_sum'] += metrics_dict['pred_class_p']
                        metrics_pr_dict['pred_class_precision'] = (
                            metrics_sum_dict['pred_class_tp_sum']
                            / metrics_sum_dict['pred_class_p_sum'])
                    if 'raw_pred_class_tp' in metrics_dict:
                        metrics_sum_dict['raw_pred_class_tp_sum'] += metrics_dict['raw_pred_class_tp']
                        metrics_sum_dict['raw_pred_class_p_sum'] += metrics_dict['raw_pred_class_p']
                        metrics_pr_dict['raw_pred_class_precision'] = (
                            metrics_sum_dict['raw_pred_class_tp_sum']
                            / metrics_sum_dict['raw_pred_class_p_sum'])
                    if 'gt_class_tp' in metrics_dict:
                        metrics_sum_dict['gt_class_tp_sum'] += metrics_dict['gt_class_tp']
                        metrics_sum_dict['gt_class_p_sum'] += metrics_dict['gt_class_p']
                        metrics_pr_dict['gt_class_precision'] = (
                            metrics_sum_dict['gt_class_tp_sum']
                            / metrics_sum_dict['gt_class_p_sum'])
                    if 'instance_tp' in metrics_dict:
                        metrics_sum_dict['instance_tp_sum'] += metrics_dict['instance_tp']
                        metrics_sum_dict['instance_p_sum'] += metrics_dict['instance_p']
                        metrics_sum_dict['instance_g_sum'] += metrics_dict['instance_g']
                        metrics_pr_dict['instance_precision'] = (
                            metrics_sum_dict['instance_tp_sum'] / metrics_sum_dict['instance_p_sum'])
                        metrics_pr_dict['instance_recall'] = (
                            metrics_sum_dict['instance_tp_sum'] / metrics_sum_dict['instance_g_sum'])
                    if 'subpred_tp' in metrics_dict:
                        metrics_sum_dict['subpred_tp_sum'] += metrics_dict['subpred_tp']
                        metrics_sum_dict['subpred_p_sum'] += metrics_dict['subpred_p']
                        metrics_sum_dict['subpred_g_sum'] += metrics_dict['subpred_g']
                        metrics_pr_dict['subpred_precision'] = (
                            metrics_sum_dict['subpred_tp_sum'] / metrics_sum_dict['subpred_p_sum'])
                        metrics_pr_dict['subpred_recall'] = (
                            metrics_sum_dict['subpred_tp_sum'] / metrics_sum_dict['subpred_g_sum'])
                    if 'predobj_tp' in metrics_dict:
                        metrics_sum_dict['predobj_tp_sum'] += metrics_dict['predobj_tp']
                        metrics_sum_dict['predobj_p_sum'] += metrics_dict['predobj_p']
                        metrics_sum_dict['predobj_g_sum'] += metrics_dict['predobj_g']
                        metrics_pr_dict['predobj_precision'] = (
                            metrics_sum_dict['predobj_tp_sum'] / metrics_sum_dict['predobj_p_sum'])
                        metrics_pr_dict['predobj_recall'] = (
                            metrics_sum_dict['predobj_tp_sum'] / metrics_sum_dict['predobj_g_sum'])
                    if 'pair_tp' in metrics_dict:
                        metrics_sum_dict['pair_tp_sum'] += metrics_dict['pair_tp']
                        metrics_sum_dict['pair_p_sum'] += metrics_dict['pair_p']
                        metrics_sum_dict['pair_g_sum'] += metrics_dict['pair_g']
                        metrics_pr_dict['pair_precision'] = (
                            metrics_sum_dict['pair_tp_sum'] / metrics_sum_dict['pair_p_sum'])
                        metrics_pr_dict['pair_recall'] = (
                            metrics_sum_dict['pair_tp_sum'] / metrics_sum_dict['pair_g_sum'])
                    if 'confidence_tp' in metrics_dict:
                        metrics_sum_dict['confidence_tp_sum'] += metrics_dict['confidence_tp']
                        metrics_sum_dict['confidence_p_sum'] += metrics_dict['confidence_p']
                        metrics_sum_dict['confidence_g_sum'] += metrics_dict['confidence_g']
                        metrics_pr_dict['confidence_precision'] = (
                            metrics_sum_dict['confidence_tp_sum']
                            / metrics_sum_dict['confidence_p_sum'])
                        metrics_pr_dict['confidence_recall'] = (
                            metrics_sum_dict['confidence_tp_sum']
                            / metrics_sum_dict['confidence_g_sum'])
                    if 'predicate_tp' in metrics_dict:
                        metrics_sum_dict['predicate_tp_sum'] += metrics_dict['predicate_tp']
                        metrics_sum_dict['predicate_tp20_sum'] += metrics_dict['predicate_tp20']
                        metrics_sum_dict['predicate_tp50_sum'] += metrics_dict['predicate_tp50']
                        metrics_sum_dict['predicate_tp100_sum'] += metrics_dict['predicate_tp100']
                        metrics_sum_dict['predicate_p_sum'] += metrics_dict['predicate_p']
                        metrics_sum_dict['predicate_p20_sum'] += metrics_dict['predicate_p20']
                        metrics_sum_dict['predicate_p50_sum'] += metrics_dict['predicate_p50']
                        metrics_sum_dict['predicate_p100_sum'] += metrics_dict['predicate_p100']
                        metrics_sum_dict['predicate_g_sum'] += metrics_dict['predicate_g']
                        metrics_pr_dict['predicate_precision'] = (
                            metrics_sum_dict['predicate_tp_sum'] / metrics_sum_dict['predicate_p_sum'])
                        metrics_pr_dict['predicate_precision20'] = (
                            metrics_sum_dict['predicate_tp20_sum'] / metrics_sum_dict['predicate_p20_sum'])
                        metrics_pr_dict['predicate_precision50'] = (
                            metrics_sum_dict['predicate_tp50_sum'] / metrics_sum_dict['predicate_p50_sum'])
                        metrics_pr_dict['predicate_precision100'] = (
                            metrics_sum_dict['predicate_tp100_sum'] / metrics_sum_dict['predicate_p100_sum'])
                        metrics_pr_dict['predicate_recall'] = (
                            metrics_sum_dict['predicate_tp_sum'] / metrics_sum_dict['predicate_g_sum'])
                        metrics_pr_dict['predicate_recall20'] = (
                            metrics_sum_dict['predicate_tp20_sum'] / metrics_sum_dict['predicate_g_sum'])
                        metrics_pr_dict['predicate_recall50'] = (
                            metrics_sum_dict['predicate_tp50_sum'] / metrics_sum_dict['predicate_g_sum'])
                        metrics_pr_dict['predicate_recall100'] = (
                            metrics_sum_dict['predicate_tp100_sum'] / metrics_sum_dict['predicate_g_sum'])
                    if 'triplet_tp' in metrics_dict:
                        metrics_sum_dict['triplet_tp_sum'] += metrics_dict['triplet_tp']
                        metrics_sum_dict['triplet_tp20_sum'] += metrics_dict['triplet_tp20']
                        metrics_sum_dict['triplet_tp50_sum'] += metrics_dict['triplet_tp50']
                        metrics_sum_dict['triplet_tp100_sum'] += metrics_dict['triplet_tp100']
                        metrics_sum_dict['triplet_p_sum'] += metrics_dict['triplet_p']
                        metrics_sum_dict['triplet_p20_sum'] += metrics_dict['triplet_p20']
                        metrics_sum_dict['triplet_p50_sum'] += metrics_dict['triplet_p50']
                        metrics_sum_dict['triplet_p100_sum'] += metrics_dict['triplet_p100']
                        metrics_sum_dict['triplet_g_sum'] += metrics_dict['triplet_g']
                        metrics_pr_dict['triplet_precision'] = (
                            metrics_sum_dict['triplet_tp_sum'] / metrics_sum_dict['triplet_p_sum'])
                        metrics_pr_dict['triplet_precision20'] = (
                            metrics_sum_dict['triplet_tp20_sum'] / metrics_sum_dict['triplet_p20_sum'])
                        metrics_pr_dict['triplet_precision50'] = (
                            metrics_sum_dict['triplet_tp50_sum'] / metrics_sum_dict['triplet_p50_sum'])
                        metrics_pr_dict['triplet_precision100'] = (
                            metrics_sum_dict['triplet_tp100_sum'] / metrics_sum_dict['triplet_p100_sum'])
                        metrics_pr_dict['triplet_recall'] = (
                            metrics_sum_dict['triplet_tp_sum'] / metrics_sum_dict['triplet_g_sum'])
                        metrics_pr_dict['triplet_recall20'] = (
                            metrics_sum_dict['triplet_tp20_sum'] / metrics_sum_dict['triplet_g_sum'])
                        metrics_pr_dict['triplet_recall50'] = (
                            metrics_sum_dict['triplet_tp50_sum'] / metrics_sum_dict['triplet_g_sum'])
                        metrics_pr_dict['triplet_recall100'] = (
                            metrics_sum_dict['triplet_tp100_sum'] / metrics_sum_dict['triplet_g_sum'])
                    storage.put_scalars(**metrics_pr_dict, smoothing_hint=False)

                optimizer.zero_grad()
                losses.backward()
                optimizer.step()
                storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
                scheduler.step()
                if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                    for writer in writers:
                        writer.write()
                periodic_checkpointer.step(iteration)
                torch.cuda.empty_cache()
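# The accumulation pattern above keeps running sums of true positives (tp),
# predicted positives (p), and ground-truth positives (g) per metric, seeding
# the denominators with 1e-5 to avoid division by zero. A minimal generic
# sketch of the same idea (class and names are illustrative):
class RunningPR:
    """Accumulate tp/p/g counts and expose running precision and recall."""

    def __init__(self, eps=1e-5):
        self.tp, self.p, self.g = 0, eps, eps

    def update(self, tp, p, g):
        self.tp += tp
        self.p += p
        self.g += g

    @property
    def precision(self):
        return self.tp / self.p

    @property
    def recall(self):
        return self.tp / self.g

pr = RunningPR()
pr.update(tp=8, p=10, g=12)
print(round(pr.precision, 3), round(pr.recall, 3))  # ~0.8, ~0.667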
def optimizer(self, model: nn.Module) -> torch.optim.Optimizer:  # type: ignore
    optim = build_optimizer(self.cfg, model)
    return optim
def build_optimizer(cls, cfg, model):
    """
    Returns:
        torch.optim.Optimizer:
    """
    return build_optimizer(cfg, model)
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    forward_pass_end_time = time.perf_counter()
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration_start_time = time.perf_counter()
            if comm.get_rank() == 0:
                print("Approx backwards pass duration: ",
                      iteration_start_time - forward_pass_end_time)
            iteration = iteration + 1
            storage.step()

            if iteration == 500:
                print("Iteration 500. Profiling!")
                with torch.autograd.profiler.profile(use_cuda=True, record_shapes=True) as prof:
                    loss_dict = model(data)
                    losses = sum(loss_dict.values())
                    assert torch.isfinite(losses).all(), loss_dict
                    loss_dict_reduced = {
                        k: v.item() for k, v in comm.reduce_dict(loss_dict).items()
                    }
                    losses_reduced = sum(loss for loss in loss_dict_reduced.values())
                print(prof.key_averages().table(sort_by="self_cpu_time_total"))
                prof.export_chrome_trace("/root/trace.json")
            else:
                loss_dict = model(data)
                losses = sum(loss_dict.values())
                assert torch.isfinite(losses).all(), loss_dict
                loss_dict_reduced = {
                    k: v.item() for k, v in comm.reduce_dict(loss_dict).items()
                }
                losses_reduced = sum(loss for loss in loss_dict_reduced.values())

            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            forward_pass_end_time = time.perf_counter()

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    if cfg.SOLVER.RESET_ITER:
        logger.info('Reset loaded iteration. Start training from iteration 0.')
        start_iter = 0
    max_iter = cfg.SOLVER.MAX_ITER

    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )

    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    if cfg.MULTI_DATASET.ENABLED:
        data_loader = build_multi_dataset_train_loader(cfg)
        dataset_count = {
            k: torch.tensor(0).to(comm.get_local_rank()) for k in cfg.MULTI_DATASET.DATASETS
        }
    else:
        data_loader = build_custom_train_loader(cfg)

    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        step_timer = Timer()
        data_timer = Timer()
        start_time = time.perf_counter()
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            data_time = data_timer.seconds()
            storage.put_scalars(data_time=data_time)
            step_timer.reset()
            iteration = iteration + 1
            storage.step()

            loss_dict = model(data)
            losses = sum(loss for k, loss in loss_dict.items())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)

            if cfg.MULTI_DATASET.ENABLED:
                for b in data:
                    dataset_count[cfg.MULTI_DATASET.DATASETS[b['dataset_source']]] += 1
                dataset_count_reduced = {
                    k: v for k, v in comm.reduce_dict(dataset_count).items()
                }
                if comm.is_main_process():
                    storage.put_scalars(**dataset_count_reduced)

            step_time = step_timer.seconds()
            storage.put_scalars(time=step_time)
            data_timer.reset()
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                comm.synchronize()

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)

        total_time = time.perf_counter() - start_time
        logger.info("Total training time: {}".format(
            str(datetime.timedelta(seconds=int(total_time)))))
def do_train(cfg1, model1, model2, resume=False):
    model1.train()
    optimizer = build_optimizer(cfg1, model1)
    scheduler = build_lr_scheduler(cfg1, optimizer)
    checkpointer = DetectionCheckpointer(
        model1, cfg1.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg1.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg1.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg1.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg1.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg1.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement
    data_loader = custom_train_loader(cfg1)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            storage.step()

            height = data[0]['image'].shape[1]
            width = data[0]['image'].shape[2]
            second_stream_outputs = inference_second_stream(model2, data, height, width)
            loss_dict = model1(data, second_stream_outputs)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (cfg1.TEST.EVAL_PERIOD > 0 and iteration % cfg1.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg1, model1, model2)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
        # FIXME: does not continue from iteration # when resume=True
    )
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    # init best monitor metric
    best_monitor_metric = None
    # init early stopping count
    es_count = 0

    # get train data loader
    data_loader = build_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            storage.step()

            _, losses, losses_reduced = get_loss(data, model)
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0 and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter - 1):
                results = do_test(cfg, model)
                storage.put_scalars(**results['metrics'])

                if cfg.EARLY_STOPPING.ENABLE:
                    curr = None
                    if cfg.EARLY_STOPPING.MONITOR in results['metrics'].keys():
                        curr = results['metrics'][cfg.EARLY_STOPPING.MONITOR]
                    if curr is None:
                        logger.warning(
                            "Early stopping enabled but cannot find metric: %s"
                            % cfg.EARLY_STOPPING.MONITOR)
                        logger.warning(
                            "Options for monitored metrics are: [%s]"
                            % ", ".join(map(str, results['metrics'].keys())))
                    elif best_monitor_metric is None:
                        best_monitor_metric = curr
                    elif get_es_result(cfg.EARLY_STOPPING.MODE, curr, best_monitor_metric):
                        best_monitor_metric = curr
                        es_count = 0
                        logger.info("Best metric %s improved to %0.4f"
                                    % (cfg.EARLY_STOPPING.MONITOR, curr))
                        # update best model
                        periodic_checkpointer.save(name="model_best", **{**results['metrics']})
                        # save best metrics to a .txt file
                        with open(os.path.join(cfg.OUTPUT_DIR, 'best_metrics.txt'), 'w') as f:
                            json.dump(results['metrics'], f)
                    else:
                        logger.info(
                            "Early stopping metric %s did not improve, "
                            "current %.04f, best %.04f"
                            % (cfg.EARLY_STOPPING.MONITOR, curr, best_monitor_metric))
                        es_count += 1

                storage.put_scalar('val_loss', results['metrics']['val_loss'])
                comm.synchronize()

            if iteration - start_iter > 5 and ((iteration + 1) % 20 == 0
                                               or iteration == max_iter - 1):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)

            if es_count >= cfg.EARLY_STOPPING.PATIENCE:
                logger.info(
                    "Early stopping triggered, metric %s has not improved for %s validation steps"
                    % (cfg.EARLY_STOPPING.MONITOR, cfg.EARLY_STOPPING.PATIENCE))
                break
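# A minimal, self-contained sketch of the patience-based early-stopping logic
# used above. get_es_result and the EARLY_STOPPING config keys are
# project-specific; this illustrative helper assumes "max" mode (larger is
# better, e.g. AP) and is not the project's implementation.
class EarlyStopper:
    """Track a monitored metric and signal when it stops improving."""

    def __init__(self, patience=5, mode="max"):
        self.patience, self.mode = patience, mode
        self.best = None
        self.count = 0

    def step(self, value):
        improved = (self.best is None
                    or (value > self.best if self.mode == "max" else value < self.best))
        if improved:
            self.best, self.count = value, 0
        else:
            self.count += 1
        return self.count >= self.patience  # True -> stop training

stopper = EarlyStopper(patience=3)
for ap in [0.30, 0.35, 0.34, 0.34, 0.33]:
    if stopper.step(ap):
        break  # triggered after 3 evaluations without improvement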
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    if cfg.SOLVER.RESET_ITER:
        logger.info('Reset loaded iteration. Start training from iteration 0.')
        start_iter = 0
    max_iter = cfg.SOLVER.MAX_ITER if cfg.SOLVER.TRAIN_ITER < 0 else cfg.SOLVER.TRAIN_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    mapper = DatasetMapper(cfg, True) if cfg.INPUT.CUSTOM_AUG == '' else \
        DatasetMapper(cfg, True, augmentations=build_custom_augmentation(cfg, True))
    if cfg.DATALOADER.SAMPLER_TRAIN in ['TrainingSampler', 'RepeatFactorTrainingSampler']:
        data_loader = build_detection_train_loader(cfg, mapper=mapper)
    else:
        from centernet.data.custom_dataset_dataloader import build_custom_train_loader
        data_loader = build_custom_train_loader(cfg, mapper=mapper)

    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        step_timer = Timer()
        data_timer = Timer()
        start_time = time.perf_counter()
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            data_time = data_timer.seconds()
            storage.put_scalars(data_time=data_time)
            step_timer.reset()
            iteration = iteration + 1
            storage.step()

            loss_dict = model(data)
            losses = sum(loss for k, loss in loss_dict.items())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)

            step_time = step_timer.seconds()
            storage.put_scalars(time=step_time)
            data_timer.reset()
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                comm.synchronize()

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)

        total_time = time.perf_counter() - start_time
        logger.info("Total training time: {}".format(
            str(datetime.timedelta(seconds=int(total_time)))))
def do_train(cfg, model, resume=False):
    # Put the model in training mode
    model.train()
    # Build the optimizer
    optimizer = build_optimizer(cfg, model)
    # Build the learning-rate schedule
    scheduler = build_lr_scheduler(cfg, optimizer)

    # Checkpoint manager
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    # Starting iteration, usable for resuming training
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    # Maximum number of iterations
    max_iter = cfg.SOLVER.MAX_ITER
    # PeriodicCheckpointer comes from fvcore.common.checkpoint; it saves and loads
    # the model at the configured checkpoint period
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )

    writers = (
        [
            CommonMetricPrinter(max_iter),  # prints loss and other metrics to the terminal
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    # Build the batched training data loader
    data_loader = build_detection_train_loader(cfg)
    # Build the test data loaders used for computing test losses
    test_data_loaders = []
    for dataset_name in cfg.DATASETS.TEST:
        test_data_loaders.append({
            "name": dataset_name,
            "data_loader": build_detection_test_loader(
                cfg, dataset_name, DatasetMapper(cfg, True)),
        })

    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            # Called at the start of each iteration to advance the storage cursor
            storage.step()

            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                # Put this iteration's losses into the storage histories,
                # which are read later when printing to the terminal
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            # Backpropagation
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            # Record this iteration's learning rate in storage
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            # if iteration % 21 == 0:
            #     do_loss_eval(cfg, storage, model, test_data_loaders)
            #     for writer in writers:
            #         writer.write()

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                do_loss_eval(cfg, storage, model, test_data_loaders)
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
def configure_optimizers(self):
    optimizer = build_optimizer(self.cfg, self.model)
    self._best_param_group_id = hooks.LRScheduler.get_best_param_group_id(optimizer)
    scheduler = build_lr_scheduler(self.cfg, optimizer)
    return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.001
cfg.SOLVER.MAX_ITER = 10000
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
cfg.OUTPUT_DIR = "MRIBrain/"
cfg.MODEL.DEVICE = "cuda"
cfg.SOLVER.GAMMA = 0.1
cfg.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR"
cfg.SOLVER.WARMUP_ITERS = 3000
cfg.SOLVER.WARMUP_METHOD = "linear"

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
modelo = build_model(cfg)
otimizador = build_optimizer(cfg, modelo)
scheduler = build_lr_scheduler(cfg, otimizador)
data_loader = build_detection_train_loader(cfg)
evaluator = detectron2.evaluation.COCOEvaluator(
    "teste", cfg, distributed=True, output_dir="MRIBrain/"
)
val_loader = build_detection_test_loader(cfg, "teste")

treinador = DefaultTrainer(cfg)
treinador.resume_or_load(resume=False)
treinador.train()

cfg.MODEL.WEIGHTS = os.path.join("MRIBrain/", "modelo_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.85
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER

    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    min_size = cfg.INPUT.MIN_SIZE_TRAIN
    max_size = cfg.INPUT.MAX_SIZE_TRAIN
    sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    data_loader = build_detection_train_loader(
        cfg,
        mapper=DatasetMapper(
            cfg,
            is_train=True,
            augmentations=[
                T.ResizeShortestEdge(min_size, max_size, sample_style),
                T.RandomApply(T.RandomFlip(prob=1, vertical=False), prob=0.5),
                T.RandomApply(T.RandomRotation(angle=[180], sample_style='choice'), prob=0.1),
                T.RandomApply(T.RandomRotation(angle=[-10, 10], sample_style='range'), prob=0.9),
                T.RandomApply(T.RandomBrightness(0.5, 1.5), prob=0.5),
                T.RandomApply(T.RandomContrast(0.5, 1.5), prob=0.5),
            ],
        ),
    )

    best_model_weight = copy.deepcopy(model.state_dict())
    best_val_loss = None
    data_val_loader = build_detection_test_loader(
        cfg, cfg.DATASETS.TEST[0], mapper=DatasetMapper(cfg, True)
    )
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration += 1
            start = time.time()
            storage.step()

            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                logger.setLevel(logging.CRITICAL)
                print('validating')
                val_total_loss = do_val_monitor(cfg, model, data_val_loader)
                logger.setLevel(logging.DEBUG)
                logger.info(f"validation loss of iteration {iteration}th: {val_total_loss}")
                storage.put_scalar(name='val_total_loss', value=val_total_loss)
                if best_val_loss is None or val_total_loss < best_val_loss:
                    best_val_loss = val_total_loss
                    best_model_weight = copy.deepcopy(model.state_dict())
                comm.synchronize()

            # Add a checkpointer that saves the best model based on val loss
            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()

    model.load_state_dict(best_model_weight)
    experiment_name = os.getenv('MLFLOW_EXPERIMENT_NAME')
    checkpointer.save(f'model_{experiment_name}')
    return model
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get('iteration', -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, 'metric.json')),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            storage.step()

            loss_dict = model(data)
            losses = sum(loss for loss in loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                comm.synchronize()

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
def do_train(cfg, model, resume=False):
    """
    # TODO: Write docstring
    """
    # Set the model to train
    model.train()
    # Create torch optimizer & scheduler
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    # Create a torch checkpointer
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    # Create starting checkpoint, i.e. a pre-trained model using weights from the config
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    # Define the number of iterations
    max_iter = cfg.SOLVER.MAX_ITER
    # Create a periodic checkpointer at the configured period
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    # Export checkpoint data to terminal, JSON & TensorBoard files
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    # Create a data loader to supply the model with training data
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            storage.step()

            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            # If an eval period has been set, run a test at the defined interval
            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                comm.synchronize()

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                logger.debug('Logging iteration and loss to Weights & Biases')
                wandb.log({"iteration": iteration})
                wandb.log({"total_loss": losses_reduced})
                wandb.log(loss_dict_reduced)
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
parser = add_parser_arguments(parser)
args = parser.parse_args()

torch.manual_seed(args.seed)
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
cudnn.benchmark = True
# device = torch.device("cuda")
torch.set_num_threads(2)

print("Command Line Args:", args)
cfg = setup(args)
model = build_model(cfg)
data_loader = build_detection_train_loader(cfg)

# data for memory partition
# partition_inputs, partition_targets = None, None
for inputs in data_loader:
    partition_inputs = inputs
    break

optimizer = build_optimizer(cfg, model)

# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)

optimizer, grc = wrap_compress_optimizer(model, optimizer, args)
main(cfg, args)
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    # checkpointer = DetectionCheckpointer(
    #     model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    # )
    # do not load the checkpointer's optimizer and scheduler
    checkpointer = DetectionCheckpointer(model, cfg.OUTPUT_DIR)
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    # model.load_state_dict(optimizer)
    max_iter = cfg.SOLVER.MAX_ITER
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement
    train_data_loader = build_detection_train_loader(cfg, mapper=PathwayDatasetMapper(cfg, True))
    # epoch_data_loader = build_detection_test_loader(
    #     cfg=cfg, dataset_name=cfg.DATASETS.TRAIN[0], mapper=PathwayDatasetMapper(cfg, True))
    val_data_loader = build_detection_validation_loader(
        cfg=cfg, dataset_name=cfg.DATASETS.TEST[0], mapper=PathwayDatasetMapper(cfg, False)
    )
    if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
        epoch_num = (train_data_loader.dataset.sampler._size // cfg.SOLVER.IMS_PER_BATCH) + 1
    else:
        epoch_num = train_data_loader.dataset.sampler._size // cfg.SOLVER.IMS_PER_BATCH
    # periodic_checkpointer = PeriodicCheckpointer(
    #     checkpointer,
    #     # cfg.SOLVER.CHECKPOINT_PERIOD,
    #     epoch_num,
    #     max_iter=max_iter
    # )

    logger.info("Starting training from iteration {}".format(start_iter))
    loss_weights = {'loss_cls': 1, 'loss_box_reg': 1}
    with EventStorage(start_iter) as storage:
        loss_per_epoch = 0.0
        best_loss = 99999.0
        best_val_loss = 99999.0
        better_train = False
        better_val = False
        for data, iteration in zip(train_data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            storage.step()

            loss_dict = model(data)
            losses = sum(loss for loss in loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {
                k: v.item() * loss_weights[k] for k, v in comm.reduce_dict(loss_dict).items()
            }
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            # prevent gradient explosion
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            optimizer.step()
            # if comm.is_main_process():
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            # if (
            #     # cfg.TEST.EVAL_PERIOD > 0
            #     # and
            #     iteration % epoch_num == 0
            #     # iteration % cfg.TEST.EVAL_PERIOD == 0
            #     and iteration != max_iter
            # ):
            #     do_test(cfg, model)
            #     # Compared to "train_net.py", the test results are not dumped to EventStorage
            #     comm.synchronize()

            loss_per_epoch += losses_reduced
            if iteration % epoch_num == 0 or iteration == max_iter:
                # one complete epoch
                epoch_loss = loss_per_epoch / epoch_num
                # do validation
                # epoch_loss, epoch_cls_loss, epoch_box_reg_loss = do_validation(
                #     epoch_data_loader, model, loss_weights)
                # val_loss, val_cls_loss, val_box_reg_loss = do_validation(
                #     val_data_loader, model, loss_weights)
                checkpointer.save("model_{:07d}".format(iteration), **{"iteration": iteration})
                # calculate epoch_loss and push to history cache
                # if comm.is_main_process():
                storage.put_scalar("epoch_loss", epoch_loss, smoothing_hint=False)
                # storage.put_scalar("epoch_cls_loss", epoch_cls_loss, smoothing_hint=False)
                # storage.put_scalar("epoch_box_reg_loss", epoch_box_reg_loss, smoothing_hint=False)
                # storage.put_scalar("val_loss", val_loss, smoothing_hint=False)
                # storage.put_scalar("val_cls_loss", val_cls_loss, smoothing_hint=False)
                # storage.put_scalar("val_box_reg_loss", val_box_reg_loss, smoothing_hint=False)
                for writer in writers:
                    writer.write()

                # only save improved checkpoints on epoch_loss
                # if best_loss > epoch_loss:
                #     best_loss = epoch_loss
                #     better_train = True
                # if best_val_loss > val_loss:
                #     best_val_loss = val_loss
                #     better_val = True
                # if better_val:
                #     checkpointer.save("model_{:07d}".format(iteration), **{"iteration": iteration})
                # comm.synchronize()

                # reset loss_per_epoch
                loss_per_epoch = 0.0
                # better_train = False
                # better_val = False

            del loss_dict, losses, losses_reduced, loss_dict_reduced
            torch.cuda.empty_cache()
def start_train(al_cfg, cfg, model, resume=False):
    early_stopping = EarlyStopping(
        patience=al_cfg.EARLY_STOP.PATIENCE, delta=al_cfg.EARLY_STOP.DELTA, verbose=True
    )
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            storage.step()

            loss_dict = model(data)
            losses = sum(loss for loss in loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0 and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                results = do_test(cfg, model)
                bbox_results = results['bbox']
                AP = bbox_results['AP']
                comm.synchronize()
                print('AP:', AP, '\tValue:', 1 - (AP / 100))
                early_stopping(1 - (AP / 100))
                storage.put_scalars(**bbox_results)
                if early_stopping.counter < 1:
                    checkpointer.save('model_final')

            if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)

            if early_stopping.early_stop:
                print("EARLY STOPPING INITIATED AT ITERATION:", iteration)
                # checkpointer.save('model_final')
                break
def do_train(cfg, model, resume=False):
    # Set model to training mode
    model.train()
    # Create optimizer from config file (returns a torch.optim.Optimizer)
    optimizer = build_optimizer(cfg, model)
    # Create learning rate scheduler (returns a torch.optim.lr_scheduler._LRScheduler)
    scheduler = build_lr_scheduler(cfg, optimizer)
    print(f"Scheduler: {scheduler}")
    # Create checkpointer
    checkpointer = DetectionCheckpointer(model,
                                         save_dir=cfg.OUTPUT_DIR,
                                         optimizer=optimizer,
                                         scheduler=scheduler)
    # Get the start iteration (references the checkpointer) -
    # https://detectron2.readthedocs.io/modules/checkpoint.html#detectron2.checkpoint.Checkpointer.resume_or_load
    start_iter = (  # This can be 0
        checkpointer.resume_or_load(
            cfg.MODEL.WEIGHTS,  # Use predefined model weights (pretrained model)
            resume=resume).get("iteration", -1) + 1)
    # Set max number of iterations
    max_iter = cfg.SOLVER.MAX_ITER
    # Create periodic checkpointer
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer=checkpointer,
        # How often to make checkpoints
        period=cfg.SOLVER.CHECKPOINT_PERIOD,
        max_iter=max_iter)
    # Create writers (for logging metrics)
    writers = ([
        # Print common metrics such as iteration time, ETA, memory, all losses, learning rate
        CommonMetricPrinter(max_iter=max_iter),
        # Write scalars to a JSON file, such as loss values, time and more
        JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
        # Write all scalars such as loss values to a TensorBoard file for easy visualization
        TensorboardXWriter(cfg.OUTPUT_DIR),
    ] if comm.is_main_process() else [])
    ### Original note from script: ###
    # compared to "train_net.py", we do not support accurate timing and precise BN
    # here, because they are not trivial to implement
    # Build a training data loader based off the training dataset name in the config
    data_loader = build_detection_train_loader(cfg)
    # Start logging
    logger.info("Starting training from iteration {}".format(start_iter))
    # Store events
    with EventStorage(start_iter) as storage:
        # Loop over the data loader zipped with the iteration range
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            # Update storage with the step -
            # https://detectron2.readthedocs.io/modules/utils.html#detectron2.utils.events.EventStorage.step
            storage.step()
            # Run the model on the data to get the loss dictionary
            loss_dict = model(data)
            losses = sum(loss_dict.values())
            # Losses must be finite; if not, something is wrong
            assert torch.isfinite(losses).all(), loss_dict
            # Average the loss dict across all workers so the logged values
            # are consistent in distributed training
            loss_dict_reduced = {
                k: v.item()
                for k, v in comm.reduce_dict(loss_dict).items()
            }
            # Sum up losses
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            # # TODO: wandb.log()? log the losses
            # wandb.log({
            #     "Total loss": losses_reduced
            # })
            # Update storage
            if comm.is_main_process():
                # Store information in storage -
                # https://detectron2.readthedocs.io/modules/utils.html#detectron2.utils.events.EventStorage.put_scalars
                storage.put_scalars(total_loss=losses_reduced,
                                    **loss_dict_reduced)
            # Standard PyTorch update step
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            # Add learning rate to storage information
            storage.put_scalar("lr",
                               optimizer.param_groups[0]["lr"],
                               smoothing_hint=False)
            # This is required for the learning rate to change!
            # (without it the learning rate stayed at 0)
            scheduler.step()
            # Perform evaluation
            if (cfg.TEST.EVAL_PERIOD > 0
                    and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()
            # Log different metrics with writers
            if iteration - start_iter > 5 and (iteration % 20 == 0
                                               or iteration == max_iter):
                for writer in writers:
                    writer.write()
            # Update the periodic_checkpointer
            periodic_checkpointer.step(iteration)
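# For context, a typical entry point driving the annotated do_train() above,
# following detectron2's plain_train_net.py pattern. setup() is an assumed
# helper (build + freeze the config from args) and is not shown in this file;
# default_argument_parser and launch come from detectron2.engine.
def main(args):
    cfg = setup(args)            # assumed: create and freeze the config
    model = build_model(cfg)     # construct the model described by cfg
    do_train(cfg, model, resume=args.resume)

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )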
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(model,
                                         cfg.OUTPUT_DIR,
                                         optimizer=optimizer,
                                         scheduler=scheduler)
    # second checkpointer that writes to the spot-instance checkpoint dir
    checkpointer_spot = DetectionCheckpointer(model,
                                              '/opt/ml/checkpoints',
                                              optimizer=optimizer,
                                              scheduler=scheduler)
    start_iter = (checkpointer.resume_or_load(
        cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(checkpointer,
                                                 cfg.SOLVER.CHECKPOINT_PERIOD,
                                                 max_iter=max_iter)
    periodic_checkpointer_spot = PeriodicCheckpointer(
        checkpointer_spot, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter)
    writers = ([
        CommonMetricPrinter(max_iter),
        JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
        TensorboardXWriter(cfg.OUTPUT_DIR),
    ] if comm.is_main_process() else [])
    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement
    # data_loader = build_detection_train_loader(cfg)
    data_loader = build_detection_train_loader(
        cfg,
        # mapper=DatasetMapper(cfg, is_train=True,
        #                      augmentations=[
        #                          T.Resize((1024, 1024)),
        #                          T.RandomBrightness(.75, 1.25),
        #                          T.RandomFlip(),
        #                          T.RandomSaturation(.75, 1.25),
        #                      ])
    )
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            storage.step()
            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict
            loss_dict_reduced = {
                k: v.item()
                for k, v in comm.reduce_dict(loss_dict).items()
            }
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced,
                                    **loss_dict_reduced)
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr",
                               optimizer.param_groups[0]["lr"],
                               smoothing_hint=False)
            scheduler.step()
            if (cfg.TEST.EVAL_PERIOD > 0
                    and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()
            if iteration % 500 == 0:
                try:
                    torch.save(model.state_dict(),
                               f'{cfg.OUTPUT_DIR}/model_{iteration}.pth')
                except Exception as e:  # avoid a bare except that hides the error
                    print(f'save failed: {e}')
            if iteration - start_iter > 5 and (iteration % 20 == 0
                                               or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
            periodic_checkpointer_spot.step(iteration)
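# If the commented-out custom mapper above were re-enabled, the loader call
# would look like the sketch below (assuming the usual imports: DatasetMapper
# from detectron2.data and T as an alias for detectron2.data.transforms).
# DatasetMapper is @configurable, so the augmentations kwarg overrides the
# config-derived ones.
data_loader = build_detection_train_loader(
    cfg,
    mapper=DatasetMapper(
        cfg,
        is_train=True,
        augmentations=[
            T.Resize((1024, 1024)),          # fixed-size resize
            T.RandomBrightness(0.75, 1.25),  # photometric jitter
            T.RandomFlip(),                  # horizontal flip by default
            T.RandomSaturation(0.75, 1.25),
        ],
    ),
)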
def do_train(cfg, model, resume=False, val_set='firevysor_val'):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, min_lr=1e-6)
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1, last_epoch=-1)
    metric = 0
    print_every = 50
    tensorboard_dir = osp.join(cfg.OUTPUT_DIR, 'tensorboard')
    checkpoint_dir = osp.join(cfg.OUTPUT_DIR, 'checkpoints')
    create_dir(tensorboard_dir)
    create_dir(checkpoint_dir)
    checkpointer = AdetCheckpointer(model,
                                    checkpoint_dir,
                                    optimizer=optimizer,
                                    scheduler=scheduler)
    start_iter = (checkpointer.resume_or_load(
        cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(checkpointer,
                                                 cfg.SOLVER.CHECKPOINT_PERIOD,
                                                 max_iter=max_iter)
    writers = ([
        CommonMetricPrinter(max_iter),
        # JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
        TensorboardXWriter(tensorboard_dir),
    ] if comm.is_main_process() else [])
    data_loader = build_detection_train_loader(cfg)
    val_dataloader = build_detection_val_loader(cfg, val_set)
    logger.info("Starting training from iteration {}".format(start_iter))
    # [PHAT]: Create a log file
    log_file = open(cfg.MY_CUSTOM.LOG_FILE, 'w')
    best_loss = 1e6
    count_not_improve = 0
    train_size = 2177  # hardcoded number of training images
    epoch_size = int(train_size / cfg.SOLVER.IMS_PER_BATCH)
    n_early_epoch = 10
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            storage.step()
            loss_dict = model(data)
            losses = sum(loss for loss in loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict
            # Update loss dict
            loss_dict_reduced = {
                k: v.item()
                for k, v in comm.reduce_dict(loss_dict).items()
            }
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced,
                                    **loss_dict_reduced)
            # Early stopping: validate once per epoch
            if (iteration > start_iter) and (
                    (iteration - start_iter) % epoch_size == 0):
                val_loss = do_val(cfg, model, val_dataloader)
                if val_loss >= best_loss:
                    count_not_improve += 1
                    # stop if the model hasn't improved for <n_early_epoch>
                    # epochs (the original compared the counter against
                    # epoch_size * n_early_epoch, but it increments only once
                    # per epoch, so that threshold was likely a bug)
                    if count_not_improve == n_early_epoch:
                        break
                else:
                    count_not_improve = 0
                    best_loss = val_loss
                    periodic_checkpointer.save("best_model_early")
                # print(f"epoch {iteration//epoch_size}, val_loss: {val_loss}")
                log_file.write(
                    f"Epoch {(iteration - start_iter) // epoch_size}, val_loss: {val_loss}\n"
                )
                comm.synchronize()
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            lr = optimizer.param_groups[0]["lr"]
            storage.put_scalar("lr", lr, smoothing_hint=False)
            scheduler.step()
            if iteration - start_iter > 5 and (
                    (iteration - start_iter) % print_every == 0
                    or iteration == max_iter):
                for writer in writers:
                    writer.write()
                # Write my log
                log_file.write(
                    f"[iter {iteration}, best_loss: {best_loss}] total_loss: {losses}, lr: {lr}\n"
                )
            periodic_checkpointer.step(iteration)
    log_file.close()
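# do_train() above calls a do_val() helper that is not shown in this file.
# The sketch below is a plausible reconstruction based on its call site
# (cfg, model, loader in; a single scalar validation loss out), with the model
# kept in training mode so it returns a loss dict; the real implementation
# may differ.
def do_val(cfg, model, val_dataloader):
    total, num_batches = 0.0, 0
    with torch.no_grad():  # validation: no gradient bookkeeping
        for data in val_dataloader:
            loss_dict = model(data)
            # average losses across workers before accumulating
            loss_dict_reduced = comm.reduce_dict(loss_dict)
            total += sum(v.item() for v in loss_dict_reduced.values())
            num_batches += 1
    return total / max(num_batches, 1)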