def main(args):
    """Entry point: evaluate a checkpoint when --eval-only is set, else train."""
    cfg = setup(args)

    if args.eval_only:
        # Evaluation path: build the model, restore weights, run the test set.
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # Training path: (optionally) resume from the last checkpoint and train.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Register the custom hair datasets, then evaluate or train."""
    cfg = setup(args)
    # Customized logic to register our hair datasets.
    register_hair()

    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Register the rotated asparagus COCO splits, then evaluate or train."""
    class_map = {'_background_': 0, 'clump': 1, 'stalk': 2, 'spear': 3, 'bar': 4}
    register_coco_instances(
        'asparagus_train_rotated',
        dict(class_map),
        "./datasets/coco/annotations/train_rotated_637/annotations.json",
        "./datasets/coco/annotations/train_rotated_637")
    register_coco_instances(
        'asparagus_val_rotated',
        dict(class_map),
        "./datasets/coco/annotations/val_rotated_637/annotations.json",
        "./datasets/coco/annotations/val_rotated_637")

    cfg = setup(args)
    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if cfg.TEST.AUG.ENABLED:
            # Fold test-time-augmentation metrics into the result dict.
            results.update(Trainer.test_with_TTA(cfg, net))
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # Standard training loop; subclass the trainer for anything fancier.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()
def main(args):
    """Evaluate a checkpoint when --eval-only is set, otherwise train.

    Returns the evaluation results dict in eval mode, or whatever
    trainer.train() returns in training mode.
    """
    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        AdetCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)  # d2 defaults.py
        if comm.is_main_process():
            verify_results(cfg, res)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        return res

    # BUG FIX: the original built the trainer but never restored weights nor
    # started training, so `main` silently returned None in training mode.
    # Mirror the sibling entry points: resume/load, then train.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Run evaluation (--eval-only) or the standard training loop."""
    cfg = setup(args)

    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # For anything fancier than the standard training logic, write your own
    # training loop or subclass the trainer.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def perform_eval(cfg, trainer):
    """Build, restore, and evaluate a trained model.

    :param cfg: Namespace containing all OPMask configs.
    :param trainer: Trainer whose class methods build and test the model.
    :return: Evaluation results.
    """
    net = trainer.build_model(cfg)
    weights_dir = os.path.join(cfg.OUTPUT_DIR, "models")
    DetectionCheckpointer(net, save_dir=weights_dir).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=True)
    results = trainer.test(cfg, net)
    if comm.is_main_process():
        verify_results(cfg, results)
    return results
def main(args):
    """Register the chefCap train/val datasets, then evaluate or train."""
    cfg = setup(args)

    # train split holds 6998 images, val split 1199 images.
    for split in ["train", 'val']:
        # Bind `split` as a default argument so each lambda keeps its own value.
        DatasetCatalog.register(
            "chefCap_" + split, lambda split=split: get_chefcap_image_dicts())
        MetadataCatalog.get("chefCap_" + split).set(
            thing_classes=list(things_class_dict.keys()))
        if split == 'val':
            # Pascal-VOC style evaluation for the validation split.
            MetadataCatalog.get("chefCap_val").evaluator_type = "pascal_voc"
            MetadataCatalog.get("chefCap_val").year = 2012
            MetadataCatalog.get(
                "chefCap_val"
            ).dirname = "/opt/work/chefCap/detectron2_fasterrcnn/data"

    if args.eval_only:
        net = DefaultTrainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = DefaultTrainer.test(cfg, net)
        if cfg.TEST.AUG.ENABLED:
            results.update(DefaultTrainer.test_with_TTA(cfg, net))
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Register datasets, sanity-check annotations, then evaluate or train."""
    cfg = setup(args)
    print(cfg)

    # Register the datasets and visually verify the training annotations.
    register_dataset()
    checkout_dataset_annotation(name="iecas_THz_2019_train")

    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Register the MADS train/val COCO splits, then evaluate or train."""
    from detectron2.data.datasets import register_coco_instances

    data_root = "/root/detectron2/MADS_data_train_test/60_40_tonghop"

    # Training split.
    train_name = "mads_train"
    train_json = os.path.join(data_root, "train.json")
    train_images = os.path.join(data_root, "train", "images")

    # Validation split.
    val_name = "mads_val"
    val_json = os.path.join(data_root, "val.json")
    val_images = os.path.join(data_root, "val", "images")

    register_coco_instances(train_name, {}, train_json, train_images)
    register_coco_instances(val_name, {}, val_json, val_images)

    cfg = setup(args)
    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if cfg.TEST.AUG.ENABLED:
            results.update(Trainer.test_with_TTA(cfg, net))
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # Standard training loop; subclass the trainer for anything fancier.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()
def main(args):
    """Entry point supporting plain training, QAT, and (quantized) evaluation.

    Modes, selected by flags on `args`:
      * adaptive_pool  -- monkey-patch detectron2's pooler ops (test mode only).
      * qat            -- quantization-aware training.
      * eval_only / quant_eval -- evaluate a float or quantized checkpoint.
      * default        -- ordinary training.
    """
    cfg = setup(args)
    if args.adaptive_pool:
        assert args.eval_only, "AdaptivePool is supported only in test mode"
        # Monkey-patch detectron2's pooling ops with AdaptivePool before the
        # model is built, so the patched classes are picked up at build time.
        import detectron2.modeling.poolers
        detectron2.modeling.poolers.RoIPool = AdaptivePool
        detectron2.modeling.poolers.ROIAlign = AdaptivePool
    if args.qat:
        # Quantization-aware training: decorate/prepare first, then swap the
        # trainer's model for the quantization-ready version. The checkpointer
        # must be re-pointed at the new model object or it would save/load the
        # stale one.
        quantize_decorate()
        quantize_prepare()
        trainer = Trainer(cfg)
        trainer.resume_or_load(resume=args.resume)
        trainer.model = Trainer.update_model(trainer.model, args.qbackend)
        trainer.checkpointer.model = trainer.model
        return trainer.train()
    elif args.eval_only or args.quant_eval:
        if args.quant_eval:
            # Evaluate a quantized checkpoint: rebuild the quantization-ready
            # model, load raw weights (stripping any DataParallel "module."
            # prefix), then convert to a true quantized model in place.
            quantize_decorate()
            quantize_prepare()
            model = Trainer.build_model(cfg)
            model = Trainer.update_model(model, args.qbackend)
            model.eval()
            from fvcore.common.checkpoint import _strip_prefix_if_present
            weights = torch.load(cfg.MODEL.WEIGHTS)['model']
            _strip_prefix_if_present(weights, "module.")
            model.load_state_dict(weights)
            torch.quantization.convert(model, inplace=True)
        else:
            # Plain float evaluation.
            model = Trainer.build_model(cfg)
            DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).load(
                cfg.MODEL.WEIGHTS
            )
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Evaluate (with an attempted TorchScript trace) or train the model."""
    cfg = setup(args)
    if args.eval_only:
        model = Trainer.build_model(cfg)
        # NOTE(review): the model is traced BEFORE the checkpoint is loaded,
        # so the trace captures randomly initialized weights; loading weights
        # into the returned ScriptModule afterwards may not behave as
        # intended — confirm the ordering.
        # NOTE(review): `d` is a raw numpy HWC float array, not a torch
        # Tensor (nor the list-of-dicts detectron2 models expect) — verify
        # torch.jit.trace actually accepts this input.
        img = Image.open("datasets/coco/train2017/000000001146.jpg")
        img.load()
        d = np.asarray(img, dtype="float32")
        model = torch.jit.trace(model, d)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Register the stefan train/val COCO datasets, then evaluate or train."""
    cfg = setup(args)

    # Register our own datasets.
    from detectron2.data.datasets import register_coco_instances

    # Training split.
    train_name = "stefan_train"
    train_json = "/data_2/jongwon/datasetv3/annotations/train.json"
    train_images = "/data_2/jongwon/datasetv3/rgb"

    # Validation split (shares the image root with training).
    val_name = "stefan_val"
    val_json = "/data_2/jongwon/datasetv3/annotations/val.json"
    val_images = "/data_2/jongwon/datasetv3/rgb"

    register_coco_instances(train_name, {}, train_json, train_images)
    register_coco_instances(val_name, {}, val_json, val_images)

    if args.eval_only:
        net = Trainer.build_model(cfg)
        AdetCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        evaluators = [
            Trainer.build_evaluator(cfg, name) for name in cfg.DATASETS.TEST
        ]
        results = Trainer.test(cfg, net, evaluators)
        if comm.is_main_process():
            verify_results(cfg, results)
        if cfg.TEST.AUG.ENABLED:
            results.update(Trainer.test_with_TTA(cfg, net))
        return results

    # Standard training loop; subclass the trainer for anything fancier.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()
def train(self):
    """Run training after logging the model's parameter counts.

    Returns:
        OrderedDict of results, if evaluation is enabled. Otherwise None.
    """
    logger = logging.getLogger("detectron2")
    params = list(self.model.parameters())
    n_trainable = sum(p.numel() for p in params if p.requires_grad)
    n_total = sum(p.numel() for p in params)
    logger.info(
        'Beginning training of model with \n Trainable parameters {} and Total parameters {}'.format(
            n_trainable, n_total))
    super().train(self.start_iter, self.max_iter)
    if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
        assert hasattr(
            self, "_last_eval_results"
        ), "No evaluation results obtained during training!"
        verify_results(self.cfg, self._last_eval_results)
        return self._last_eval_results
def main(args):
    """Build the config, then either produce mAP results or train."""
    # Setup config node.
    cfg = setup_config(args, random_seed=args.random_seed)

    # Eval-only mode: restore a checkpoint and report mAP.
    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # Build the trainer from the config node and begin training.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Evaluate with a PathwayEvaluator or train a RelationRetinaNet model."""
    cfg = setup(args)
    # Importing relation_retinanet registers the meta-architecture as a
    # side effect; the name itself is unused.
    from relation_retinanet import RelationRetinaNet

    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        evaluator = PathwayEvaluator(
            cfg.DATASETS.TEST[0], cfg, True, False, cfg.OUTPUT_DIR)
        results = Trainer.test(cfg, net, evaluator)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def train(self):
    """Run the main training loop from start_iter to max_iter.

    Args:
        start_iter, max_iter (int): See docs above

    Returns:
        self._last_eval_results when evaluation ran and this is the main
        process; otherwise None (implicit).
    """
    logger = logging.getLogger(__name__)
    logger.info("Starting training from iteration {}".format(self.start_iter))
    # All metrics written during training go through this EventStorage.
    with EventStorage(self.start_iter) as self.storage:
        try:
            self.before_train()
            for self.iter in range(self.start_iter, self.max_iter):
                self.before_step()
                self.run_step()
                self.after_step()
        finally:
            # Always run teardown hooks, even if a step raised.
            self.after_train()
    # _last_eval_results only exists if an evaluation hook ran.
    if hasattr(self, "_last_eval_results") and comm.is_main_process():
        verify_results(self.cfg, self._last_eval_results)
        return self._last_eval_results
def main(args):
    """Register the OpenLogo datasets, then evaluate or train.

    In eval mode: restores weights, optionally visualizes predictions, and
    runs the OpenLogo evaluator (plus TTA when enabled). Otherwise trains a
    DefaultTrainer, registering a TTA eval hook when enabled.
    """
    cfg = setup(args)
    show = True
    register_openlogo(cfg.DATASETS.TRAIN[0], "datasets/data/openlogo",
                      "trainval", "supervised_imageset")
    register_openlogo(cfg.DATASETS.TEST[0], "datasets/data/openlogo",
                      "test", "supervised_imageset")
    evaluator = OpenLogoDetectionEvaluator(cfg.DATASETS.TEST[0])

    if args.eval_only:
        # FIX: call the class methods directly instead of instantiating a
        # throwaway DefaultTrainer first — the original constructed the full
        # training pipeline (model, optimizer, dataloader) up front and then
        # built the model a second time for evaluation.
        model = DefaultTrainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        if show:
            visualize(cfg, amount=20)
        res = DefaultTrainer.test(cfg, model, evaluators=[evaluator])
        if comm.is_main_process():
            verify_results(cfg, res)
        if cfg.TEST.AUG.ENABLED:
            res.update(DefaultTrainer.test_with_TTA(cfg, model))
        return res

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()
def main(args):
    """Register the tiny_simdata train/val splits, then evaluate or train."""
    for split in ['train', 'val']:
        # Bind the split name as a default argument so each registered
        # loader keeps its own value (late-binding closure pitfall).
        DatasetCatalog.register(
            'tiny_simdata/' + split,
            lambda split=split: get_tinysim('../tiny_simdata/' + split))
        MetadataCatalog.get('tiny_simdata/' + split).set(
            thing_classes=['gun', 'lighter'])
        MetadataCatalog.get('tiny_simdata/' + split).set(
            json_file='../tiny_simdata/' + split + '/annotations.json')
    object_metadata = MetadataCatalog.get('tiny_simdata/train')

    cfg = setup(args)
    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Register the RSNA / VinBDI detection datasets, then evaluate or train."""
    # Each dataset is a (name, loader) pair with a single "opacity" class
    # evaluated with the COCO evaluator.
    datasets = [
        ("rsna_det_train", rsna_det_train_dataset),
        ("rsna_det_val", rsna_det_val_dataset),
        ("rsna_det_vinbdi_train", rsna_det_vinbdi_train_dataset),
        ("rsna_det_vinbdi_test", rsna_det_vinbdi_test_dataset),
    ]
    for ds_name, ds_loader in datasets:
        DatasetCatalog.register(ds_name, ds_loader)
        MetadataCatalog.get(ds_name).set(
            thing_classes=["opacity"], evaluator_type="coco")

    cfg = setup(args)
    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if cfg.TEST.AUG.ENABLED:
            results.update(Trainer.test_with_TTA(cfg, net))
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # Standard training loop; subclass the trainer for anything fancier.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()
def main(args):
    """Evaluate (--eval-only) or train, returning the results dict.

    Returns:
        Evaluation results in eval mode, or the results of trainer.train()
        (evaluation may run at the end of training) in training mode.
    """
    cfg = setup(args)
    # Load cfg as a python dict (kept for planned wandb logging below).
    config = load_yaml(args.config_file)

    # TODO: Visualize and log training examples and annotations
    # training_imgs = viz_data(cfg)
    # wandb.log({"training_examples": training_imgs})

    # Evaluation path.
    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        # FIXME: TTA
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        # FIX: explicitly return the evaluation results — the original fell
        # through without returning them in eval mode.
        return res

    # Training path.
    trainer = Trainer(cfg)
    # Load model weights (if specified).
    trainer.resume_or_load(resume=args.resume)
    # FIXME: TTA
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(
                0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    # Evaluation may be done at end of training.
    res = trainer.train()
    # TODO: Visualize and log predictions and groundtruth annotations
    return res
def main(args):
    """Register the sim_multi COCO datasets, then evaluate or train."""
    image_root = '../../../sim_data_center/multi_class_detection/down/JPEGImages/'
    register_coco_instances(
        'sim_multi_train', {}, './dataset/train_annotations.json', image_root)
    register_coco_instances(
        'sim_multi_test', {}, './dataset/test_annotations.json', image_root)

    cfg = setup(args)
    if args.eval_only:
        net = Trainer.build_model(cfg)
        # Evaluation always targets the final checkpoint in OUTPUT_DIR; the
        # config is frozen again after the override.
        cfg.defrost()
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
        cfg.freeze()
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Evaluate (with optional TTA) or train (with optional TTA eval hook)."""
    cfg = setup(args)

    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        results = Trainer.test(cfg, net)
        if cfg.TEST.AUG.ENABLED:
            # Merge test-time-augmentation metrics into the results.
            results.update(Trainer.test_with_TTA(cfg, net))
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks(
            [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
        )
    return trainer.train()
def main(args):
    """Register the PubLayNet train/val COCO datasets, then evaluate or train."""
    # Register our own datasets.
    from detectron2.data.datasets import register_coco_instances

    # Training split.
    train_name = "publaynet_train"
    train_json = "/home/techainer/linus/publaynet/train.json"
    train_images = "/home/techainer/linus/publaynet/train/"

    # Validation split.
    val_name = "publaynet_val"
    val_json = "/home/techainer/linus/publaynet/val.json"
    val_images = "/home/techainer/linus/publaynet/val"

    register_coco_instances(train_name, {}, train_json, train_images)
    register_coco_instances(val_name, {}, val_json, val_images)

    cfg = setup(args)
    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if cfg.TEST.AUG.ENABLED:
            results.update(Trainer.test_with_TTA(cfg, net))
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # Standard training loop; subclass the trainer for anything fancier.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()
def main(args):
    """Register the datasets, then evaluate or train."""
    cfg = setup(args)
    print(cfg)

    # Register the datasets.
    register_dataset()

    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Register the datasets, then run evaluation only or full training."""
    cfg = setup(args)

    # Register the datasets.
    plain_register_dataset()

    # Evaluation-only path.
    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Register the plane train/val datasets from JSON, then evaluate or train.

    Reads per-split COCO-format JSON files from cfg.INPUT.DIR and registers
    them with detectron2's DatasetCatalog/MetadataCatalog.
    """
    cfg = setup(args)
    json_dir = cfg.INPUT.DIR
    for d in ["train", "val"]:
        json_path = json_dir + f"plane_net_{d}_coco_format.json"
        with open(json_path, "r") as f:
            dataset_dicts = json.load(f)
        # FIX: DatasetCatalog.register expects a zero-argument callable that
        # returns the dataset, not the loaded list itself. Bind the data via
        # a default argument so each split keeps its own list (avoids the
        # late-binding closure pitfall across loop iterations).
        DatasetCatalog.register("plane_" + d, lambda dicts=dataset_dicts: dicts)
        MetadataCatalog.get("plane_" + d).set(thing_classes=["plane"])
    plane_metadata = MetadataCatalog.get("plane_train")

    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args):
    """Register all SKU-110 VOC splits, then evaluate or train."""
    # Dataset root is overridable via the DATASETS_HOME environment variable.
    _datasets_root = os.environ.get("DATASETS_HOME", "datasets")
    sku110_dir = os.path.join(_datasets_root, 'sku110')
    for split in ('train', 'trainval', 'val', 'test'):
        register_sku110_voc('sku110_' + split, sku110_dir, split)

    cfg = setup(args)
    if args.eval_only:
        net = Trainer.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        results = Trainer.test(cfg, net)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def main(args: Any, trainer_class: Optional[Type] = Trainer) -> Optional[Dict]:
    """Evaluate or train using a pluggable trainer class.

    Args:
        args: Parsed command-line arguments.
        trainer_class: TrainerBase subclass to use; None falls back to Trainer.

    Returns:
        Evaluation results in eval mode, else the result of trainer.train().
    """
    cfg = setup(args)
    if trainer_class is None:
        trainer_class = Trainer
    assert issubclass(trainer_class, TrainerBase)

    if args.eval_only:
        net = trainer_class.build_model(cfg)
        DetectionCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        results = trainer_class.test(cfg, net)
        if cfg.TEST.AUG.ENABLED:
            results.update(trainer_class.test_with_TTA(cfg, net))
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer = trainer_class(cfg, find_unused_parameters=True)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()
def main(args):
    """Register the blob COCO datasets, fetch the base model, then run."""
    register_coco_instances(
        'blob_train', {}, f'{dataset}/train/coco.json', f'{dataset}/train/')
    register_coco_instances(
        'blob_val', {}, f'{dataset}/val/coco.json', f'{dataset}/val/')

    cfg = setup(args)
    # Download the pretrained base model if it is not present locally.
    if not os.path.exists(cfg.MODEL.WEIGHTS):
        download_missing_base_model(cfg)

    if args.eval_only:
        net = Trainer.build_model(cfg)
        AdetCheckpointer(net, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        evaluators = [
            Trainer.build_evaluator(cfg, name) for name in cfg.DATASETS.TEST
        ]
        results = Trainer.test(cfg, net, evaluators)
        if comm.is_main_process():
            verify_results(cfg, results)
        if cfg.TEST.AUG.ENABLED:
            results.update(Trainer.test_with_TTA(cfg, net))
        return results

    # Standard training loop; subclass the trainer for anything fancier.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks(
            [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
        )
    return trainer.train()
def test(self, ckpt):
    """Evaluate checkpoint `ckpt`, track the best bbox AP seen so far,
    and dump the accumulated results to OUTPUT_DIR/inference/all_res.json.
    """
    # Load the raw checkpoint file directly into the model
    # (bypasses the checkpointer's resume bookkeeping).
    self.check_pointer._load_model(self.check_pointer._load_file(ckpt))
    print('evaluating checkpoint {}'.format(ckpt))
    res = Trainer.test(self.cfg, self.model)
    # NOTE(review): assuming only verify_results is rank-0-gated here and the
    # bookkeeping below runs on every process — confirm against the original
    # (collapsed) source's indentation.
    if comm.is_main_process():
        verify_results(self.cfg, res)
    print(res)
    # Keep the checkpoint with the highest COCO bbox AP.
    if (self.best_res is None) or (
            self.best_res is not None
            and self.best_res['bbox']['AP'] < res['bbox']['AP']):
        self.best_res = res
        self.best_file = ckpt
    print('best results from checkpoint {}'.format(self.best_file))
    print(self.best_res)
    # Record best-so-far and this checkpoint's results, then persist all.
    self.all_res["best_file"] = self.best_file
    self.all_res["best_res"] = self.best_res
    self.all_res[ckpt] = res
    os.makedirs(os.path.join(self.cfg.OUTPUT_DIR, 'inference'),
                exist_ok=True)
    with open(
            os.path.join(self.cfg.OUTPUT_DIR, 'inference', 'all_res.json'),
            'w') as fp:
        json.dump(self.all_res, fp)