Пример #1
0
def mAPtest(yaml, weight):
    """Run COCO-style mAP evaluation of a trained model on the Acheck test set.

    :param yaml: path to the detectron2 config file to merge.
    :param weight: path to the model weights checkpoint to evaluate.
    :return: metrics dict produced by ``inference_on_dataset``
        (previously the result was discarded).
    """
    from detectron2.engine import DefaultTrainer
    from detectron2.config import get_cfg
    cfg = get_cfg()
    cfg.merge_from_file(yaml)
    cfg.DATALOADER.NUM_WORKERS = 2
    # cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/137849600/model_final_f10217.pkl"  # initialize from model zoo
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  # 3 classes (Kong, lee, Huh)

    from detectron2.data.datasets import register_coco_instances
    register_coco_instances("Acheck_test", {}, "./Acheck_hair_test.json",
                            "./img_hair_test")

    from detectron2.data import MetadataCatalog
    MetadataCatalog.get("Acheck_test").thing_classes = ["Kong", "Lee", "Huh"]

    # The test split doubles as DATASETS.TRAIN because DefaultTrainer
    # requires a training set to build; no training actually happens.
    cfg.DATASETS.TRAIN = ("Acheck_test", )
    cfg.DATASETS.TEST = ("Acheck_test", )

    cfg.MODEL.WEIGHTS = weight
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set the testing threshold for this model
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)  # loads cfg.MODEL.WEIGHTS

    from detectron2.evaluation import COCOEvaluator, inference_on_dataset
    from detectron2.data import build_detection_test_loader
    evaluator = COCOEvaluator("Acheck_test", cfg, False, "./output/")
    val_loader = build_detection_test_loader(cfg, "Acheck_test")
    # Fixed: drop the unused DefaultPredictor/metadata/dataset_dicts locals
    # (the predictor loaded the full model a second time for nothing) and
    # return the evaluation metrics to the caller.
    return inference_on_dataset(trainer.model, val_loader, evaluator)
Пример #2
0
def inference(config_file, coco_to_kitti_dict):
    """Evaluate a model-zoo checkpoint on KITTI-MOTS, restricted to COCO
    classes that map onto KITTI classes.

    :param config_file: relative model-zoo config path (yaml).
    :param coco_to_kitti_dict: mapping consumed by ``filter_preds`` to keep
        only predictions whose COCO class exists in KITTI.
    :return: metrics dict from the final (filtered) evaluation
        (previously the result was discarded).
    """
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(config_file))
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)
    cfg.DATASETS.TRAIN = ("kitti_mots_train", )
    cfg.DATASETS.TEST = ("kitti_mots_test", )
    cfg.SOLVER.IMS_PER_BATCH = 8
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    evaluator = COCOEvaluator("kitti_mots_test",
                              cfg,
                              False,
                              output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, "kitti_mots_test")
    # First pass populates the evaluator's raw predictions.
    inference_on_dataset(trainer.model, val_loader, evaluator)

    # NOTE(review): relies on COCOEvaluator's private `_predictions` buffer;
    # this may break across detectron2 versions — confirm on upgrade.
    preds = evaluator._predictions
    evaluator._predictions = filter_preds(preds, coco_to_kitti_dict)

    # Re-run evaluation on the filtered predictions and return the metrics.
    return evaluator.evaluate()
Пример #3
0
def test_model(path,
               model,
               weights,
               dataset,
               action_type='test',
               mode="full",
               visualize=False):
    """Evaluate a trained model on a registered bottle dataset.

    :param path: directory of the dataset to register for evaluation;
        its basename becomes the dataset name.
    :param model: model identifier forwarded to the cfg generators.
    :param weights: weights spec used to build the training cfg.
    :param dataset: name of the dataset the model was trained on.
    :param action_type: split tag used when registering ``path``.
    :param mode: registration mode forwarded to bottle_loader.
    :param visualize: when True, also draw predictions for the test images.
    :return: metrics dict from ``inference_on_dataset``.
    """
    dataset_name = os.path.basename(path)
    test = bottle_loader.register_dataset(path, dataset_name, action_type,
                                          mode)
    bottle_loader.register_dataset(path, dataset, 'train', mode)
    cfg_test = gen_cfg_test(dataset, model, dataset_name)
    cfg = gen_cfg_train(model, weights, dataset)
    # Evaluate the final checkpoint produced by the earlier training run.
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.9
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    # NOTE(review): the evaluator is built for "<dataset_name>_<action_type>"
    # but the loader below iterates "<dataset>_train" — confirm this
    # dataset-name mismatch is intentional.
    evaluator = COCOEvaluator("%s_%s" % (dataset_name, action_type),
                              cfg_test,
                              False,
                              output_dir="./output_%s/" % (dataset))
    val_loader = build_detection_test_loader(cfg_test,
                                             "%s_%s" % (dataset, 'train'))
    result = inference_on_dataset(trainer.model, val_loader, evaluator)

    #Visualize the test
    if visualize:
        visualize_images_dict(
            dataset_name, test,
            MetadataCatalog.get('%s_%s' % (dataset, 'train')), cfg,
            dataset_name)
    return result
Пример #4
0
def testing():
    """Evaluate the SYNTHIA test split with a trainer restored from its
    latest checkpoint."""
    dataset = "SYNTHIA_test"  # alternatives: "HoliCity_train", "HoliCity_valid"
    assign_global(dataset)

    ann_file = f"/home/dxl/Code/PlanarReconstruction/data/SYNTHIA_test_coco_format.json"
    register_coco_instances(name=_name,
                            metadata={'thing_classes': ["P"]},
                            json_file=ann_file,
                            image_root="/home/dxl/Data/PlaneRecover")

    cfg = make_cfg()

    # Restore the most recent checkpoint; no training is performed here.
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)

    # Run evaluation over the validation loader.
    evaluator, loader = build_evaluator(cfg)
    inference_on_dataset(trainer.model, loader, evaluator)
Пример #5
0
def main():
    """Validate a pretrained model on a COCO-format validation set.

    Reads config/dataset paths from CLI args, restores the most recent
    checkpoint (falling back to ``args.pretrain``) and runs COCO evaluation.

    :return: metrics dict produced by ``inference_on_dataset``
        (previously the result was discarded).
    """
    # log
    setup_logger(output='./output/log.txt')

    # train args
    args = parse()

    # model configurations
    cfg = get_cfg()
    cfg.merge_from_file(args.config)

    register_coco_instances("val_data", {}, args.val_json_path,
                            args.val_img_dir)
    # DefaultTrainer needs a TRAIN dataset to build, even for evaluation only.
    cfg.DATASETS.TRAIN = ("val_data", )
    cfg.DATASETS.TEST = ("val_data", )

    cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_class
    cfg.MODEL.WEIGHTS = args.pretrain

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)  # resume last checkpoint if present

    # validation
    evaluator = COCOEvaluator("val_data",
                              cfg,
                              False,
                              output_dir=cfg.OUTPUT_DIR)
    val_loader = build_detection_test_loader(cfg, "val_data")
    return inference_on_dataset(trainer.model, val_loader, evaluator)
Пример #6
0
def main():
    """Evaluate a trained model on the registered test dataset.

    Dataset paths, model config and weights all come from CLI args.

    :return: metrics dict from ``inference_on_dataset``.
    """
    args = parser.parse_args()
    register_coco_instances(args.dataset, {}, args.label, args.file)  # training dataset
    register_coco_instances(args.test_dataset, {}, args.test_label, args.test_file)  # testing dataset

    ### set metadata
    MetadataCatalog.get(args.test_dataset).evaluator_type = "coco"
    DatasetCatalog.get(args.test_dataset)

    ### cfg setting
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(args.model))
    cfg.DATASETS.TRAIN = (args.dataset,)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_class  # excavator, dump_truck, cement_truck
    cfg.MODEL.WEIGHTS = args.weight
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7   # set the testing threshold for this model
    cfg.DATASETS.TEST = (args.test_dataset,)

    ### trainer setting
    trainer = DefaultTrainer(cfg)
    # Bug fix: resume_or_load takes a boolean `resume`, not a weights path.
    # Passing the (truthy) path forced resume-from-last-checkpoint; with
    # resume=False the checkpointer loads cfg.MODEL.WEIGHTS as intended.
    trainer.resume_or_load(resume=False)

    ### evaluation setting
    evaluator = COCOEvaluator(args.test_dataset, cfg, False, output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, args.test_dataset)
    return inference_on_dataset(trainer.model, val_loader, evaluator)
Пример #7
0
def task_a(model_name, model_file, checkpoint=None, evaluate=True, visualize=True):
    """Evaluate and/or visualize a (possibly fine-tuned) model on MOTSChallenge.

    :param model_name: human-readable name used for the results folder.
    :param model_file: model-zoo config path for the architecture.
    :param checkpoint: optional path to fine-tuned weights; falls back to the
        model-zoo checkpoint when None.
    :param evaluate: run COCO evaluation on MOTS_val when True.
    :param visualize: dump qualitative predictions for a few frames when True.
    """
    print('Running task A for model', model_name)
    if checkpoint:
        SAVE_PATH = os.path.join('./results_week_5_task_a', model_name + '_wCheckpoint')
    else:
        SAVE_PATH = os.path.join('./results_week_5_task_a', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Loading data
    print('Loading data')
    dataloader = MOTS_Dataloader(dataset='motschallenge')
    def mots_train(): return dataloader.get_dicts(train_flag=True)
    def mots_val(): return dataloader.get_dicts(train_flag=False)
    DatasetCatalog.register('MOTS_train', mots_train)
    MetadataCatalog.get('MOTS_train').set(thing_classes=list(MOTS_CATEGORIES.keys()))
    DatasetCatalog.register('MOTS_val', mots_val)
    MetadataCatalog.get('MOTS_val').set(thing_classes=list(MOTS_CATEGORIES.keys()))

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    model_training_metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]) # Store current model training metadata
    cfg.DATASETS.TRAIN = ('MOTS_train', )
    cfg.DATASETS.TEST = ('MOTS_val', )
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    if checkpoint:
        print('Using Checkpoint')
        cfg.MODEL.WEIGHTS = checkpoint
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    else:
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)

    if evaluate:
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        # Evaluation
        print('Evaluating')
        evaluator = COCOEvaluator('MOTS_val', cfg, False, output_dir=SAVE_PATH)
        trainer = DefaultTrainer(cfg)
        trainer.test(cfg, model, evaluators=[evaluator])

    if visualize:
        # Qualitative results: first and last 20 validation frames.
        print('Getting qualitative results')
        predictor = DefaultPredictor(cfg)
        samples = mots_val()
        samples = samples[:20] + samples[-20:]
        # Fixed: loop variable renamed from `input`, which shadowed a builtin.
        for i, sample in enumerate(samples):
            img = cv2.imread(sample['file_name'])
            outputs = predictor(img)
            v = Visualizer(
                img[:, :, ::-1],
                metadata=model_training_metadata,
                scale=0.8,
                instance_mode=ColorMode.IMAGE)
            v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
            cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
Пример #8
0
def trainILSVRC(args):
    """Register the ILSVRC datasets and run (or resume) training.

    :param args: CLI namespace; currently unused but kept for interface parity.
    :return: whatever ``DefaultTrainer.train`` returns.
    """
    register_ILSVRC()
    yaml_path, out_dir, weights = get_cfg_info()
    cfg = setup(yaml_path, out_dir, weights)

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    ilsvrc_trainer = DefaultTrainer(cfg)
    ilsvrc_trainer.resume_or_load(resume=True)
    return ilsvrc_trainer.train()
Пример #9
0
    def test_default_trainer(self):
        """DefaultTrainer.model must proxy the inner trainer's model."""
        import tempfile  # local import: keeps the file's import surface unchanged
        cfg = get_cfg()
        cfg.MODEL.META_ARCHITECTURE = "_SimpleModel"
        cfg.DATASETS.TRAIN = ("coco_2017_val_100", )
        # Write checkpoints/logs into a temp dir instead of ./output,
        # matching the sibling test_default_trainer variants.
        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
            cfg.OUTPUT_DIR = d
            trainer = DefaultTrainer(cfg)

            # test property
            self.assertIs(trainer.model, trainer._trainer.model)
            trainer.model = _SimpleModel()
            self.assertIs(trainer.model, trainer._trainer.model)
Пример #10
0
    def compile(self, n_iter, output_folder, resume=False):
        """Configure solver hyper-parameters and build the trainer.

        :param n_iter: number of solver iterations (SOLVER.MAX_ITER).
        :param output_folder: directory for checkpoints and logs.
        :param resume: forwarded to ``resume_or_load``.
        """
        cfg = self.cfg

        # Solver schedule.
        cfg.SOLVER.IMS_PER_BATCH = 5
        cfg.SOLVER.BASE_LR = 0.0015
        cfg.SOLVER.MAX_ITER = n_iter
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128

        # Output location.
        cfg.OUTPUT_DIR = output_folder
        os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

        self.trainer = DefaultTrainer(cfg)
        self.trainer.resume_or_load(resume=resume)
Пример #11
0
    def __init__(self, cfg):
        """
        Trainer that records the full-model gradient-clipping value before
        delegating construction to ``DefaultTrainer``.

        Args:
            cfg (CfgNode): reads SOLVER.CLIP_GRADIENTS.{ENABLED,CLIP_TYPE,CLIP_VALUE}.
        """
        # 0.0 means "no clipping"; only the "full_model" clip type is honoured.
        self.clip_norm_val = 0.0
        if cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
            if cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
                self.clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE

        # Attribute is set before DefaultTrainer.__init__, presumably so code
        # run during base construction can already see it — TODO confirm.
        DefaultTrainer.__init__(self, cfg)
Пример #12
0
def task_a_no_KITTI_training(model_name,
                             model_file,
                             evaluate=True,
                             visualize=True):
    """Evaluate/visualize an off-the-shelf model-zoo model on MOTS without
    any KITTI fine-tuning.

    :param model_name: human-readable name used for the results folder.
    :param model_file: model-zoo config path for the architecture.
    :param evaluate: run COCO evaluation on MOTS_train when True.
    :param visualize: dump qualitative predictions for a few frames when True.
    """
    print('Running task A for model', model_name)

    SAVE_PATH = os.path.join('./results_week_5_task_a', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    model_training_metadata = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0])  # Store current model training metadata
    cfg.DATASETS.TRAIN = ('KITTIMOTS_train', )
    cfg.DATASETS.TEST = ('MOTS_train', )
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)

    if evaluate:
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        # Evaluation
        print('Evaluating')
        evaluator = COCOEvaluator('MOTS_train',
                                  cfg,
                                  False,
                                  output_dir=SAVE_PATH)
        trainer = DefaultTrainer(cfg)
        trainer.test(cfg, model, evaluators=[evaluator])

    if visualize:
        # Qualitative results: visualize some results
        print('Getting qualitative results')
        predictor = DefaultPredictor(cfg)
        # NOTE(review): `mots_train` is not defined in this function (the
        # sibling task_a defines it as a local) — this branch will raise
        # NameError unless a module-level mots_train exists; verify.
        inputs = mots_train()
        inputs = inputs[:20] + inputs[-20:]
        # NOTE(review): loop variable `input` shadows the builtin of the same name.
        for i, input in enumerate(inputs):
            img = cv2.imread(input['file_name'])
            outputs = predictor(img)
            v = Visualizer(img[:, :, ::-1],
                           metadata=model_training_metadata,
                           scale=0.8,
                           instance_mode=ColorMode.IMAGE)
            v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
            cv2.imwrite(
                os.path.join(
                    SAVE_PATH,
                    'Inference_' + model_name + '_inf_' + str(i) + '.png'),
                v.get_image()[:, :, ::-1])
Пример #13
0
    def test_default_trainer(self):
        """The `model` property must always mirror `_trainer.model`."""
        cfg = get_cfg()
        cfg.DATASETS.TRAIN = ("coco_2017_val_100", )
        cfg.MODEL.META_ARCHITECTURE = "_SimpleModel"
        with tempfile.TemporaryDirectory(prefix="detectron2_test") as tmp_dir:
            cfg.OUTPUT_DIR = tmp_dir
            trainer = DefaultTrainer(cfg)

            # The property proxies the wrapped trainer's model...
            self.assertIs(trainer.model, trainer._trainer.model)
            # ...and assigning through it updates the wrapped trainer too.
            trainer.model = _SimpleModel()
            self.assertIs(trainer.model, trainer._trainer.model)
Пример #14
0
def make_trainer(cfg):
    """Build a DefaultTrainer whose class has a COCO ``build_evaluator`` attached.

    Silences: "No evaluator found. Use `DefaultTrainer.test(evaluators=)`,
    or implement its `build_evaluator` method."
    """
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        # Default the dump location to <OUTPUT_DIR>/inference.
        folder = (str(Path(cfg.OUTPUT_DIR) / "inference")
                  if output_folder is None else output_folder)
        return COCOEvaluator(dataset_name, cfg, True, folder)

    # Patch the class so every DefaultTrainer instance gains the evaluator.
    DefaultTrainer.build_evaluator = classmethod(build_evaluator)

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    return trainer
Пример #15
0
    def test_default_trainer(self):
        """model property stays in sync with the inner trainer (CPU build)."""
        # TODO: this test requires manifold access, so changed device to CPU. see: T88318502
        cfg = get_cfg()
        cfg.MODEL.DEVICE = "cpu"
        cfg.MODEL.META_ARCHITECTURE = "_SimpleModel"
        cfg.DATASETS.TRAIN = ("coco_2017_val_100", )
        with tempfile.TemporaryDirectory(prefix="detectron2_test") as out_dir:
            cfg.OUTPUT_DIR = out_dir
            trainer = DefaultTrainer(cfg)

            # Property reads through to the wrapped trainer, before and
            # after reassignment.
            self.assertIs(trainer.model, trainer._trainer.model)
            replacement = _SimpleModel()
            trainer.model = replacement
            self.assertIs(trainer.model, trainer._trainer.model)
def main(args):
	"""Entry point: evaluation-only when ``args.eval_only``; otherwise train.

	:param args: parsed CLI namespace with eval_only and resume flags.
	:return: test metrics when evaluating, else the train() result.
	"""
	cfg = setup(args)

	if args.eval_only:
		# Evaluation path: build the model, restore weights, run the test loop.
		model = Trainer.build_model(cfg)
		DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
			cfg.MODEL.WEIGHTS, resume=args.resume
		)
		res = Trainer.test(cfg, model)
		return res

	# NOTE(review): training uses DefaultTrainer while evaluation uses the
	# project's Trainer — confirm this asymmetry is intentional.
	trainer = DefaultTrainer(cfg)
	trainer.resume_or_load(resume=args.resume)
	return trainer.train()
Пример #17
0
def main(data_dir, dataset_tag, fold_idx, dset):
    """Register one cross-validation fold, evaluate its checkpoint and dump
    the COCO metrics to JSON.

    :param data_dir: directory holding classes.json and the fold json files.
    :param dataset_tag: base tag used to name the registered datasets.
    :param fold_idx: index of the cross-validation fold.
    :param dset: split to evaluate ('train' or 'val').
    """
    setup_logger()

    # read the classes dictionary
    json_file = os.path.join(data_dir, "classes.json")
    with open(json_file) as f:
        classes = json.load(f)

    print(f'setting fold {fold_idx}')
    tag = f'{dataset_tag}_fold_{fold_idx}_'
    for d in ["train", "val"]:
        # don't use fully f strings for register, it merges 'train' and 'val'
        print(f'\t {tag}' + d)
        DatasetCatalog.register(
            tag + d, lambda d=d: get_data_dicts(data_dir, tag + d + '.json'))
        MetadataCatalog.get(tag + d).set(
            thing_classes=sorted([it for _, it in classes.items()]))

    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    # Bug fix: use the names actually registered above
    # ("<tag>_fold_<idx>_<split>"); the previous "<tag>_<split>_<idx>_fold"
    # names were never registered with DatasetCatalog.
    cfg.DATASETS.TRAIN = (tag + "train", )
    cfg.DATASETS.TEST = (tag + dset, )
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.INPUT.MAX_SIZE_TRAIN = 1000

    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(classes)  # number of classes

    cfg.OUTPUT_DIR = f'output_{fold_idx}'

    # Bug fix: set the checkpoint path BEFORE building the trainer so that
    # resume_or_load() actually loads the fold's weights into trainer.model
    # (previously WEIGHTS was assigned after loading, so it had no effect).
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR,
                                     f"model_fold_{fold_idx}.pth")
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    evaluator = COCOEvaluator(tag + dset,
                              cfg,
                              False,
                              output_dir=cfg.OUTPUT_DIR)
    val_loader = build_detection_test_loader(cfg, tag + dset)
    metrics = inference_on_dataset(trainer.model, val_loader, evaluator)
    with open(os.path.join(cfg.OUTPUT_DIR, f"cocoeval_{dset}_{fold_idx}.json"),
              'w') as f:
        json.dump(metrics, f)
Пример #18
0
def get_card_width():
    """Run card detection on ``x.jpg`` and return the first detection's box.

    :return: [x1, y1, x2, y2] of the top detection, as a list of floats.
    """
    from detectron2.data.datasets import register_coco_instances
    register_coco_instances("my_dataset_train", {}, "train/trainval.json",
                            "train/")
    register_coco_instances("my_dataset_val", {}, "train/trainval.json",
                            "train/")

    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("my_dataset_train", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    # Training hyper-parameters kept for reference; training is disabled and
    # the fine-tuned checkpoint below is loaded directly, so the unused
    # DefaultTrainer build (which re-downloaded zoo weights) was removed.
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 300  # 300 iterations seems good enough for this toy dataset; you will need to train longer for a practical dataset
    cfg.SOLVER.STEPS = []  # do not decay learning rate
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (ballon). (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
    # NOTE: this config means the number of classes, but a few popular unofficial tutorials incorrect uses num_classes+1 here.

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    cfg.MODEL.WEIGHTS = "model_final_card.pth"  # path to the model we just trained
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold
    predictor = DefaultPredictor(cfg)

    im = cv2.imread("x.jpg")
    outputs = predictor(im)
    print(
        outputs
    )  # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
    return outputs["instances"].pred_boxes.tensor.cpu().numpy()[0].tolist()


# print(get_card_width())
Пример #19
0
    def __init__(self, cfg):
        """
        Args:
            cfg (CfgNode):
        Use the custom checkpointer, which loads other backbone models
        with matching heuristics.
        """
        # Rescale worker-dependent config values for the actual world size.
        cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
        model = self.build_model(cfg)
        optimizer = self.build_optimizer(cfg, model)
        data_loader = self.build_train_loader(cfg)

        # Wrap in DDP only for multi-GPU / multi-process training.
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(model,
                                            device_ids=[comm.get_local_rank()],
                                            broadcast_buffers=False)

        # Initialise TrainerBase directly (bypassing DefaultTrainer.__init__),
        # choosing the AMP or plain training loop from the config.
        TrainerBase.__init__(self)
        self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else
                         SimpleTrainer)(model, data_loader, optimizer)

        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
        # Checkpointer writes to OUTPUT_DIR and also carries optimizer and
        # scheduler state so resumes restore the full training state.
        self.checkpointer = DetectionCheckpointer(
            model,
            cfg.OUTPUT_DIR,
            optimizer=optimizer,
            scheduler=self.scheduler,
        )
        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER
        self.cfg = cfg

        self.register_hooks(self.build_hooks())
def validation(cfg: DictConfig) -> None:
    """
    Transfer learning using pretrained models from detectron2 model zoo.

    :param cfg: the configuration dictionary of dataset_model.
    :type cfg: omegaconf.dictconfig.DictConfig.
    :return: None
    """
    log.info('--- Start Validation ---')

    # Register the validation split and show a few annotated samples.
    dataset_dicts, dataset_metadata = register_custom_coco_dataset(
        cfg=cfg, process='val')
    visualizing_coco_dataset(dataset_dicts=dataset_dicts,
                             dataset_metadata=dataset_metadata,
                             num_ims=cfg.validation.show_images)

    model_cfg: CfgNode = get_model_configs(cfg=cfg, process='val')
    val_name = cfg.name + '_val'

    # Restore the latest checkpoint before running inference.
    trainer: DefaultTrainer = DefaultTrainer(model_cfg)
    trainer.resume_or_load(resume=True)

    evaluator = COCOEvaluator(dataset_name=val_name,
                              cfg=model_cfg,
                              distributed=False,
                              output_dir=os.getcwd())
    loader = build_detection_test_loader(cfg=model_cfg,
                                         dataset_name=val_name)
    inference_on_dataset(model=trainer.model,
                         data_loader=loader,
                         evaluator=evaluator)
    log.info('--- Validation Done ---')
Пример #21
0
def test(test_dataset, config):
    """Perform evaluation on the dataset using the model from ``config``.

    :param test_dataset: tuple/list of registered dataset names; only the
        first entry is evaluated.
    :param config: detectron2 CfgNode with MODEL.WEIGHTS and OUTPUT_DIR set.
    :return: metrics dict from ``inference_on_dataset``.
    """
    # Fixed: removed a dead `global MODEL` declaration — MODEL was never
    # read or assigned in this function.
    config.DATASETS.TEST = test_dataset

    # Set-up trainer (resume=False -> load config.MODEL.WEIGHTS fresh).
    trainer = DefaultTrainer(config)
    trainer.resume_or_load(resume=False)

    evaluator = COCOEvaluator(test_dataset[0], config, False, output_dir=config.OUTPUT_DIR + '/test')
    val_loader = build_detection_test_loader(config, test_dataset[0])
    return inference_on_dataset(trainer.model, val_loader, evaluator)
Пример #22
0
    def __init__(self, cfg):
        """Epoch-aware trainer: builds model/optimizer/loader, optionally
        converts epoch-based schedule values to iterations, then finishes
        standard trainer construction.

        Args:
            cfg (CfgNode): reads SOLVER.EPOCH.ENABLED and SOLVER.MAX_ITER.
        """
        logger = logging.getLogger("detectron2")
        if not logger.isEnabledFor(logging.INFO):
            setup_logger()
        # Rescale worker-dependent config values for the actual world size.
        cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
        # Assume these objects must be constructed in this order.
        model = self.build_model(cfg)
        optimizer = self.build_optimizer(cfg, model)
        data_loader, num_per_epoch = self.build_train_loader(cfg)

        # update iteration cfg to epoch cfg
        if cfg.SOLVER.EPOCH.ENABLED:
            cfg = self.adjust_epoch_to_iter(cfg, num_per_epoch)

        # For training, wrap with DDP. But don't need this for inference.
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(model, device_ids=[comm.get_local_rank()], broadcast_buffers=False)
        
        # Skip DefaultTrainer.__init__ and call its parent's constructor.
        super(DefaultTrainer, self).__init__(model, data_loader, optimizer)

        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        self.checkpointer = DetectionCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            model,
            cfg.OUTPUT_DIR,
            optimizer=optimizer,
            scheduler=self.scheduler,
        )

        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER
        self.cfg = cfg
        self.register_hooks(self.build_hooks())
Пример #23
0
def task_a(model_name, model_file):
    """Evaluate a pre-trained model on kitti-mots-val and save qualitative results."""
    save_path = Path("output/task_a") / model_name
    os.makedirs(save_path, exist_ok=True)
    cfg = base_cfg(model_file, save_path)

    # Build the network and restore the configured checkpoint.
    net = build_model(cfg)
    DetectionCheckpointer(net).load(cfg.MODEL.WEIGHTS)

    # Quantitative evaluation on the validation split.
    evaluator = COCOEvaluator("kitti-mots-val",
                              cfg,
                              False,
                              output_dir="./output")
    DefaultTrainer(cfg).test(cfg, net, evaluators=[evaluator])

    get_qualitative_results(cfg, save_path)
Пример #24
0
 def run_coco_eval(self):
     """Run COCO evaluation on the first DATASETS.TEST entry and print metrics."""
     config = self.config
     test_set = config.DATASETS.TEST[0]

     trainer = DefaultTrainer(config)
     evaluator = COCOEvaluator(test_set,
                               config,
                               False,
                               output_dir=config.OUTPUT_DIR)
     loader = build_detection_test_loader(config, test_set)
     print(inference_on_dataset(trainer.model, loader, evaluator))
Пример #25
0
    def __init__(self, cfg):
        """Lightweight trainer: builds the model and iteration bounds.

        Args:
            cfg (CfgNode): base config; worker-dependent fields (including
                SOLVER.MAX_ITER) are auto-scaled into ``self.cfg``.
        """
        super().__init__()
        if not logger.isEnabledFor(logging.INFO):  # setup_logger is not called for d2
            setup_logger()
        self.cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
        self.storage: EventStorage = None
        self.model = build_model(self.cfg)

        self.start_iter = 0
        # Bug fix: read MAX_ITER from the auto-scaled config — the model above
        # is built from self.cfg, while the raw `cfg` still holds the
        # pre-scaling iteration count.
        self.max_iter = self.cfg.SOLVER.MAX_ITER
Пример #26
0
def main(args):
    """Convert Darknet weights into a detectron2-compatible .pth checkpoint."""
    cfg = setup(args)
    model = DefaultTrainer.build_model(cfg)

    # Collect the conv+BN modules that receive the Darknet weights.
    conv_bn_modules = model.get_conv_bn_modules()
    for module in conv_bn_modules:
        print(module.weight.size())

    load_darknet_weights(args.initial_weights, conv_bn_modules)

    checkpoint_path = os.path.join(args.output_dir, "yolov3.pth")
    torch.save(model.state_dict(), checkpoint_path)
    print("model save to", checkpoint_path)
Пример #27
0
    def __init__(self, working_dir, iterations = 5000, batch_size=16, num_classes=300):
        """
        Sets up the configuration for the Detectron model

        :param working_dir: str, directory to save outputs in (stored in working_dir/output)
        :param iterations: int, number of solver iterations to train for
        :param batch_size: int, batch size for backbone
        :param num_classes: upper bound on number of classes in dataset (number of dense heads)
        """
        # Expects a COCO-format layout under working_dir/data/{train,val}.
        self.train_annotations_path = os.path.join(
            working_dir, "data", "train", "processed_annotations.json")
        self.train_images_path = os.path.join(
            working_dir, "data", "train", "images/")
        self.val_annotations_path = os.path.join(
            working_dir, "data", "val", "processed_annotations.json")
        self.val_images_path = os.path.join(
            working_dir, "data", "val", "images/")

        register_coco_instances(
            "my_dataset_train", {}, self.train_annotations_path, self.train_images_path)
        register_coco_instances(
            "my_dataset_val", {}, self.val_annotations_path, self.val_images_path)

        self.cfg = get_cfg()
        # Check the model zoo and use any of the models ( from detectron2 github repo)
        self.cfg.merge_from_file(model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
        self.cfg.DATASETS.TRAIN = ("my_dataset_train",)
        self.cfg.DATASETS.TEST = ("my_dataset_val", )
        self.cfg.DATALOADER.NUM_WORKERS = 2
        # Initialize from the model-zoo checkpoint that matches the config.
        self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
        self.cfg.SOLVER.IMS_PER_BATCH = batch_size
        self.cfg.SOLVER.BASE_LR = 0.00025
        self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes

        self.cfg.OUTPUT_DIR = os.path.join(working_dir, 'output')
        os.makedirs(self.cfg.OUTPUT_DIR, exist_ok=True)
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5

        self.cfg.SOLVER.MAX_ITER = iterations
        self.trainer = DefaultTrainer(self.cfg)
def Train():
    """Visualize a few training samples, then fine-tune Mask R-CNN on them."""
    register_coco_instances(
        "custom", {}, "datasets/coco/annotations/instances_train2017.json",
        "datasets/coco/train2017")
    metadata = MetadataCatalog.get("custom")
    records = DatasetCatalog.get("custom")

    # Show three random annotated samples before training starts.
    for record in random.sample(records, 3):
        image = cv2.imread(record["file_name"])
        viz = Visualizer(image[:, :, ::-1], metadata=metadata, scale=1)
        drawn = viz.draw_dataset_dict(record)
        cv2.imshow('Sample', drawn.get_image()[:, :, ::-1])
        cv2.waitKey()

    cfg = get_cfg()
    cfg.merge_from_file(
        "configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml")
    cfg.DATASETS.TRAIN = ("custom", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = 'model_final_3c3198.pkl'
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.0001
    cfg.SOLVER.MAX_ITER = 150000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
def train_detectron2():
    """Fine-tune Mask R-CNN on the combined synthetic + real training sets.

    :return: the cfg used for training (for downstream inference).
    """
    dataset_storage = {
        'synthetic': generate_synthetic_datasets(),
        'real': generate_real_datasets()
    }
    num_classes = len(dataset_storage["synthetic"][MODES[0]]['unit_classes'])

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes

    cfg.DATASETS.TRAIN = (SYNTHETIC_DATASET_NAME[MODES[0]], REAL_DATASET_NAME[MODES[0]])
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2

    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 500

    cfg.OUTPUT_DIR = str(OUTPUT_PATH)
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    return cfg
def Train():
    """Fine-tune Faster R-CNN X101 on the butterfly dataset (94 classes)."""
    register_coco_instances(
        "custom", {}, "/home/lsc/datasets/butterfly/Annotations/train.json",
        "/home/lsc/datasets/butterfly/TrainData/JPEGImages")
    metadata = MetadataCatalog.get("custom")
    records = DatasetCatalog.get("custom")

    cfg = get_cfg()
    cfg.merge_from_file(
        "/home/lsc/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
    )
    # Datasets and loading.
    cfg.DATASETS.TRAIN = ("custom", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 8
    # Initialize from the zoo checkpoint matching the config above.
    cfg.MODEL.WEIGHTS = 'detectron2://COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x/139173657/model_final_68b088.pkl'
    # Solver schedule.
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.02
    cfg.SOLVER.MAX_ITER = 500
    # Heads.
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 94

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()