Example #1
0
def main(args):
    """Entry point: run evaluation only, or start/resume training."""
    cfg = setup(args)

    if args.eval_only:
        # Build the student network and restore weights for evaluation.
        model = Trainer.build_model(cfg).s_net
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            # Fold in test-time-augmentation metrics when enabled.
            results.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # Training path: build a trainer, optionally resume, and train.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Example #2
0
def main(args):
    """Entry point: evaluate a checkpoint, or train (optionally with DDP)."""
    cfg = setup(args)

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if args.eval_only:
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        return do_test(cfg, model)

    # Wrap with DistributedDataParallel only when running multi-GPU.
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(model,
                                        device_ids=[comm.get_local_rank()],
                                        broadcast_buffers=False)

    do_train(cfg, model)
    return do_test(cfg, model)
Example #3
0
def perform_eval(cfg, trainer):
    """Build, load and evaluate a trained model via the trainer's class methods.

    :param cfg: Namespace containing all OPMask configs.
    :param trainer: Trainer used for training the model.
    :return: Evaluation results.
    """
    model = trainer.build_model(cfg)
    # Checkpoints live under "<OUTPUT_DIR>/models"; always resume here.
    save_dir = os.path.join(cfg.OUTPUT_DIR, "models")
    checkpointer = DetectionCheckpointer(model, save_dir=save_dir)
    checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=True)

    res = trainer.test(cfg, model)
    if comm.is_main_process():
        verify_results(cfg, res)
    return res
def main(args):
    """Build the config from CLI arguments, then evaluate or train."""
    # setup the config file
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)

    if not args.eval_only:
        # Standard training path with optional resume.
        trainer = Trainer(cfg)
        trainer.resume_or_load(resume=args.resume)
        return trainer.train()

    # Evaluation-only path: restore weights into a freshly built model.
    model = Trainer.build_model(cfg)
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=args.resume)
    return Trainer.test(cfg, model)
def main(args):
    """Evaluate a checkpoint when --eval-only is set, otherwise train."""
    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    # For anything fancier than the standard training logic, consider
    # writing your own training loop or subclassing the trainer.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Example #6
0
    def __init__(self, cfg):
        """Build an inference-ready predictor; *cfg* is cloned before use."""
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()
        self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])

        # Restore the trained weights into the freshly built model.
        DetectionCheckpointer(self.model).load(cfg.MODEL.WEIGHTS)

        print('cfg.INPUT.MIN_SIZE_TEST', cfg.INPUT.MIN_SIZE_TEST)
        print('cfg.INPUT.MAX_SIZE_TEST', cfg.INPUT.MAX_SIZE_TEST)
        # Test-time resize: shortest edge to MIN_SIZE_TEST, capped at MAX_SIZE_TEST.
        self.transform_gen = T.ResizeShortestEdge(
            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST],
            cfg.INPUT.MAX_SIZE_TEST)

        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format
def evaluate(dataset):
    """Run COCO bbox evaluation of the trained model on *dataset*."""
    register_one_set(dataset)

    cfg = get_my_cfg()
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7

    # Build the architecture without weights, then load the trained ones.
    model = DefaultTrainer.build_model(cfg)
    checkpointer = DetectionCheckpointer(model, cfg.OUTPUT_DIR)
    checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=False)

    evaluator = COCOEvaluator(dataset, ("bbox", ),
                              False,
                              output_dir=os.path.join("output", "evaluate"))
    loader = build_detection_test_loader(cfg, dataset)
    print(inference_on_dataset(model, loader, evaluator))
Example #8
0
def load_model_img():
    """Load the image feature extraction model into the global ``net_img``.

    Builds the config from the default bottom-up-attention arguments,
    constructs the model, restores its weights, and puts it in eval mode.
    Also rebinds the global ``cfg`` with the resulting config.

    Fix vs. the original: the dead local constants MIN_BOXES / MAX_BOXES /
    CONF_THRESH were removed — they were assigned but never used, and being
    locals they did not affect any other code.
    """
    global net_img
    global cfg

    time_start = time.time()
    print('\nLoad image feature extraction model')
    logging.disable()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config-file',
        default='model/extract-bua-caffe-r101.yaml',
        metavar='FILE',
        help='path to config file',
    )
    parser.add_argument('--mode',
                        default='caffe',
                        type=str,
                        help='bua_caffe, ...')
    parser.add_argument(
        '--resume',
        action='store_true',
        help='whether to attempt to resume from the checkpoint directory',
    )
    parser.add_argument(
        'opts',
        help='Modify config options using the command-line',
        default=None,
        nargs=argparse.REMAINDER,
    )
    # Parse an empty argv so only the defaults declared above are used.
    args = parser.parse_args([])
    cfg = setup(args)

    net_img = DefaultTrainer.build_model(cfg)
    DetectionCheckpointer(net_img, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=args.resume)
    net_img.eval()

    time_end = time.time()
    print('Finish load net model!')
    print('Model load time: {:.3f}s\n'.format(time_end - time_start))
Example #9
0
def main(args):
    """Register the chefCap datasets, then evaluate or train."""
    cfg = setup(args)

    # Register the train (6998 images) and val (1199 images) splits.
    for split in ["train", 'val']:
        DatasetCatalog.register("chefCap_" + split,
                                lambda d=split: get_chefcap_image_dicts())
        MetadataCatalog.get("chefCap_" + split).set(
            thing_classes=list(things_class_dict.keys()))
        if split == 'val':
            # The val split is evaluated with the Pascal VOC protocol.
            meta = MetadataCatalog.get("chefCap_val")
            meta.evaluator_type = "pascal_voc"
            meta.year = 2012
            meta.dirname = "/opt/work/chefCap/detectron2_fasterrcnn/data"

    if args.eval_only:
        model = DefaultTrainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = DefaultTrainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(DefaultTrainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Example #10
0
    def __init__(self, num_classes=1):
        """Build a cascade mask R-CNN detector in eval mode on the GPU.

        NOTE(review): ``num_classes`` is accepted for interface compatibility
        but not used — NUM_CLASSES is hard-coded to 1 below.
        """
        cfg = get_cfg()
        cfg.merge_from_file(
            "/content/detectron2_repo/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml"
        )
        cfg.MODEL.WEIGHTS = "/content/tracking_wo_bnw/model_final.pth"
        cfg.MODEL.MASK_ON = False
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.0

        self.model = build_model(cfg)
        self.model.eval()
        self.model.cuda()

        # Expose the RPN and the test-time NMS threshold for callers.
        self.proposal_generator = self.model.proposal_generator
        self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST

        # Restore the trained weights.
        DetectionCheckpointer(self.model).load(cfg.MODEL.WEIGHTS)
Example #11
0
    def __init__(self, cfg):
        """
        Build the full training state: model, optimizer, data loader,
        LR scheduler, checkpointer and hooks.

        Args:
            cfg (CfgNode): the full detectron2-style config.
        """
        super().__init__()
        logger = logging.getLogger("detectron2")
        if not logger.isEnabledFor(
                logging.INFO):  # setup_logger is not called for d2
            setup_logger()
        # Rescale batch size / LR / iteration counts to the actual world size.
        cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())

        # Assume these objects must be constructed in this order.
        model = self.build_model(cfg)
        optimizer = self.build_optimizer(cfg, model)
        data_loader = self.build_train_loader(cfg)

        # For training, wrap with DDP. But don't need this for inference.
        ## add FIND_UNUSED_PARAMETERS
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(
                model,
                device_ids=[comm.get_local_rank()],
                broadcast_buffers=False,
                find_unused_parameters=cfg.SOLVER.FIND_UNUSED_PARAMETERS)
        # Pick the AMP (mixed-precision) trainer when enabled in the config.
        # NOTE(review): both trainers are invoked with a leading `cfg` argument;
        # upstream detectron2's SimpleTrainer takes (model, data_loader,
        # optimizer) — presumably these are project subclasses accepting cfg;
        # confirm against their definitions.
        self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else
                         SimpleTrainer)(cfg, model, data_loader, optimizer)

        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        self.checkpointer = DetectionCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            model,
            cfg.OUTPUT_DIR,
            optimizer=optimizer,
            scheduler=self.scheduler,
        )
        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER
        self.cfg = cfg

        self.register_hooks(self.build_hooks())
Example #12
0
def main(args):
    """Register the THz dataset, then evaluate or train."""
    cfg = setup(args)
    print(cfg)

    # Register the dataset and sanity-check its annotations.
    register_dataset()
    checkout_dataset_annotation(name="iecas_THz_2019_train")
    if args.eval_only:
        model = Trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def getmodel(threadh=0.7):
    """Build the FPN detector, load trained weights and return it in eval mode.

    :param threadh: ROI heads score threshold applied at test time.
    :return: the loaded model on the configured device.
    """
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("Base-RCNN-FPN.yaml"))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threadh
    cfg.MODEL.DEVICE = 'cuda'

    model = build_model(cfg)
    DetectionCheckpointer(model).load(os.path.join(model_path, "model_final.pth"))

    model.to(cfg.MODEL.DEVICE)
    model.train(False)  # equivalent to model.eval()
    return model
Example #14
0
def main(args):
    """Register the cartography semantic-segmentation splits and evaluate."""
    # Register train/val with stuff classes, colors and the sem_seg evaluator.
    for split in ["train", "val"]:
        DatasetCatalog.register(
            "carte_" + split,
            lambda d=split: dataset_carto.get_cartography_dicts("dataset/" + d))
        MetadataCatalog.get("carte_" + split).set(
            stuff_classes=["background", "foret", "autoroute", "route"],
            evaluator_type="sem_seg",
            stuff_colors=[(140, 34, 140), (34, 139, 34), (255, 20, 147),
                          (218, 165, 32)])

    cfg = setup(args)

    # Build the model, restore weights and run evaluation.
    model = Trainer.build_model(cfg)
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=args.resume)
    return Trainer.test(cfg, model)
Example #15
0
def evaluate():
    """Build the table-detection config, load the trained model and test it."""
    initData()
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TEST = ("table_testval", )
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    # 300 iterations is enough for this toy dataset; train longer in practice.
    cfg.SOLVER.MAX_ITER = 300
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # default: 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # dataset has a single class
    cfg.MODEL.WEIGHTS = '/content/detection/savedmodel/model_final.pth'
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.6  # test-time score threshold
    model = build_model(cfg)
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=True)
    do_test(cfg, model)
Example #16
0
    def __init__(self, cfg):
        """Batch predictor: build the model, load weights, keep test sizes."""
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()

        print('model device: ', self.model.device)

        self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])

        # Restore the trained weights.
        DetectionCheckpointer(self.model).load(cfg.MODEL.WEIGHTS)

        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format

        # Test-time resize bounds and inference batch size.
        self.min_size = cfg.INPUT.MIN_SIZE_TEST
        self.max_size = cfg.INPUT.MAX_SIZE_TEST
        self.batch_size = int(cfg.IMAGES_PER_BATCH_TEST)
Example #17
0
    def __init__(self, min_boxes=3, max_boxes=10, threshold=0.5):
        """Build the bottom-up-attention feature extractor on CPU.

        Fix vs. the original: the vocabulary loop bound its variable to the
        name ``object``, shadowing the builtin; it also materialized the
        whole file with ``readlines()`` where iterating the file suffices.

        Args:
            min_boxes: minimum number of region proposals to keep.
            max_boxes: maximum number of region proposals to keep.
            threshold: detection score threshold.
        """
        config_file = 'bottom-up-attention.pytorch/configs/bua-caffe/extract-bua-caffe-r101.yaml'
        self._cfg = get_cfg()
        add_bottom_up_attention_config(self._cfg, True)
        self._cfg.merge_from_file(config_file)
        self._cfg.MODEL.DEVICE = 'cpu'

        self._model = DefaultTrainer.build_model(self._cfg)
        DetectionCheckpointer(self._model, save_dir=self._cfg.OUTPUT_DIR).resume_or_load(self._cfg.MODEL.WEIGHTS)
        self._model.eval()

        self._min_boxes = min_boxes
        self._max_boxes = max_boxes
        self._threshold = threshold

        # Class vocabulary: index 0 is background, then one label per line
        # (keep only the first synonym before the comma).
        self._classes = ['__background__']
        with open(os.path.join('bottom-up-attention.pytorch', 'evaluation', 'objects_vocab.txt')) as f:
            for line in f:
                self._classes.append(line.split(',')[0].lower().strip())
    def _test_model(self, config_path, device="cpu"):
        """Export a model-zoo model to Caffe2, round-trip it through protobuf
        serialization on disk, and run the reloaded model once as a smoke test.

        Args:
            config_path: model-zoo config path (e.g. "COCO-Detection/...").
            device: value placed into cfg.MODEL.DEVICE before export.
        """
        # requires extra dependencies
        from detectron2.export import Caffe2Model, add_export_config, export_caffe2_model

        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(config_path))
        cfg = add_export_config(cfg)
        cfg.MODEL.DEVICE = device

        inputs = [{"image": self._get_test_image()}]
        model = build_model(cfg)
        DetectionCheckpointer(model).load(model_zoo.get_checkpoint_url(config_path))
        # Deep copies protect `inputs` from being mutated during export/graph
        # dumping, so the final call below still sees the original data.
        c2_model = export_caffe2_model(cfg, model, copy.deepcopy(inputs))

        with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d:
            c2_model.save_protobuf(d)
            c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs))
            # Reload from the serialized protobuf to exercise the load path.
            c2_model = Caffe2Model.load_protobuf(d)
        # Smoke-run the reloaded model; the result is intentionally discarded.
        c2_model(inputs)[0]["instances"]
Example #19
0
def main(args):
    """Register the MADS COCO splits, then evaluate or train (with optional TTA)."""
    from detectron2.data.datasets import register_coco_instances
    folder_data = "/root/detectron2/MADS_data_train_test/60_40_tonghop"

    # Train split.
    train_name = "mads_train"
    train_json = os.path.join(folder_data, "train.json")
    train_images = os.path.join(folder_data, "train", "images")

    # Validation split.
    val_name = "mads_val"
    val_json = os.path.join(folder_data, "val.json")
    val_images = os.path.join(folder_data, "val", "images")

    register_coco_instances(train_name, {}, train_json, train_images)
    register_coco_instances(val_name, {}, val_json, val_images)

    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    # For anything fancier than the standard training logic, consider
    # writing your own training loop or subclassing the trainer.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        # Evaluate with test-time augmentation at the end of training.
        trainer.register_hooks([
            hooks.EvalHook(0,
                           lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()
Example #20
0
def main(args):
    """Entry point supporting quantization-aware training (QAT), quantized
    or float evaluation, and standard training.

    Paths:
      * args.qat: patch modules for QAT and train.
      * args.eval_only / args.quant_eval: evaluate (a converted quantized
        model for quant_eval, a float checkpoint otherwise).
      * default: standard training.
    """
    cfg = setup(args)

    if args.adaptive_pool:
        assert args.eval_only, "AdaptivePool is supported only in test mode"
        # Monkey-patch detectron2's poolers so RoI pooling ops become
        # AdaptivePool (test-time only, per the assert above).
        import detectron2.modeling.poolers
        detectron2.modeling.poolers.RoIPool = AdaptivePool
        detectron2.modeling.poolers.ROIAlign = AdaptivePool

    if args.qat:
        # Quantization-aware training: patch modules, then train as usual.
        quantize_decorate()
        quantize_prepare()
        trainer = Trainer(cfg)
        trainer.resume_or_load(resume=args.resume)
        trainer.model = Trainer.update_model(trainer.model, args.qbackend)
        # Keep the checkpointer pointing at the replaced model object.
        trainer.checkpointer.model = trainer.model
        return trainer.train()

    elif args.eval_only or args.quant_eval:
        if args.quant_eval:
            # Rebuild the QAT-prepared model, load raw weights manually
            # (stripping a possible DDP "module." prefix), then convert to a
            # true quantized model before testing.
            quantize_decorate()
            quantize_prepare()
            model = Trainer.build_model(cfg)
            model = Trainer.update_model(model, args.qbackend)
            model.eval()
            from fvcore.common.checkpoint import _strip_prefix_if_present
            weights = torch.load(cfg.MODEL.WEIGHTS)['model']
            _strip_prefix_if_present(weights, "module.")
            model.load_state_dict(weights)
            torch.quantization.convert(model, inplace=True)
        else:
            # Plain float evaluation from cfg.MODEL.WEIGHTS.
            model = Trainer.build_model(cfg)
            DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).load(
                cfg.MODEL.WEIGHTS
            )
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Example #21
0
def task_a(model_name, model_file, evaluate=True, visualize=True):
    """Run week-4 task A: COCO evaluation and/or qualitative visualization.

    Args:
        model_name: display name used for output paths and logs.
        model_file: model-zoo config path for the model.
        evaluate: run COCO evaluation on the KITTIMOTS validation split.
        visualize: save qualitative predictions for a few val images.
    """
    print('Running task A for model', model_name)

    SAVE_PATH = os.path.join('./results_week_4_task_a', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    # Keep the metadata of the dataset the model was pre-trained on; it is
    # used to label the qualitative visualizations below.
    model_training_metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
    cfg.DATASETS.TRAIN = ('KITTIMOTS_train', )
    cfg.DATASETS.TEST = ('KITTIMOTS_val', )
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)

    if evaluate:
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        # Evaluation
        print('Evaluating')
        evaluator = COCOEvaluator('KITTIMOTS_val', cfg, False, output_dir=SAVE_PATH)
        trainer = DefaultTrainer(cfg)
        trainer.test(cfg, model, evaluators=[evaluator])

    if visualize:
        # Qualitative results on the first and last 20 validation images.
        print('Getting qualitative results')
        predictor = DefaultPredictor(cfg)
        samples = kitti_val()
        samples = samples[:20] + samples[-20:]
        for idx, sample in enumerate(samples):
            img = cv2.imread(sample['file_name'])
            outputs = predictor(img)
            drawer = Visualizer(
                img[:, :, ::-1],
                metadata=model_training_metadata,
                scale=0.8,
                instance_mode=ColorMode.IMAGE)
            drawer = drawer.draw_instance_predictions(outputs['instances'].to('cpu'))
            cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(idx) + '.png'), drawer.get_image()[:, :, ::-1])
Example #22
0
def main(args):
    """Evaluate a traced model when --eval-only is set, otherwise train.

    Fix vs. the original: weights are restored *before* tracing — tracing
    first would bake the randomly initialized weights into the traced
    graph — and the example image is converted to a torch tensor, since
    ``torch.jit.trace`` expects tensor example inputs, not numpy arrays.
    """
    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        # Load the trained weights first so the trace captures them.
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        img = Image.open("datasets/coco/train2017/000000001146.jpg")
        img.load()
        example = torch.as_tensor(np.asarray(img, dtype="float32"))
        model = torch.jit.trace(model, example)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Example #23
0
    def __init__(self, cfg):
        """
        Copyright (c) Facebook, Inc. and its affiliates.
        Adapted Detectron2 class.

        General trainer executing the training as described in the paper.

        Builds the model, optimizer and data loader, wraps the model with
        DDP when running distributed, and sets up the LR scheduler,
        checkpointer (saving under "<OUTPUT_DIR>/models") and hooks.

        Args:
            cfg (CfgNode): Namespace containing all OPMask configs.
        """
        logger = logging.getLogger(__name__)
        if not logger.isEnabledFor(
                logging.INFO):  # setup_logger is not called for d2
            setup_logger(cfg.OUTPUT_DIR, name=__name__)

        # Assume these objects must be constructed in this order.
        model = self.build_model(cfg)
        optimizer = self.build_optimizer(cfg, model)
        data_loader = self.build_train_loader(cfg)

        # For training, wrap with DDP. But don't need this for inference.
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(model,
                                            device_ids=[comm.get_local_rank()],
                                            broadcast_buffers=False)
        super().__init__(model, data_loader, optimizer)

        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        self.checkpointer = DetectionCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            model,
            os.path.join(cfg.OUTPUT_DIR, "models"),
            optimizer=optimizer,
            scheduler=self.scheduler,
        )
        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER
        self.cfg = cfg

        self.register_hooks(self.build_hooks())
Example #24
0
    def __init__(self, cfg):
        """
        Build the full training state — model, optimizer, data loader, LR
        scheduler, checkpointer and hooks — logging each construction step.

        Args:
            cfg (CfgNode): the full detectron2-style config.
        """
        logger = logging.getLogger("detectron2")
        if not logger.isEnabledFor(
                logging.INFO):  # setup_logger is not called for d2
            setup_logger()
        # Assume these objects must be constructed in this order.
        model = self.build_model(cfg)
        logger.info('Created The model')
        optimizer = self.build_optimizer(cfg, model)
        logger.info('Created The optimizer')
        data_loader = self.build_train_loader(cfg)
        logger.info('Created The data loader')
        # For training, wrap with DDP. But don't need this for inference.
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(model,
                                            device_ids=[comm.get_local_rank()],
                                            broadcast_buffers=False)
        super().__init__(model, data_loader, optimizer)

        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
        logger.info('Created The scheduler')
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        self.checkpointer = DetectionCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            model,
            cfg.OUTPUT_DIR,
            optimizer=optimizer,
            scheduler=self.scheduler,
        )
        logger.info('Created The checkpointer')
        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER
        self.cfg = cfg

        self.register_hooks(self.build_hooks())

        logger.info('Registered the hooks')
Example #25
0
def predict(fn):
    """Run keypoint R-CNN inference on the image file *fn* and display the
    predicted instances with matplotlib.

    Fix vs. the original: the unused local ``original_image`` (a BGR->RGB
    copy that was computed but never fed to the model) and the surrounding
    dead commented-out code were removed.

    Args:
        fn: path to the input image (read with OpenCV, i.e. BGR channel order).
    """
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.MODEL.DEVICE = "cpu"
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 300  # enough for this toy dataset; train longer in practice
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # default: 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    cfg.MODEL.WEIGHTS = '/content/keypoints/workdir/savedmodel/model_final.pth'
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.3  # set the testing threshold for this model
    cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 10
    model = build_model(cfg)
    model.eval()
    checkpointer = DetectionCheckpointer(model)
    checkpointer.load(cfg.MODEL.WEIGHTS)
    imageori = cv2.imread(fn)
    with torch.no_grad():
        # NOTE(review): the raw BGR array is fed to the model — no channel
        # flip is applied before inference; confirm that matches training.
        height, width = imageori.shape[:2]
        image = torch.as_tensor(imageori.astype("float32").transpose(2, 0, 1))
        print('---', imageori.shape, height, width)
        inputs = {"image": image, "height": height, "width": width}
        outputs = model([inputs])[0]
        print(outputs["instances"].pred_classes)
    MetadataCatalog.get("my_dataset_train").set(thing_classes=["table", "r"])
    table_metadata = MetadataCatalog.get("my_dataset_train")
    v = Visualizer(imageori[:, :, ::-1], metadata=table_metadata, scale=0.8)
    out = v.draw_instance_predictions(outputs["instances"])
    plt.figure(figsize=(100, 100))
    plt.imshow(out.get_image()[:, :, ::-1])
def main(args):
    """Evaluate with the PathwayEvaluator, or train the relation RetinaNet."""
    cfg = setup(args)
    # Importing relation_retinanet registers its meta-architecture.
    from relation_retinanet import RelationRetinaNet

    if args.eval_only:
        model = Trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)

        evaluator = PathwayEvaluator(cfg.DATASETS.TEST[0], cfg, True, False, cfg.OUTPUT_DIR)
        res = Trainer.test(cfg, model, evaluator)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Example #27
0
def main(args):
    """Produce mAP results in eval-only mode, otherwise train from the config."""
    # Setup config node
    cfg = setup_config(args, random_seed=args.random_seed)

    if args.eval_only:
        # Build a fresh model and restore weights for evaluation.
        model = Trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    # Build Trainer from config node. Begin Training.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Example #28
0
    def __init__(self, cfg_path: str, num_workers: int = 4) -> None:
        """Build a predictor from a model-zoo config path.

        Args:
            cfg_path: model-zoo config path.
            num_workers: size of the preprocessing worker pool.
        """
        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(cfg_path))
        # NOTE: you may customize cfg settings
        # cfg.MODEL.DEVICE="cuda" # use gpu by default
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        # Weights come from the model zoo; a local checkpoint path works too.
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(cfg_path)

        self.cfg = cfg.clone()
        self.model = build_model(cfg)
        self.model.eval()
        DetectionCheckpointer(self.model).load(cfg.MODEL.WEIGHTS)

        # Test-time resize: shortest edge to MIN_SIZE_TEST, capped at MAX_SIZE_TEST.
        self.aug = T.ResizeShortestEdge(
            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST],
            cfg.INPUT.MAX_SIZE_TEST)
        self.pool = Pool(num_workers)
Example #29
0
def main(args):
    """Evaluate a checkpoint, or train (optionally with DDP).

    Fix vs. the original: the leftover ``import ipdb; ipdb.set_trace()``
    debugging breakpoint was removed — it halted every run and pulled in a
    non-production dependency.
    """
    cfg = setup(args)

    model = build_model(cfg)  #<class 'detectron2.modeling.meta_arch.rcnn.GeneralizedRCNN'>
    logger.info("Model:\n{}".format(model))
    if args.eval_only:
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        return do_test(cfg, model)

    # Wrap with DistributedDataParallel only when running multi-GPU.
    distributed = comm.get_world_size() > 1
    if distributed:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )

    do_train(cfg, model, resume=args.resume)
    return do_test(cfg, model)
Example #30
0
def main(args):
    """Register the OpenLogo datasets, then evaluate or train.

    Fix vs. the original: ``DefaultTrainer(cfg)`` was constructed twice —
    once before the eval/train branch and again for training — performing
    the expensive model/data-loader build redundantly. A single trainer
    instance is now reused for both paths.
    """
    cfg = setup(args)
    show = True

    register_openlogo(cfg.DATASETS.TRAIN[0], "datasets/data/openlogo",
                      "trainval", "supervised_imageset")
    register_openlogo(cfg.DATASETS.TEST[0], "datasets/data/openlogo", "test",
                      "supervised_imageset")
    trainer = DefaultTrainer(cfg)

    evaluator = OpenLogoDetectionEvaluator(cfg.DATASETS.TEST[0])

    if args.eval_only:
        model = trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)

        if show:
            # Dump a few qualitative predictions alongside the metrics.
            visualize(cfg, amount=20)

        res = trainer.test(cfg, model, evaluators=[evaluator])

        if comm.is_main_process():
            verify_results(cfg, res)
        if cfg.TEST.AUG.ENABLED:
            res.update(trainer.test_with_TTA(cfg, model))

        return res

    trainer.resume_or_load(resume=args.resume)

    if cfg.TEST.AUG.ENABLED:
        # Evaluate with test-time augmentation at the end of training.
        trainer.register_hooks([
            hooks.EvalHook(0,
                           lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])

    return trainer.train()