Example #1
def detectron_model_and_config() -> t.Tuple[torch.nn.Module, "CfgNode"]:
    model_url: str = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"

    cfg: "CfgNode" = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_url))
    # set threshold for this model
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_url)

    cloned = cfg.clone()
    cloned.MODEL.DEVICE = "cpu"  # running on CI
    model: torch.nn.Module = build_model(cloned)
    model.eval()

    return model, cfg
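For completeness, a minimal inference sketch against the returned model (the input dict format is detectron2's standard one; "input.jpg" is a placeholder path):

import cv2
import torch

model, cfg = detectron_model_and_config()
img = cv2.imread("input.jpg")  # HWC uint8, BGR (OpenCV order)
tensor = torch.as_tensor(img.astype("float32").transpose(2, 0, 1))  # CHW
inputs = [{"image": tensor, "height": img.shape[0], "width": img.shape[1]}]
with torch.no_grad():
    instances = model(inputs)[0]["instances"]  # pred_boxes, scores, pred_classes, ...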
Example #2
def _get_model(args):

    cfg = get_cfg()
    cfg.merge_from_file(args.config)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model

    model = build_model(cfg)  # returns a torch.nn.Module
    weights = args.weights if args.weights is not None else cfg.MODEL.WEIGHTS

    DetectionCheckpointer(model).load(
        weights
    )  # must load weights this way, can't use cfg.MODEL.WEIGHTS = "..."
    model.train(False)  # inference mode

    return model
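A hypothetical invocation, assuming an argparse namespace with the two attributes the function reads:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True)   # path to a detectron2 config yaml
parser.add_argument("--weights", default=None)   # optional checkpoint override
model = _get_model(parser.parse_args())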
Example #3
def main(args):
    cfg = setup(args)

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=args.resume)

    distributed = comm.get_world_size() > 1
    if distributed:
        model = DistributedDataParallel(model,
                                        device_ids=[comm.get_local_rank()],
                                        broadcast_buffers=False)

    do_infer(cfg, args, model)
Example #4
    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()
        self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])

        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        self.aug = T.ResizeShortestEdge(
            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST],
            cfg.INPUT.MAX_SIZE_TEST)

        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format
Example #5
def evaluate():
    train_keys = ["bank"]
    DatasetCatalog.clear()
    MetadataCatalog.clear()
    keypoint_names = [
        "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "c10"
    ]
    keypoint_connection_rules = [
        ("c1", "c2", (255, 175, 100)), ("c2", "c3", (255, 175, 100)),
        ("c3", "c4", (255, 175, 100)), ("c4", "c5", (255, 175, 100)),
        ("c5", "c6", (255, 175, 100)), ("c6", "c7", (255, 175, 100)),
        ("c7", "c8", (255, 175, 100)), ("c8", "c9", (255, 175, 100)),
        ("c9", "c10", (255, 175, 100)),
    ]  # detectron2 expects keypoint names (not 1-based indices) in these rules
    keypoint_flip_map = [('c1', 'c10'), ('c2', 'c9'), ('c3', 'c8'),
                         ('c4', 'c7'), ('c5', 'c6')]
    for d in ["testval"]:
        #DatasetCatalog.register("table_testval", lambda d=d: get_icdar_dicts(train_keys,'testjson'))
        register_coco_instances("table_testval", {}, "./annotations.json",
                                "/content/VOC2007/JPEGImages")
        MetadataCatalog.get("table_testval").set(
            thing_classes=["table", "r"],
            keypoint_names=keypoint_names,
            keypoint_connection_rules=keypoint_connection_rules,
            keypoint_flip_map=keypoint_flip_map)

    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.DATASETS.TRAIN = ("table_testval", )
    cfg.DATASETS.TEST = ("table_testval", )
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.MODEL.DEVICE = "cpu"
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 300  # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2  # two classes: "table" and "r"
    cfg.MODEL.WEIGHTS = '/content/keypoints/workdir/savedmodel/model_final.pth'
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.3  # set the testing threshold for this model
    cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 10
    model = build_model(cfg)
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=True)
    do_test(cfg, model)
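do_test is defined elsewhere; a minimal stand-in built from detectron2's stock evaluation helpers (an assumption, not the author's implementation) could look like:

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset

def do_test(cfg, model):
    evaluator = COCOEvaluator("table_testval", cfg, False, output_dir=cfg.OUTPUT_DIR)
    loader = build_detection_test_loader(cfg, "table_testval")
    return inference_on_dataset(model, loader, evaluator)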
Example #6
def main(args):
    train_name, num_class = regist_coco_dataset(args.train_annotation, args.thing_classes)
    val_name, _ = regist_coco_dataset(args.val_annotation, args.thing_classes)
    test_name, _ = regist_coco_dataset(args.test_annotation, args.thing_classes)
    cfg, hyperparameters = setup(args, train_name, val_name, test_name, num_class)
    dest_dir = os.path.join(cfg.OUTPUT_DIR, 'sample_compare_result')
    if not args.resume:
        if os.path.isdir(cfg.OUTPUT_DIR):
            shutil.rmtree(cfg.OUTPUT_DIR)
        os.mkdir(cfg.OUTPUT_DIR)
        os.mkdir(dest_dir)
    if hasattr(args, 'opts'):
        mlflow.log_params(hyperparameters)

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if args.eval_only:
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        return do_evaluate(cfg, model)

    distributed = comm.get_world_size() > 1
    if distributed:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )

    model = do_train(cfg, model, resume=args.resume)
    # mlflow.pytorch.log_model(pytorch_model = model,
    #                      artifact_path = 'model_best',
    #                      conda_env = mlflow.pytorch.get_default_conda_env())
    mlflow.log_artifact(os.path.join(cfg.OUTPUT_DIR, f'model_{os.getenv("MLFLOW_EXPERIMENT_NAME")}.pth'))


    results = do_evaluate(cfg, model)
    mlflow.log_metrics({k + '_bbox': v for k, v in results['bbox'].items()})
    mlflow.log_metrics({k + '_segm': v for k, v in results['segm'].items()})
    experiment_name = os.getenv('MLFLOW_EXPERIMENT_NAME')
    
    compare_gt_coco(cfg,
                    annotation_file=args.test_annotation,
                    dest_dir=dest_dir,
                    weight=os.path.join(cfg.OUTPUT_DIR, f'model_{experiment_name}.pth'),
                    score_thres_test=0.7,
                    num_sample=num_class)

    mlflow.log_artifacts(dest_dir)
Example #7
def load_models():
    global classes

    cfg = get_cfg()
    model_yaml = "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"
    cfg.merge_from_file(model_zoo.get_config_file(model_yaml))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.65  # set threshold for this model
    # cfg.MODEL.WEIGHTS = "output/model_final.pth"
    cfg.MODEL.WEIGHTS = "model_facemask_n_socialdistance.pth"
    # cfg.MODEL.WEIGHTS = "/content/drive/My Drive/datasets/facemask/detectron_d4_faster_rcnn_R_50_FPN_1x/model_final.pth"
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(classes)
    cfg.MODEL.DEVICE = "cpu"
    predictor = DefaultPredictor(cfg)
    model = build_model(cfg)
    _ = DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    return cfg, predictor, model
Example #8
def do_activation(cfg):
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    model.eval()

    counts = Counter()
    total_activations = []
    for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        count = activation_count_operators(model, data)
        counts += count
        total_activations.append(sum(count.values()))
    logger.info("(Million) Activations for Each Type of Operators:\n" +
                str([(k, v / idx) for k, v in counts.items()]))
    logger.info("Total (Million) Activations: {}±{}".format(
        np.mean(total_activations), np.std(total_activations)))
Example #9
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # must be set before CUDA is first initialized
    cfg = setup(args)
    model = build_model(cfg)
    # logger.info("Model:\n{}".format(model))
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=args.resume)

    predictor = DefaultPredictor(cfg)
    # thresholds_for_classes = 0.7
    im_names = glob.glob(osp.join(images_dir, '*.tif'))

    img_groups = group_testimages(im_names)
    groupedInference(img_groups, model, predictor)
    # generalInference(model, im_names, predictor)
    return
Example #10
    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()
        self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])

        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        self.transform_gen = T.ResizeShortestEdge(
            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
        )

        self.input_format = cfg.INPUT.FORMAT
        # Jamie
        assert self.input_format in ["RGB", "BGR", "BGRT", "BGRTTT", 'UVV', 'UVM', 'BGRTUV', 'BGR_only', 'mid_RGB_out', 'BGRTTT_perturb'], self.input_format
Example #11
def get_model(config_file, weights=None, freeze_at=2):
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(config_file))
    cfg.MODEL.WEIGHTS = weights
    if weights is None:
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)

    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 80

    cfg.MODEL.BACKBONE.FREEZE_AT = freeze_at

    model = build_model(cfg)
    # model = DefaultTrainer.build_model(cfg)

    # build_model constructs the network but does not load weights;
    # load the checkpoint so the returned backbone is actually pretrained
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

    return model.backbone
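A short sketch of exercising the returned backbone (the "p2".."p6" key names and input size are assumptions for a ResNet-FPN config; the random input is only a shape check, not a real image):

import torch

backbone = get_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
device = next(backbone.parameters()).device
x = torch.randn(1, 3, 800, 1216, device=device)  # NCHW; H and W divisible by 32
features = backbone(x)  # dict of FPN feature maps
print({k: tuple(v.shape) for k, v in features.items()})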
Example #12
def task_a(model_name, model_file):

    save_path = Path("output/task_a") / model_name
    os.makedirs(save_path, exist_ok=True)
    cfg = base_cfg(model_file, save_path)

    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

    evaluator = COCOEvaluator("kitti-mots-val",
                              cfg,
                              False,
                              output_dir="./output")
    trainer = DefaultTrainer(cfg)
    trainer.test(cfg, model, evaluators=[evaluator])

    get_qualitative_results(cfg, save_path)
Example #13
def test_detectron2_artifact_pack(detectron2_classifier_class):

    cfg = get_cfg()
    # add project-specific config (e.g., TensorMask)
    # here if you're not running a model in detectron2's core library
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
        )
    )
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
    # Find a model from detectron2's model zoo.
    # You can use the https://dl.fbaipublicfiles... url as well
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )
    clone_cfg = cfg.clone()  # cfg can be modified by model
    clone_cfg.MODEL.DEVICE = "cpu"
    model = build_model(clone_cfg)
    model.eval()
    checkpointer = DetectionCheckpointer(model)
    checkpointer.load(cfg.MODEL.WEIGHTS)

    image = imageio.imread('http://images.cocodataset.org/val2017/000000439715.jpg')
    image = image[:, :, ::-1]  # RGB -> BGR, the channel order the model expects

    svc = detectron2_classifier_class()
    svc.pack(
        'model',
        model,
        metadata={"device": "cpu"},
        input_model_yaml=model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
        ),
    )
    response = svc.predict(image)
    assert response['scores'][0] > 0.9
    comparison = np.array(response['classes']) == np.array(
        [17, 0, 0, 0, 0, 0, 0, 0, 25, 0, 25, 25, 0, 0, 24]
    )
    assert comparison.all()

    saved_bundle = svc.save()

    svc = load_from_dir(saved_bundle)
    response = svc.predict(image)
Example #14
    def build_model(cls, cfg):
        """
        Returns:
            torch.nn.Module:

        It now calls :func:`detectron2.modeling.build_model`.
        Overwrite it if you'd like a different model.
        """
        logger = logging.getLogger(__name__)
        if not logger.isEnabledFor(
                logging.INFO):  # setup_logger is not called for d2
            setup_logger(cfg.OUTPUT_DIR, name=__name__)

        model = build_model(cfg)
        logger.info("Model:\n{}".format(model))
        return model
Example #15
    def __init__(self, config: str, checkpoint: str, conf_threshold: float = 0.5, **kwargs):
        super().__init__()  # super(DetectronRCNNPredictor).__init__() would skip initialization
        detectron2_cfg = get_cfg()
        detectron2_cfg.merge_from_file(config)
        if checkpoint is not None:
            detectron2_cfg.MODEL.WEIGHTS = checkpoint
        self.model = build_model(detectron2_cfg)
        self.model.eval()

        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(detectron2_cfg.MODEL.WEIGHTS)

        self.transform_gen = T.ResizeShortestEdge(
            [detectron2_cfg.INPUT.MIN_SIZE_TEST, detectron2_cfg.INPUT.MIN_SIZE_TEST], detectron2_cfg.INPUT.MAX_SIZE_TEST
        )

        self.conf_threshold = conf_threshold
Example #16
    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()
        self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])

        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        self.pixel_means = np.expand_dims(np.expand_dims(cfg.MODEL.PIXEL_MEAN, axis=0), axis=0)
        self.normalizer = lambda x: (x - self.pixel_means).astype(np.float32)
        self.transform_gen = T.OpenCVResizeShortestEdge(
            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
        )

        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format
Example #17
    def __init__(self, cfg):
        """Initialization method.
        Args:
            cfg: The configuration object of PyAnomaly.
        """
        super(Detector, self).__init__()
        auxiliary_cfg = cfg.MODEL.auxiliary.detector
        detector_cfg = get_cfg()
        file_name = auxiliary_cfg.config
        detector_cfg.merge_from_file(model_zoo.get_config_file(file_name))
        detector_cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        detector_cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.8

        self.det_model = build_model(detector_cfg)

        DetectionCheckpointer(self.det_model).load(auxiliary_cfg.model_path)
        self.det_model.train(False)
Example #18
    def build_model(cls, cfg):
        """
        Returns:
            torch.nn.Module:

        Custom model builder, which deactivates region proposal such that provided ground truth bboxes are used as proposals instead.
        """
        model = build_model(cfg)
        model.proposal_generator = None

        print(
            "Region proposal deactivated, ground truth bounding boxes are used."
        )

        logger = logging.getLogger(__name__)
        logger.info("Model:\n{}".format(model))
        return model
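With the proposal generator removed, a GeneralizedRCNN reads precomputed proposals from each input dict; a sketch of passing ground-truth boxes that way (height, width, image_tensor and gt_boxes_tensor are placeholders):

from detectron2.structures import Boxes, Instances
import torch

proposals = Instances((height, width))
proposals.proposal_boxes = Boxes(gt_boxes_tensor)               # (N, 4) XYXY tensor
proposals.objectness_logits = torch.ones(len(gt_boxes_tensor))  # dummy scores
outputs = model([{"image": image_tensor, "proposals": proposals}])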
Example #19
    def __init__(self, training_out_path: Path = None,
                 cfg_path: Path = None,
                 model_weigths_path: Path = None,
                 threshold=0.9):
        if training_out_path is not None:
            cfg_path = training_out_path / 'config.yaml' if cfg_path is None else cfg_path
            model_weigths_path = training_out_path / 'model_final.pth' if model_weigths_path is None \
                else model_weigths_path
        cfg = load_cfg_from_file(cfg_path)
        cfg.MODEL.WEIGHTS = str(model_weigths_path)

        self.cfg = cfg
        self.model = build_model(self.cfg).eval()
        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        self.threshold = threshold
Example #20
def predict():

    if "deep_fashion_test" not in DatasetCatalog.list():
        register_coco_instances(
            "deep_fashion_test", {},
            os.path.join(APP_ROOT, 'data/deepfashion2/annotation_test.json'),
            os.path.join(APP_ROOT, "static"))

    deep_fashion_test_metadata = MetadataCatalog.get("deep_fashion_test")

    deep_fashion_test_dict = DatasetCatalog.get("deep_fashion_test")

    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"))
    cfg.DATASETS.TEST = ("deep_fashion_test", )
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"
    )  # Let training initialize from model zoo
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 13

    cfg.MODEL.DEVICE = "cpu"
    model = build_model(cfg)

    cfg.MODEL.WEIGHTS = (os.path.join(
        APP_ROOT, 'data/deepfashion2/model_final_clear.pth'))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set the testing threshold for this model

    predictor = DefaultPredictor(cfg)

    im = cv2.imread(os.path.join(app.config['UPLOAD_FOLDER'], "image.jpg"))
    outputs = predictor(im)
    v = Visualizer(im[:, :, ::-1], deep_fashion_test_metadata, scale=0.5)
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2.imwrite((os.path.join(app.config['UPLOAD_FOLDER'], "prediction.jpg")),
                v.get_image()[:, :, ::-1])
    flash(
        'Success! Click Show to view the result. Click Clear before running the detector again.'
    )
    return render_template('index.html', prediction=v.get_image()[:, :, ::-1])
Example #21
def detect(json_dir, video_dir, save_dir):
    starttime = timeit.default_timer()

    Path(save_dir).mkdir(parents=True, exist_ok=True)

    cfgfile = config['detector']['cfgfile']
    weightfile = config['detector']['weightfile']
    confidence_threshold = config['detector']['confidencethreshold']

    cfg = setup_cfg(cfgfile, confidence_threshold)
    model = build_model(cfg)
    DetectionCheckpointer(model).load(weightfile)

    cpu_device = torch.device("cpu")

    class_names = config['detector']['originclassnames']
    cam_datas = get_list_data(json_dir)
    
    for cam_data in cam_datas:
        cam_name = cam_data['camName']
        roi_poly = Polygon(cam_data['shapes'][0]['points'])

        video_path = os.path.join(video_dir, cam_name + '.mp4')
        video_cap = cv2.VideoCapture(video_path)
        num_frames = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))
        
        imgs = []
        for i in tqdm(range(num_frames), desc='Extracting {}'.format(cam_name)):
            success, img = video_cap.read()
            if not success:
                break  # guard against short reads; avoids appending None frames
            imgs.append(img)

        boxes = detect_detectron2(model, cpu_device, imgs, cam_name, config['detector']['batchsize'])

        # remove bboxes out of MOI
        if config['remove_not_intersec_moi']:
            boxes = [check_intersect_box(box_list, roi_poly) for box_list in boxes]

        if save_dir:
            filepath = os.path.join(save_dir, cam_name)
            boxes = np.array(boxes)
            np.save(filepath, boxes)

    endtime = timeit.default_timer()
    
    print('Detect time: {} seconds'.format(endtime - starttime))
Example #22
	def __init__(self):
		super(detectron, self).__init__()
		self.modelname = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
		# self.modelname = "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml"
		self.predictor = None
		self.SCORE_THRESHOLD = 0.7
		self.AREA_FRACTION_THRESHOLD = 0.1

		self.cfg = get_cfg()
		self.cfg.merge_from_file(model_zoo.get_config_file(self.modelname))
		self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
		self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(self.modelname)
		self.cfg.MODEL.DEVICE = 'cpu'
		# build the model only after the config is fully merged; note that
		# build_model does not load weights (DefaultPredictor below does)
		self.model = build_model(self.cfg)
		self.predictor = DefaultPredictor(self.cfg)

		print('Predictor loaded...')
Example #23
def main(args):
    cfg = setup(args)

    model = build_model(cfg)
    # logger.info("Model:\n{}".format(model))
    if args.eval_only:
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        return do_test(cfg, model)

    distributed = comm.get_world_size() > 1
    if distributed:
        model = DistributedDataParallel(model,
                                        device_ids=[comm.get_local_rank()],
                                        broadcast_buffers=False,
                                        find_unused_parameters=True)
    do_train(cfg, model)
    return do_test(cfg, model)
Example #24
def task_a(model_name, model_file, evaluate=True, visualize=True):
    print('Running task A for model', model_name)

    SAVE_PATH = os.path.join('./results_week_4_task_a', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    model_training_metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]) # Store current model training metadata
    cfg.DATASETS.TRAIN = ('KITTIMOTS_train', )
    cfg.DATASETS.TEST = ('KITTIMOTS_val', )
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)

    if evaluate:
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        # Evaluation
        print('Evaluating')
        evaluator = COCOEvaluator('KITTIMOTS_val', cfg, False, output_dir=SAVE_PATH)
        trainer = DefaultTrainer(cfg)
        trainer.test(cfg, model, evaluators=[evaluator])

    if visualize:
        # Qualitative results: visualize some results
        print('Getting qualitative results')
        predictor = DefaultPredictor(cfg)
        inputs = kitti_val()
        inputs = inputs[:20] + inputs[-20:]
        for i, sample in enumerate(inputs):  # avoid shadowing the builtin `input`
            img = cv2.imread(sample['file_name'])
            outputs = predictor(img)
            v = Visualizer(
                img[:, :, ::-1],
                metadata=model_training_metadata,
                scale=0.8,
                instance_mode=ColorMode.IMAGE)
            v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
            cv2.imwrite(
                os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'),
                v.get_image()[:, :, ::-1])
Example #25
    def _test_model(self, config_path, device="cpu"):
        # requires extra dependencies
        from detectron2.export import Caffe2Model, add_export_config, export_caffe2_model

        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(config_path))
        cfg = add_export_config(cfg)
        cfg.MODEL.DEVICE = device

        inputs = [{"image": self._get_test_image()}]
        model = build_model(cfg)
        DetectionCheckpointer(model).load(model_zoo.get_checkpoint_url(config_path))
        c2_model = export_caffe2_model(cfg, model, copy.deepcopy(inputs))

        with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d:
            c2_model.save_protobuf(d)
            c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs))
            c2_model = Caffe2Model.load_protobuf(d)
        c2_model(inputs)[0]["instances"]
Example #26
    def __init__(self, num_classes=1):
        cfg = get_cfg()
        cfg.merge_from_file(
            "/content/detectron2_repo/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml"
        )
        cfg.MODEL.WEIGHTS = "/content/tracking_wo_bnw/model_final.pth"
        cfg.MODEL.MASK_ON = False
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes  # honor the argument instead of hardcoding 1
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.0

        self.model = build_model(cfg)
        self.model.eval()
        self.model.cuda()

        self.proposal_generator = self.model.proposal_generator
        self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST

        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)
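A hedged sketch of driving the exposed proposal generator, assuming the wrapped model is a GeneralizedRCNN whose preprocess_image and backbone can be reused (detector is an instance of the class above; image_tensor is a placeholder CHW tensor on the model's device):

import torch

with torch.no_grad():
    images = detector.model.preprocess_image([{"image": image_tensor}])
    features = detector.model.backbone(images.tensor)
    proposals, _ = detector.proposal_generator(images, features, None)  # RPN returns (proposals, losses)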
Example #27
    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()

        print('model device: ', self.model.device)

        self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])

        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format

        self.min_size = cfg.INPUT.MIN_SIZE_TEST
        self.max_size = cfg.INPUT.MAX_SIZE_TEST

        self.batch_size = int(cfg.IMAGES_PER_BATCH_TEST)
Example #28
def main(args):
    cfg = setup(args)

    # import the relation_retinanet as meta_arch, so they will be registered
    from relation_retinanet import RelationRetinaNet

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if args.eval_only:
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        return do_test(cfg, model)
    distributed = comm.get_world_size() > 1
    if distributed:
        model = DistributedDataParallel(model,
                                        device_ids=[comm.get_local_rank()],
                                        broadcast_buffers=False)

    do_train(cfg, model)
Example #29
    def __init__(self, cfg: CfgNode):
        super().__init__()
        self.cfg = cfg
        self.model = build_model(cfg)
        self.storage = None
        # evaluators for validation datasets, split by model tag (default, ema),
        # in the order of DATASETS.TEST
        self.dataset_evaluators = {ModelTag.DEFAULT: []}
        self.save_hyperparameters()
        self.eval_res = None

        self.ema_state: Optional[EMAState] = None
        if cfg.MODEL_EMA.ENABLED:
            self.ema_state = EMAState(
                decay=cfg.MODEL_EMA.DECAY,
                device=cfg.MODEL_EMA.DEVICE or cfg.MODEL.DEVICE,
            )
            self.model_ema = deepcopy(self.model)
            self.dataset_evaluators[ModelTag.EMA] = []
Example #30
def getmodel(thresh=0.7):
    cfg = get_cfg()
    #cfg.merge_from_file("model_config.yaml")
    cfg.merge_from_file(model_zoo.get_config_file("Base-RCNN-FPN.yaml"))
    #cfg.MODEL.WEIGHTS = os.path.join(model_path, "model_final_fix.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh
    #cfg.MODEL.SCORE_THRESH_TEST = 0.5
    cfg.MODEL.DEVICE = 'cuda'

    model = build_model(cfg)
    
    DetectionCheckpointer(model).load(os.path.join(model_path, "model_final.pth"))

    #model_dict = torch.load(cfg.MODEL.WEIGHTS, map_location=torch.device(cfg.MODEL.DEVICE))
    #model.load_state_dict(model_dict['model'] )
    model.to(cfg.MODEL.DEVICE)
    model.train(False)
    
    return model