Example 1
    def __init__(self,
                 cfg,
                 instance_mode=ColorMode.SEGMENTATION,
                 parallel=False):
        """
        Args:
            cfg (CfgNode):
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        data_root = "/home/hoangphuc/MASK_RCNN_TOMO/dataset/TOMO_COCO_FORMAT_1"
        register_coco_instances("tomo_blisters", {},
                                data_root + '/annotations.json', data_root)
        self.metadata = MetadataCatalog.get("tomo_blisters")

        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.parallel = parallel
        if parallel:
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)
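A minimal usage sketch for the class above; the wrapper name "VisualizationDemo", the config file, and the image paths are assumptions for illustration.

import cv2
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer

cfg = get_cfg()
cfg.merge_from_file("configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.WEIGHTS = "output/model_final.pth"   # hypothetical weights path

demo = VisualizationDemo(cfg)                  # runs the __init__ above
img = cv2.imread("sample.png")                 # BGR array, as DefaultPredictor expects
outputs = demo.predictor(img)
v = Visualizer(img[:, :, ::-1], metadata=demo.metadata, scale=1.0)
vis = v.draw_instance_predictions(outputs["instances"].to(demo.cpu_device))
cv2.imwrite("sample_pred.png", vis.get_image()[:, :, ::-1])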
Example 2
def prepare_predictor():
    current_path = os.path.abspath(__file__)
    father_path = os.path.abspath(
        os.path.dirname(current_path) + os.path.sep + ".")

    # modelFile = os.path.join(father_path, "models/model20200728.pth")
    # modelFile = os.path.join(father_path, "models/model_6class20200918_final.pth")  # 6-class model
    modelFile = os.path.join(
        father_path,
        "models/model_mvp27class70k_20201015.pth")  # 27-class model (alt: model_mvp27class20201014.pth)

    cfgFile = os.path.join(
        father_path,
        "configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")

    num_class = len(class_list)
    MetadataCatalog.get(regist_train_name).thing_classes = class_list
    train_metadata = MetadataCatalog.get(regist_train_name)

    # create config
    cfg = get_cfg()
    # below path applies to current installation location of Detectron2
    cfg.merge_from_file(cfgFile)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2  # set threshold for this model
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_class
    cfg.MODEL.WEIGHTS = modelFile
    cfg.MODEL.DEVICE = "cuda"  # we use a GPU
    # cfg.MODEL.DEVICE = "cpu"  # or run on the CPU instead

    classes = train_metadata.get("thing_classes", None)
    predictor = DefaultPredictor(cfg)
    print("Predictor has been initialized.")

    return (predictor, classes)
Example 3
    def __init__(self, instance_mode=ColorMode.IMAGE, parallel=False):
        """
        Args:
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        print("init start\n")

        cfg = get_cfg()
        cfg.merge_from_file(
            "/home/gm/mask_rcnn/detectron2-master/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
        )
        cfg.merge_from_list([
            'MODEL.WEIGHTS',
            '/home/gm/mask_rcnn/model/facebookresearch_detectron2/model_final_f10217.pkl'
        ])
        # Set score_threshold for builtin models
        cfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.5
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
        cfg.freeze()
        self.metadata = MetadataCatalog.get(
            cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused")
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.parallel = parallel
        self.predictor = DefaultPredictor(cfg)
        print("init finished\n")
Example 4
def test_prepare_predictor():
    from detectron2.engine.defaults import DefaultPredictor
    # current_path = os.getcwd()
    current_path = os.path.abspath(__file__)
    father_path = os.path.abspath(
        os.path.dirname(current_path) + os.path.sep + ".")
    # print("father_path:",father_path)

    modelFile = os.path.join(father_path, 'output/model_final.pth')
    cfgFile = os.path.join(
        father_path,
        "configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")

    num_class = len(class_list)
    print(MetadataCatalog.get(regist_train_name))

    MetadataCatalog.get(regist_train_name).thing_classes = class_list

    train_metadata = MetadataCatalog.get(regist_train_name)

    # create config
    cfg = get_cfg()
    # below path applies to current installation location of Detectron2
    cfg.merge_from_file(cfgFile)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2  # set threshold for this model
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_class
    cfg.MODEL.WEIGHTS = modelFile
    # cfg.MODEL.DEVICE = "cuda"  # or run on the GPU
    cfg.MODEL.DEVICE = "cpu"  # run on the CPU

    classes = train_metadata.get("thing_classes", None)
    predictor = DefaultPredictor(cfg)
    print("Predictor has been initialized.")
    return (predictor, classes)
Example 5
def prepare_predictor():

    class_list = ['diaodeng', 'pishafa']  # fill in with your own classes
    train_metadata = 'zc_data'  # fill in with your own dataset name

    modelFile = "/home/user/qunosen/2_project/0_dvep/1_detectron2/ImageDetectionAPI/d2_object_detection/models/model20200728.pth"

    cfgFile = "/home/user/qunosen/2_project/0_dvep/1_detectron2/ImageDetectionAPI/d2_object_detection/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"

    num_class = len(class_list)
    MetadataCatalog.get(train_metadata).thing_classes = class_list
    train_metadata = MetadataCatalog.get(train_metadata)

    # create config
    cfg = get_cfg()
    # below path applies to current installation location of Detectron2
    cfg.merge_from_file(cfgFile)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2  # set threshold for this model
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_class
    cfg.MODEL.WEIGHTS = modelFile
    cfg.MODEL.DEVICE = "cpu"  # run on the CPU

    classes = train_metadata.get("thing_classes", None)
    predictor = DefaultPredictor(cfg)
    print("Predictor has been initialized.")

    return (predictor, classes)
Example 6
    def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
        """
        :param instance_mode: ColorMode
        :param parallel: whether to run the model in different processes.
        """
        self.metadata = MetadataCatalog.get(
            cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused")
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.parallel = parallel
        if parallel:
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)
Example 7
def prepare_predictor():

    class_list = ['diaodeng', 'pishafa']  # fill in with your own classes
    train_metadata = 'zc_data'  # fill in with your own dataset name

    # current_path = os.getcwd()
    current_path = os.path.abspath(__file__)
    father_path = os.path.abspath(
        os.path.dirname(current_path) + os.path.sep + ".")
    # print("father_path:",father_path)

    modelFile = os.path.join(father_path, "models/model20200728.pth")
    cfgFile = os.path.join(
        father_path,
        "configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")

    num_class = len(class_list)
    MetadataCatalog.get(train_metadata).thing_classes = class_list
    train_metadata = MetadataCatalog.get(train_metadata)

    # create config
    cfg = get_cfg()
    # below path applies to current installation location of Detectron2
    cfg.merge_from_file(cfgFile)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2  # set threshold for this model
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_class
    cfg.MODEL.WEIGHTS = modelFile
    cfg.MODEL.DEVICE = "cuda"  # run on the GPU

    classes = train_metadata.get("thing_classes", None)
    predictor = DefaultPredictor(cfg)
    print("Predictor has been initialized.")

    return (predictor, classes)
Example 8
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    cfg = setup()
    model = build_model(cfg)
    # logger.info("Model:\n{}".format(model))
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS)

    predictor = DefaultPredictor(cfg)
    # thresholds_for_classes = 0.7
    im_names = glob.glob(osp.join(images_dir, '*.tif'))
    # num_devices = get_world_size()
    with inference_context(model), torch.no_grad():
        for im_name in tqdm(im_names):
            im = Image.open(im_name)
            w, h = im.size
            # imSize = (h, w)
            # mode = im.mode
            im_data = np.array(im)
            # Convert RGB to BGR
            open_cv_image = im_data[:, :, ::-1].copy()
            output = predictor(open_cv_image)
            v = Visualizer(im_data, scale=0.5)
            out = v.draw_instance_predictions(output["instances"].to("cpu"))
            cv2.namedWindow("ins")
            cv2.imshow("ins", out.get_image()[:, :, ::-1])
            cv2.waitKey(1000)
Example 9
def get_predictor():
    cfg = get_cfg()
    cfg.merge_from_file(
        'configs/detectron/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'
    )
    cfg.MODEL.DEVICE = 'cuda'
    cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
    return DefaultPredictor(cfg)
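A short sketch of consuming get_predictor()'s output; the image path is an assumption. DefaultPredictor takes a BGR uint8 array and returns a dict whose "instances" carry pred_boxes, scores, and pred_classes.

import cv2

predictor = get_predictor()
im = cv2.imread("input.jpg")           # hypothetical test image
instances = predictor(im)["instances"].to("cpu")
print(instances.pred_boxes.tensor)     # (N, 4) boxes in XYXY pixel coordinates
print(instances.scores)                # (N,) confidence scores
print(instances.pred_classes)          # (N,) COCO class indices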
Example 10
 def run(self):
     predictor = DefaultPredictor(self.cfg)
     while True:
         task = self.task_queue.get()
         if isinstance(task, AsyncPredictor._StopToken):
             break
         idx, data = task
         result = predictor(data)
          self.result_queue.put((idx, result))  # Queue.put takes a single item; pass the pair as one tuple
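A sketch of the producer side that this worker loop implies, built on multiprocessing queues; detectron2's actual AsyncPredictor adds buffering and result reordering on top of this idea.

import multiprocessing as mp

task_queue = mp.Queue()
result_queue = mp.Queue()

def submit(idx, image):
    task_queue.put((idx, image))        # matches "idx, data = task" in run()

def shutdown(num_workers):
    for _ in range(num_workers):
        task_queue.put(AsyncPredictor._StopToken())  # one stop token per worker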
Example 11
 def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
     """
     Args:
         cfg (CfgNode):
         instance_mode (ColorMode):
         parallel (bool): whether to run the model in different processes from visualization.
             Useful since the visualization logic can be slow.
     """
     self.metadata = MetadataCatalog.get("baseball_setup_faster")
     self.cpu_device = torch.device("cpu")
     self.instance_mode = instance_mode
     self.parallel = parallel
     if parallel:
         num_gpu = torch.cuda.device_count()
         self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
     else:
         self.predictor = DefaultPredictor(cfg)
Example 12
    def init(self, config_file, params_dict=None):
        """
        Initialize the model for panoptic segmentation.
        
        config_file:
            The path of the configuration file that contains the necessary
            parameters to initialize the segmentation network (structured
            formats such as YAML, JSON, or XML are recommended).
        params_dict:
            The parameters needed to initialize the project, given as a
            dictionary:
            {
              gpu_id: [-1], # the gpu id (a list of Integers), -1 means using CPU.
              model_path: ['/home/yourmodelpath', ..., ''], # a list of strings.
              reserved: {}  # other necessary parameters.
            }
            NOTE: If a parameter appears in both the configuration file and
            params_dict, the value from params_dict takes precedence.

        return:
            error code: 0 on success; a negative number identifies the error type.
        """
        # Input checking.
        if not os.path.exists(config_file):
            return self._isee_errors['no_such_file']

        # (user custom code START)
        # Load parameters. 
        # An EXAMPLE using detectron2.
        # Prediction method
        self._SEGMENT_METHOD = params_dict['reserved']['method']
        # Device type
        device_type = params_dict['gpu_id'][0]
        if device_type < 0:
            device = 'cpu'
        else:
            device = 'cuda:{}'.format(params_dict['gpu_id'][0]) # Only one gpu mode is supported
        # Set parameters
        cfg = get_cfg()
        cfg.merge_from_file(config_file)
        cfg.MODEL.DEVICE = device
        cfg.MODEL.WEIGHTS = params_dict['model_path'][0]
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = params_dict['reserved']['roi_threshold']
        cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = \
            params_dict['reserved']['panoptic_threshold']
        cfg.freeze()
        # For showing the segmentation results.
        metadata = MetadataCatalog.get(
          cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        )
        self._metadata = metadata
        # Create predictor
        self._predictor = DefaultPredictor(cfg)
        # (user custom code END)

        return self._isee_errors['success']
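A minimal sketch of calling init() as documented above; the wrapper class name PanopticSegmenter, the config path, and the reserved values are illustrative assumptions, while the reserved keys match the ones the method reads.

seg = PanopticSegmenter()  # hypothetical class that owns the init() above
err = seg.init(
    "configs/panoptic_fpn_R_50_3x.yaml",           # hypothetical config path
    params_dict={
        "gpu_id": [0],                             # [-1] would select the CPU
        "model_path": ["/home/yourmodelpath/model_final.pkl"],
        "reserved": {"method": "panoptic",
                     "roi_threshold": 0.5,
                     "panoptic_threshold": 0.5},
    },
)
assert err == 0  # 0 signals success per the docstring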
Example 13
    def __init__(self, model_data_dir):
        cfg = get_cfg()
        yaml_path = os.path.abspath(
            os.path.join(file_root, "../..", keypoints_yaml))
        cfg.merge_from_file(yaml_path)
        weights = os.path.join(model_data_dir, keypoints_weights)
        cfg.MODEL.WEIGHTS = weights
        cfg.freeze()

        self.predictor = DefaultPredictor(cfg)
Example 14
    def __init__(self, cfg, parallel=False):
        """
        Args:
            cfg (CfgNode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        self.metadata = MetadataCatalog.get(
            cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused")
        self.cpu_device = torch.device("cpu")
        #self.instance_mode = instance_mode

        self.parallel = parallel
        if parallel:
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)
Example 15
def model_fn(model_dir):
    model = DefaultPredictor(cfg)
    try:
        model.model.load_state_dict(
            torch.load('/opt/ml/model/model_final.pth')['model'])
    except Exception:
        model.model.load_state_dict(
            torch.load('/opt/ml/model/opt/ml/model/model_final.pth',
                       map_location='cpu')["model"])
    return model
Example 16
def init_densepose():
    print(f"Loading config from {config_path}")
    print(f"Loading model from {model_path}")
    opts = []
    global cfg
    cfg = setup_config(config_path, model_path, opts)
    global predictor
    predictor = DefaultPredictor(cfg)
    global context
    context = create_context()
Example 17
def d2_run():

    train_metadata, val_metadata = register_dataset()
    cfg = set_up()
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    output_dir = cfg['OUTPUT_DIR']
    save_dir = os.path.join(output_dir, 'result')
    if not os.path.exists(save_dir): os.makedirs(save_dir)

    cfg.MODEL.WEIGHTS = os.path.join(output_dir, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2  # set the testing threshold for this model
    cfg.DATASETS.TEST = (regist_val_name, )

    predictor = DefaultPredictor(cfg)

    test_dir = val_images_dir
    imgs_list = [
        os.path.join(test_dir, file_name) for file_name in os.listdir(test_dir)
        if file_name.endswith((".jpg", ".png", ".bmp", ".jpeg"))
    ]

    for d in imgs_list:
        im = cv2.imread(d)
        outputs = predictor(im)
        instances = outputs["instances"]
        pred_classes = instances.pred_classes.cpu().tolist() if len(instances) else None
        print(pred_classes)

        v = Visualizer(im[:, :, ::-1],
                       metadata=train_metadata,
                       scale=0.9,
                       instance_mode=ColorMode.IMAGE_BW)
        v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        predict_file = os.path.join(
            save_dir,
            os.path.splitext(os.path.basename(d))[0] + "_predict.png")

        cv2.imwrite(predict_file, v.get_image()[:, :, ::-1])

        if os.path.exists(predict_file): print("Done: %s" % predict_file)

    ### evaluate
    evaluator = COCOEvaluator(regist_val_name,
                              cfg,
                              False,
                              output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, regist_val_name)
    my_eval = inference_on_dataset(trainer.model, val_loader, evaluator)
    print(my_eval)
    with open(log_file, "a") as f:
        print("%s evaluate:\n" % model_name, my_eval, file=f)
Example 18
 def __init__(self, cfg, args, instance_mode=ColorMode.IMAGE, parallel=False):
     """
     Args:
         cfg (CfgNode):
         instance_mode (ColorMode):
         parallel (bool): whether to run the model in different processes from visualization.
             Useful since the visualization logic can be slow.
     """
     self.cpu_device = torch.device("cpu")
     self.instance_mode = instance_mode
     self.draw_proposals = args.draw_proposals
     self.thresh = args.confidence_threshold
     self.parallel = parallel
     if parallel:
         num_gpu = torch.cuda.device_count()
         self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
     else:
         self.predictor = DefaultPredictor(cfg)
     
     self._init_visualization_metadata(cfg, args)
Example 19
def get_predictor(config, model_weights, dataset, properties, things):
    cfg = get_cfg()
    add_property_config(cfg)
    cfg.merge_from_file(os.path.join(dirname, config))
    cfg.MODEL.WEIGHTS = model_weights
    DatasetCatalog.clear()
    DatasetCatalog.register(dataset, get_dicts)
    MetadataCatalog.get(dataset).set(thing_classes=things)
    MetadataCatalog.get(dataset).set(property_classes=properties)
    cfg.DATASETS.TEST = (dataset,)
    return DefaultPredictor(cfg)
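get_dicts is referenced above but not shown; a minimal sketch of what it might return, following detectron2's standard dataset-dict format (the file path and annotation values are placeholders).

from detectron2.structures import BoxMode

def get_dicts():
    return [{
        "file_name": "images/000001.jpg",  # placeholder image path
        "image_id": 0,
        "height": 480,
        "width": 640,
        "annotations": [{
            "bbox": [100, 120, 200, 260],   # XYXY pixel coordinates
            "bbox_mode": BoxMode.XYXY_ABS,
            "category_id": 0,               # index into thing_classes
        }],
    }]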
Example 20
    def __init__(self,
                 cfg_object,
                 cfg_keypoint,
                 instance_mode=ColorMode.IMAGE):
        """
        Args:
            cfg_object (CfgNode): config for the object-detection model.
            cfg_keypoint (CfgNode): config for the keypoint-detection model.
            instance_mode (ColorMode):
        """
        self.metadata_object = MetadataCatalog.get("__unused")

        self.metadata_keypoint = MetadataCatalog.get(
            cfg_keypoint.DATASETS.TEST[0]
            if len(cfg_keypoint.DATASETS.TEST) else "__unused")

        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.predictor_object = DefaultPredictor(cfg_object)
        self.predictor_keypoint = DefaultPredictor(cfg_keypoint)

        self.head_pose_module = module_init(cfg_keypoint)
        self.mtcnn = MTCNN()
        self.transformations = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        self.softmax = nn.Softmax(dim=1).cuda()

        idx_tensor = [idx for idx in range(66)]
        self.idx_tensor = torch.FloatTensor(idx_tensor).cuda()
        self.data_json = {}
        self.data_json['object_detection'] = {}
        self.data_json['keypoint_detection'] = {}
        self.data_json['head_pose_estimation'] = {}
        self.frame_count = 0

        self.mlp_model = MLP(input_size=26, output_size=1).cuda()
        self.mlp_model.load_state_dict(torch.load(cfg_keypoint.MLP.PRETRAINED))
        self.mlp_model.eval()
Example 21
    def init(self, config_file, params_dict=None):
        """
        Initialize the densepose estimation model.

        config_file:
            The path of the configuration file that contains the necessary
            parameters to initialize the prediction network (structured
            formats such as YAML, JSON, or XML are recommended).
        params_dict:
            The parameters needed to initialize the project, given as a
            dictionary:
            {
              gpu_id: [-1], # the gpu id (a list of Integers), -1 means using CPU.
              model_path: ['/home/yourmodelpath', ..., ''], # a list of strings.
              reserved: {}  # other necessary parameters.
            }
            NOTE: If a parameter appears in both the configuration file and
            params_dict, the value from params_dict takes precedence.

        return:
            error code: 0 on success; a negative number identifies the error type.
        """
        # Input checking.
        if not os.path.exists(config_file):
            return self._isee_errors['no_such_file']

        # (user custom code START)
        # Load parameters. 
        # An EXAMPLE using detectron2.
        # set the method for densepose
        self._ESTIMATION_METHOD = params_dict['reserved']['method']
        # Device type
        device_type = params_dict['gpu_id'][0]
        if device_type < 0:  # only GPU is supported for densepose
            return self._isee_errors['bad_device_id']
        device = 'cuda:{}'.format(device_type)  # only single-GPU mode is supported
        # Set parameters
        cfg = get_cfg()
        add_densepose_config(cfg)
        cfg.merge_from_file(config_file)
        cfg.MODEL.DEVICE = device
        cfg.MODEL.WEIGHTS = params_dict['model_path'][0]
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = params_dict['reserved']['roi_threshold']
        cfg.freeze()
        self._cfg = cfg # For showing the estimation results.
        # Create estimater
        self._estimater = DefaultPredictor(cfg)
        # (user custom code END)

        return self._isee_errors['success']
Example 22
 def execute(cls: type, args: argparse.Namespace):
     predictor = DefaultPredictor(cfg)
     file_list = [args.input]
     context = cls.create_context(args)
      for file_name in file_list:
          img = read_image(file_name, format="BGR")  # predictor expects a BGR image
         with torch.no_grad():
             outputs = predictor(img)["instances"]
             out_binary = cls.execute_on_outputs(context, {"file_name": file_name, "image": img}, outputs)
     cls.postexecute(context)
     return out_binary
Example 23
    def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
        """
        Args:
            cfg (CfgNode):
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        self.metadata = MetadataCatalog.get(
            cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        )
        self.metadata.thing_classes = ["Aluminium foil", "Can", "Carton", "Cup", "Glass bottle", "Metal bottle cap", "Other", "Paper", "Plastic bottle", "Plastic bottle cap", "Plastic container", "Plastic film", "Plastic lid", "Pop tab", "Straw", "Styrofoam piece", "Wrapper"]
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.parallel = parallel
        if parallel:
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)
Example 24
    def __init__(self,
                 cfg,
                 arg_metadata=None,
                 instance_mode=ColorMode.IMAGE,
                 parallel=False):
        """
        Args:
            cfg (CfgNode):
            arg_metadata (Metadata): Metadata in Metadata format (not json format)
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        print("According to the config we have " +
              str(cfg.MODEL.ROI_HEADS.NUM_CLASSES) + " classes.")

        # I need to add this metadata handling according to https://github.com/facebookresearch/detectron2/issues/326 and https://github.com/facebookresearch/detectron2/issues/101
        if (arg_metadata is None):  #default value for COCO
            self.metadata = MetadataCatalog.get(
                cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused")
            print("I use the default metadata which is:")
            print(MetadataCatalog.get(cfg.DATASETS.TEST[0]))
            #cfg.DATASETS.TRAIN is ('coco_2017_train',)
            #cfg.DATASETS.TEST[0] is coco_2017_val
            #MetadataCatalog.get(cfg.DATASETS.TEST[0]) is Metadata(evaluator_type='coco', image_root='datasets/coco/val2017', json_file='datasets/coco/annotations/instances_val2017.json', name='coco_2017_val', thing_classes=['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'], thing_colors=[[220, 20, 60], [119, 11, 32], [0, 0, 142], [0, 0, 230], [106, 0, 228], [0, 60, 100], [0, 80, 100], [0, 0, 70], [0, 0, 192], [250, 170, 30], [100, 170, 30], [220, 220, 0], [175, 116, 175], [250, 0, 30], [165, 42, 42], [255, 77, 255], [0, 226, 252], [182, 182, 255], [0, 82, 0], [120, 166, 157], [110, 76, 0], [174, 57, 255], [199, 100, 0], [72, 0, 118], [255, 179, 240], [0, 125, 92], [209, 0, 151], [188, 208, 182], [0, 220, 176], [255, 99, 164], [92, 0, 73], [133, 129, 255], [78, 180, 255], [0, 228, 0], [174, 255, 243], [45, 89, 255], [134, 134, 103], [145, 148, 174], [255, 208, 186], [197, 226, 255], [171, 134, 1], [109, 63, 54], [207, 138, 255], [151, 0, 95], [9, 80, 61], [84, 105, 51], [74, 65, 105], [166, 196, 102], [208, 195, 210], [255, 109, 65], [0, 143, 149], [179, 0, 194], [209, 99, 106], [5, 121, 0], [227, 255, 205], [147, 186, 208], [153, 69, 1], [3, 95, 161], [163, 255, 0], [119, 0, 170], [0, 182, 199], [0, 165, 120], [183, 130, 88], [95, 32, 0], [130, 114, 135], [110, 129, 133], [166, 74, 118], [219, 142, 185], [79, 210, 114], [178, 90, 62], [65, 70, 15], [127, 167, 115], [59, 105, 106], [142, 108, 45], [196, 172, 0], [95, 54, 80], [128, 76, 255], [201, 57, 1], [246, 0, 122], [191, 162, 208]], thing_dataset_id_to_contiguous_id={1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 8, 10: 9, 11: 10, 13: 11, 14: 12, 15: 13, 16: 14, 17: 15, 18: 16, 19: 17, 20: 18, 21: 19, 22: 20, 23: 21, 24: 22, 25: 23, 27: 24, 28: 25, 31: 26, 32: 27, 33: 28, 34: 29, 35: 30, 36: 31, 37: 32, 38: 33, 39: 34, 40: 35, 41: 36, 42: 37, 43: 38, 44: 39, 46: 40, 47: 41, 48: 42, 49: 43, 50: 44, 51: 45, 52: 46, 53: 47, 54: 48, 55: 49, 56: 50, 57: 51, 58: 52, 59: 53, 60: 54, 61: 55, 62: 56, 63: 57, 64: 58, 65: 59, 67: 60, 70: 61, 72: 62, 73: 63, 74: 64, 75: 65, 76: 66, 77: 67, 78: 68, 79: 69, 80: 70, 81: 71, 82: 72, 84: 73, 85: 74, 86: 75, 87: 76, 88: 77, 89: 78, 90: 79})
        else:  #custom metadata: this is my adaption so we can use our own classes for trained model on demo.py
            self.metadata = arg_metadata  #this is a dict that already includes name, thing_classes etc.
            print("I use the given metadata which is:")
            print(self.metadata)
            #self.metadata is Metadata(name='Custom_Audi_A2D2_Dataset_Training', thing_classes=['Animal', 'Bicycle', 'Bus', 'Car', 'Cyclist', 'EmergencyVehicle', 'MotorBiker', 'Motorcycle', 'Pedestrian', 'Truck', 'UtilityVehicle', 'VanSUV', 'Misc'])

        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.parallel = parallel
        if parallel:
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)
Example 25
    def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
        """
        Args:
            cfg (CfgNode):
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        DatasetCatalog.register("pfallcnt_pred", lambda d: [])
        MetadataCatalog.get("pfallcnt_pred").set(thing_classes=["0", "1"],
                                                 thing_colors=[(0, 255, 0),
                                                               (255, 0, 0)])
        self.metadata = MetadataCatalog.get("pfallcnt_pred")
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.parallel = parallel
        if parallel:
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)
Example 26
def prepare_predictor(threshold=0.5):
    print('prepare predictor...')
    cfg = get_cfg()
    add_densepose_config(cfg)
    model_file = './projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml'
    model_weights = 'https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_101_FPN_DL_s1x/165712116/model_final_844d15.pkl'
    cfg.merge_from_file(model_file)
    cfg.MODEL.WEIGHTS = model_weights
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    cfg.freeze()
    predictor = DefaultPredictor(cfg)
    # print(cfg.dump())

    return predictor
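A hedged usage sketch for the DensePose predictor above; the input path is an assumption. For DensePose models the returned instances carry a pred_densepose field alongside the usual boxes and scores.

import cv2

predictor = prepare_predictor(threshold=0.5)
img = cv2.imread("person.jpg")        # hypothetical BGR input
instances = predictor(img)["instances"]
print(len(instances), "detections")
print(instances.scores)               # per-detection confidences
densepose_results = instances.pred_densepose  # DensePose-specific predictions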
Example 27
    def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
        """
        Args:
            cfg (CfgNode):
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        self.metadata = MetadataCatalog.get("__unused")
        unified_label_file = json.load(
            open(cfg.MULTI_DATASET.UNIFIED_LABEL_FILE))
        self.metadata.thing_classes = [
            [xx for xx in x['name'].split('_') if xx != ''][0]
            for x in unified_label_file['categories']]
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.parallel = parallel
        if parallel:
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)
Example 28
    def __init__(self,
                 config_file="E:/DL/det2/YJH/faster_rcnn_R_50_FPN_1x.yaml",
                 thres=0.5,
                 class_names=["nc", "pgw"]):
        self.class_names = class_names
        self.cpu_device = torch.device("cpu")
        cfg = setup_cfg(config_file, thres)
        self.predictor = DefaultPredictor(cfg)
        print("Init Model: ", config_file, cfg.MODEL.WEIGHTS, thres,
              class_names)

        self.image = None
        self.font_size = 0.5
        self.font_bold = 2
Example 29
 def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
     """
     Args:
         cfg (CfgNode):
         instance_mode (ColorMode):
         parallel (bool): whether to run the model in different processes from visualization.
             Useful since the visualization logic can be slow.
     """
     self.metadata = MetadataCatalog.get(
         "baseball_scoreboard"
     )
     self.cpu_device = torch.device("cpu")
     self.instance_mode = instance_mode
     self.parallel = parallel
     self.predictor = DefaultPredictor(cfg)
Example 30
 def inference(self, image):
     predictor = DefaultPredictor(self.cfg)
     outputs = predictor(image)
     instances = outputs['instances']
     if len(instances) == 0:
         return None
     confident_detections = instances[instances.scores == max(
         instances.scores)]
     box = np.asarray(
         confident_detections.pred_boxes.tensor.cpu().numpy()[0], dtype=int)
     (h, w) = image.shape[:2]
     (start_x, start_y, end_x, end_y) = box
     (start_x, start_y) = (max(0, start_x), max(0, start_y))
     (end_x, end_y) = (min(w - 1, end_x), min(h - 1, end_y))
      cropped = image[start_y:end_y, start_x:end_x]  # numpy images index as [row (y), col (x)]
     return cropped
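A usage sketch for the cropping helper above; the wrapper class name and image path are assumptions.

import cv2

detector = Detector()               # hypothetical class that owns self.cfg
image = cv2.imread("frame.jpg")
crop = detector.inference(image)
if crop is not None:
    cv2.imwrite("crop.jpg", crop)   # the highest-scoring detection, cropped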