import argparse
import time

import cv2
import numpy as np
import torch
import pyzed.sl as sl  # ZED SDK Python bindings

from maskrcnn_benchmark.config import cfg
from predictor import COCODemo  # demo helper from maskrcnn-benchmark

# get_masks3d, get_humans3d, get_boxes3d and overlay_distances are
# project-specific helpers (3D extraction from ZED depth) defined elsewhere.


def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Webcam Demo")
    parser.add_argument(
        "--config-file",
        default="configs/caffe2/e2e_mask_rcnn_X_101_32x8d_FPN_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.6,
        help="Minimum score for the prediction to be shown",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        default=256,
        help="Smallest size of the image to feed to the model. "
        "Model was trained with 800, which gives best results",
    )
    parser.add_argument(
        "--show-mask-heatmaps",
        dest="show_mask_heatmaps",
        help="Show a heatmap probability for the top masks-per-dim masks",
        action="store_true",
    )
    parser.add_argument(
        "--masks-per-dim",
        type=int,
        default=2,
        help="Number of heatmaps per dimension to show",
    )
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    parser.add_argument("--svo-filename",
                        help="Optional SVO input filepath",
                        default=None)

    args = parser.parse_args()

    # load config from file and command-line arguments
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # prepare object that handles inference plus adds predictions on top of image
    coco_demo = COCODemo(
        cfg,
        confidence_threshold=args.confidence_threshold,
        show_mask_heatmaps=args.show_mask_heatmaps,
        masks_per_dim=args.masks_per_dim,
        min_image_size=args.min_image_size,
    )

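    # Configure the ZED capture: 720p, ULTRA depth mode, metric units and
    # right-handed Y-up coordinates (optionally replaying an SVO recording)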
    init_cap_params = sl.InitParameters()
    if args.svo_filename:
        print("Loading SVO file " + args.svo_filename)
        init_cap_params.set_from_svo_file(args.svo_filename)
        init_cap_params.svo_real_time_mode = True
    init_cap_params.camera_resolution = sl.RESOLUTION.HD720
    init_cap_params.depth_mode = sl.DEPTH_MODE.ULTRA
    init_cap_params.coordinate_units = sl.UNIT.METER
    init_cap_params.depth_stabilization = True
    init_cap_params.camera_image_flip = sl.FLIP_MODE.AUTO
    init_cap_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP

    cap = sl.Camera()
    if not cap.is_opened():
        print("Opening ZED Camera...")
    status = cap.open(init_cap_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    display = True
    runtime = sl.RuntimeParameters()
    left = sl.Mat()
    ptcloud = sl.Mat()
    depth_img = sl.Mat()
    depth = sl.Mat()

    res = sl.Resolution(1280, 720)

    # Create a Transform object for the PositionalTrackingParameters
    py_transform = sl.Transform()
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    tracking_parameters.set_as_static = True
    err = cap.enable_positional_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    running = True
    keep_people_only = True

    if coco_demo.cfg.MODEL.MASK_ON:
        print("Mask enabled!")
    if coco_demo.cfg.MODEL.KEYPOINT_ON:
        print("Keypoints enabled!")

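    # Main loop: grab a ZED frame, run the detector, overlay 2D/3D results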
    while running:
        start_time = time.time()
        err_code = cap.grab(runtime)
        if err_code != sl.ERROR_CODE.SUCCESS:
            break

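        # Retrieve the left image, the depth (view and measure) and the
        # XYZ point cloud used below for 3D boxes, masks and skeletons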
        cap.retrieve_image(left, sl.VIEW.LEFT, resolution=res)
        cap.retrieve_image(depth_img, sl.VIEW.DEPTH, resolution=res)
        cap.retrieve_measure(depth, sl.MEASURE.DEPTH, resolution=res)
        cap.retrieve_measure(ptcloud, sl.MEASURE.XYZ, resolution=res)
        ptcloud_np = np.array(ptcloud.get_data())

        img = cv2.cvtColor(left.get_data(), cv2.COLOR_RGBA2RGB)
        prediction = coco_demo.select_top_predictions(
            coco_demo.compute_prediction(img))

        # Keep people only (COCO class index 1 corresponds to "person")
        if keep_people_only:
            labels_tmp = prediction.get_field("labels")
            people_coco_label = 1
            keep = torch.nonzero(labels_tmp == people_coco_label).squeeze(1)
            prediction = prediction[keep]

        composite = img.copy()
        humans_3d = None
        masks_3d = None
        if coco_demo.show_mask_heatmaps:
            composite = coco_demo.create_mask_montage(composite, prediction)
        composite = coco_demo.overlay_boxes(composite, prediction)
        if coco_demo.cfg.MODEL.MASK_ON:
            masks_3d = get_masks3d(prediction, depth)
            composite = coco_demo.overlay_mask(composite, prediction)
        if coco_demo.cfg.MODEL.KEYPOINT_ON:
            # Extract 3D skeleton from the ZED depth
            humans_3d = get_humans3d(prediction, ptcloud_np)
            composite = coco_demo.overlay_keypoints(composite, prediction)
        overlay_distances(prediction, get_boxes3d(prediction, ptcloud_np),
                          composite, humans_3d, masks_3d)
        composite = coco_demo.overlay_class_names(composite, prediction)

        print(" Time: {:.2f} s".format(time.time() - start_time))

        if display:
            cv2.imshow("COCO detections", composite)
            cv2.imshow("ZED Depth", depth_img.get_data())
            key = cv2.waitKey(10)
            if key == 27:  # Esc to quit
                break

    # Release the camera once the loop exits
    cap.close()
    cv2.destroyAllWindows()
Example #2
import argparse
import os

import cv2
import numpy as np
from PIL import Image
from tqdm import tqdm

from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.utils.imports import import_file
from predictor import COCODemo

# FolderDataset is a project-specific dataset wrapper defined elsewhere.


def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--ckpt",
        help="The path to the checkpoint for test, default is the latest checkpoint.",
        default=None,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    config_file = args.config_file

    # update the config options with the config file
    cfg.merge_from_file(config_file)
    # manual override some options
    cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
    cfg.merge_from_list(args.opts)

    cfg.freeze()

    for conf_thresh in [0.1, 0.3, 0.5, 0.7, 0.9]:
        coco_demo = COCODemo(
            cfg,
            min_image_size=800,
            confidence_threshold=conf_thresh,
        )

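        # Resolve the test datasets declared in the config via the paths catalog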
        paths_catalog = import_file("maskrcnn_benchmark.config.paths_catalog",
                                    cfg.PATHS_CATALOG, True)
        DatasetCatalog = paths_catalog.DatasetCatalog
        for dataset_name in cfg.DATASETS.TEST:
            print(dataset_name)
            dataset = DatasetCatalog.get(dataset_name)

            dataset = FolderDataset(dataset['args']['data_dir'],
                                    dataset['args']['split'])
            COCODemo.CATEGORIES = dataset.CLASSES
            for image, target, index in tqdm(dataset):

                image_name = dataset.img_files[index].split("/")[-1]

                image = np.array(image)

                all_labels = [
                    coco_demo.CATEGORIES[i]
                    for i in target.get_field("labels").tolist()
                ]
                if len(all_labels) > 1:
                    print(all_labels)

                ### GROUND TRUTH
                result = image.copy()
                if coco_demo.show_mask_heatmaps:
                    result = coco_demo.create_mask_montage(result, target)
                result = coco_demo.overlay_boxes(result, target)
                if coco_demo.cfg.MODEL.MASK_ON:
                    result = coco_demo.overlay_mask(result, target)
                if coco_demo.cfg.MODEL.KEYPOINT_ON:
                    result = coco_demo.overlay_keypoints(result, target)

                # Class names were already collected above in `all_labels`
                labels = all_labels
                boxes = target.bbox

                for box, label in zip(boxes, labels):
                    # cv2.putText needs integer pixel coordinates
                    x, y = map(int, box[:2])
                    cv2.putText(result, label, (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 5, (255, 255, 255),
                                1)

                result = Image.fromarray(result)

                # Append "_GT" before the original extension (.tif/.jpg/.JPG)
                name, ext = os.path.splitext(image_name)
                for label_GT in all_labels:
                    out = os.path.join(cfg.OUTPUT_DIR,
                                       f"inference_{conf_thresh}",
                                       dataset_name, label_GT,
                                       f"{name}_GT{ext}")
                    os.makedirs(os.path.dirname(out), exist_ok=True)
                    if not os.path.exists(out):
                        result.save(out)

                ### PREDICTION
                predictions = coco_demo.compute_prediction(image)
                top_predictions = coco_demo.select_top_predictions(predictions)

                result = image.copy()
                if coco_demo.show_mask_heatmaps:
                    result = coco_demo.create_mask_montage(
                        result, top_predictions)
                result = coco_demo.overlay_boxes(result, top_predictions)
                if coco_demo.cfg.MODEL.MASK_ON:
                    result = coco_demo.overlay_mask(result, top_predictions)
                if coco_demo.cfg.MODEL.KEYPOINT_ON:
                    result = coco_demo.overlay_keypoints(
                        result, top_predictions)
                result = coco_demo.overlay_class_names(result, top_predictions)

                result = Image.fromarray(result)

                for label_GT in all_labels:
                    out = os.path.join(cfg.OUTPUT_DIR,
                                       f"inference_{conf_thresh}",
                                       dataset_name, label_GT, image_name)

                    os.makedirs(os.path.dirname(out), exist_ok=True)
                    if not os.path.exists(out):
                        result.save(out)

                ### PREDICTION BEST only
                # Reuse the predictions computed above for the best-only pass
                top_predictions = coco_demo.select_top_predictions(
                    predictions, best_only=True)

                result = image.copy()
                if coco_demo.show_mask_heatmaps:
                    result = coco_demo.create_mask_montage(
                        result, top_predictions)
                result = coco_demo.overlay_boxes(result, top_predictions)
                if coco_demo.cfg.MODEL.MASK_ON:
                    result = coco_demo.overlay_mask(result, top_predictions)
                if coco_demo.cfg.MODEL.KEYPOINT_ON:
                    result = coco_demo.overlay_keypoints(
                        result, top_predictions)
                result = coco_demo.overlay_class_names(result, top_predictions)

                result = Image.fromarray(result)

                # Same path logic as the ground-truth pass, with "_best"
                name, ext = os.path.splitext(image_name)
                for label_GT in all_labels:
                    out = os.path.join(cfg.OUTPUT_DIR,
                                       f"inference_{conf_thresh}",
                                       dataset_name, label_GT,
                                       f"{name}_best{ext}")
                    os.makedirs(os.path.dirname(out), exist_ok=True)
                    if not os.path.exists(out):
                        result.save(out)
Example #3

import cv2

from maskrcnn_benchmark.config import cfg
from predictor import COCODemo

# `log` below is a project-specific timing utility (log.Tick() times a block);
# cfg is assumed to be loaded and frozen as in the previous examples.
coco_demo = COCODemo(cfg,
                     min_image_size=800,
                     confidence_threshold=0.6)

size = (1280, 720)  # output frame size; assumed, not defined in the snippet
fourcc = cv2.VideoWriter_fourcc(*'XVID')

cap = cv2.VideoCapture('tmp/S2_Cars_day_cut.mp4')
out = cv2.VideoWriter('tmp/test_S2_Cars_day.avi', fourcc, 20.0, size)

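# Read each frame, run detection, overlay results, display and record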
index = 0
while cap.isOpened():
    ret, frame_bgr = cap.read()
    # Check `ret` before touching the frame; on a failed read it is None
    if not ret:
        break
    frame_bgr = cv2.resize(frame_bgr, size)
    index += 1

    with log.Tick():
        predictions = coco_demo.compute_prediction(frame_bgr)
        top_predictions = coco_demo.select_top_predictions(predictions)

        result = frame_bgr.copy()
        result = coco_demo.overlay_mask(result, top_predictions)
        result = coco_demo.overlay_boxes(result, top_predictions)
        result = coco_demo.overlay_class_names(result, top_predictions)

    cv2.imshow('result', result)
    out.write(result)

    if cv2.waitKey(1) == 32:  # Space to quit
        break

cap.release()
out.release()
cv2.destroyAllWindows()