# Shared imports assumed by the examples below (detectron2 + OpenCV).
# Project-specific helpers (draw_instance_predictions, get_dataset_name,
# load_and_register_dataset, get_balloon_dicts) are defined elsewhere.
import os
import random
from typing import Any, Dict, Optional

import cv2
import numpy as np
import torch

from detectron2 import config, model_zoo
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.catalog import Metadata
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.engine import DefaultPredictor
from detectron2.utils import visualizer
from detectron2.utils.visualizer import ColorMode


def render_frame(prms):
    # Unpack the single packed argument (convenient for pool.map).
    frame_id, frame_path, tubelets, rendered_frames_dir, cfg = prms

    rendered_frame_path = rendered_frames_dir / frame_path.name
    if rendered_frame_path.exists():
        return

    tubelet_ids = []
    tubelet_instances = []
    tubelet_instance_projections = []
    for tubelet_id, tubelet in enumerate(tubelets):
        tubelet_ids.append(tubelet_id)
        tubelet_instances.append(tubelet.get_instance(frame_id))
        tubelet_instance_projections.append(
            tubelet.get_projected_instance(frame_id))

    # Read the frame and flatten it to grayscale, then stack it back to three
    # channels so the colored overlays stand out against a gray background.
    frame = cv2.imread(str(frame_path))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = cv2.merge([frame, frame, frame])

    # Monkey-patch the module-level threshold so even tiny objects get labels.
    visualizer._SMALL_OBJECT_AREA_THRESH = 1
    v = visualizer.Visualizer(frame[:, :, ::-1],
                              MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                              scale=1.2)
    v = draw_instance_predictions(v, tubelet_ids, tubelet_instances,
                                  tubelet_instance_projections)
    cv2.imwrite(str(rendered_frame_path), v.get_image()[:, :, ::-1])
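

# A minimal driver sketch for render_frame (an assumption: the packed-tuple
# signature suggests multiprocessing). frame_paths, tubelets,
# rendered_frames_dir, and cfg are placeholders from the surrounding pipeline.
def render_all_frames(frame_paths, tubelets, rendered_frames_dir, cfg):
    from multiprocessing import Pool
    params = [(frame_id, frame_path, tubelets, rendered_frames_dir, cfg)
              for frame_id, frame_path in enumerate(frame_paths)]
    with Pool() as pool:
        pool.map(render_frame, params)
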
def pred_viz(im, outputs, metadata, output_path):
    v = visualizer.Visualizer(im[:, :, ::-1],
                              metadata=metadata,
                              instance_mode=ColorMode.SEGMENTATION)
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))

    # `args` is assumed to be a module-level argparse namespace.
    if args.save_viz:
        cv2.imwrite(output_path, v.get_image()[:, :, ::-1])

    cv2.imshow('', v.get_image()[:, :, ::-1])
    cv2.waitKey(0)
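

# Sketch of calling pred_viz (illustrative names, not from the source;
# assumes a trained DefaultPredictor and a registered dataset).
def run_pred_viz(predictor, image_path, dataset_name):
    im = cv2.imread(image_path)
    outputs = predictor(im)
    pred_viz(im, outputs, MetadataCatalog.get(dataset_name),
             image_path.replace(".jpg", "_viz.jpg"))
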
Example #3
def visualize(args, mode, _appcfg):
    name = "hmd"
    subset = "train"
    dataset_name = get_dataset_name(name, subset)
    metadata = load_and_register_dataset(name, subset, _appcfg)
    dataset_dicts = DatasetCatalog.get(dataset_name)

    N = 30
    # Guard against datasets smaller than N.
    for d in random.sample(dataset_dicts, min(N, len(dataset_dicts))):
        # print("d: {}".format(d))
        # print("annos: {}".format(d.get("annotations", None)))
        # print("annos: {}".format(d.get("annotations", None)[0]))
        image_filepath = d["file_name"]
        # image_filepath = "/aimldl-dat/data-gaze/AIML_Annotation/ods_job_230119/images/images-p2-050219_AT2/291018_114342_16718_zed_l_057.jpg"
        # image_filepath = "/aimldl-dat/data-gaze/AIML_Annotation/ods_job_230119/images/images-p2-050219_AT2/291018_114252_16716_zed_l_099.jpg"
        im = cv2.imread(image_filepath)

        # visualize_predictions(im, outputs, metadata)
        v = visualizer.Visualizer(im[:, :, ::-1], metadata=metadata)

        v = v.draw_dataset_dict(d)
        cv2.imshow('', v.get_image()[:, :, ::-1])
        cv2.waitKey(0)
Example #4
def draw_vis(
    input: Dict[str, Any],
    metadata: Optional[Metadata],
    scale: float = 1.0,
    draw_instances: bool = True,
) -> np.ndarray:
    """
        input (Dict[str, Any]): a dict containing an "image" key (format
            provided by "image_format" key if not RGB) and an optional
            "instances" key to draw instances.
            note: the "instances" should be on cpu.
    """
    img = input["image"]
    if input.get("image_format", "RGB") != "RGB":
        img = convert_image_to_rgb(img, input["image_format"])
    if torch.is_tensor(img):
        # Assumes an HWC tensor when "image_format" is already RGB.
        img = img.numpy().astype("uint8")

    if draw_instances and "instances" in input:
        vis = visualizer.Visualizer(img, metadata, scale=scale)
        visimg = vis.draw_instance_predictions(input["instances"])
        img = visimg.get_image()
    return img
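

# Sketch of a draw_vis call on one prediction (illustrative helper, not from
# the source; assumes im is an HWC uint8 RGB array and outputs came from a
# detectron2 predictor).
def show_vis(im, outputs, metadata):
    vis_img = draw_vis(
        {"image": im, "instances": outputs["instances"].to("cpu")}, metadata)
    cv2.imshow('', vis_img[:, :, ::-1])
    cv2.waitKey(0)
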
def visualize(args, mode, _appcfg):
    name = "hmd"
    subset = "train"

    if args.subset:
        subset = args.subset

    dataset_name = get_dataset_name(name, subset)
    metadata = load_and_register_dataset(name, subset, _appcfg)
    dataset_dicts = DatasetCatalog.get(dataset_name)

    N = 20
    # Guard against datasets smaller than N.
    for d in random.sample(dataset_dicts, min(N, len(dataset_dicts))):
        # log.debug("d: {}".format(d))
        # log.debug("annos: {}".format(d.get("annotations", None)))
        # log.debug("annos: {}".format(d.get("annotations", None)[0]))
        image_filepath = d["file_name"]
        im = cv2.imread(image_filepath)

        v = visualizer.Visualizer(im[:, :, ::-1], metadata=metadata)

        v = v.draw_dataset_dict(d)
        cv2.imshow('', v.get_image()[:, :, ::-1])
        cv2.waitKey(0)
Example #6
def predict(args, mode, _appcfg):
    name = "hmd"
    subset = "val"
    BASE_IMAGE_PATH = "/aimldl-dat/samples/Trafic_Signs"

    if args.path:
        BASE_IMAGE_PATH = args.path

    PREDICTION_OUTPUT_PATH = "/aimldl-dat/samples/Predictions"

    print("BASE_IMAGE_PATH: {}".format(BASE_IMAGE_PATH))

    dataset_name = get_dataset_name(name, subset)

    cfg = config.get_cfg()
    # cfg.merge_from_file("/aimldl-cod/external/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))

    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025
    # cfg.SOLVER.MAX_ITER = 300    # 300 iterations seems good enough, but you can certainly train longer
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3

    cfg.OUTPUT_DIR = "/codehub/apps/detectron2/release"
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")

    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set the testing threshold for this model
    cfg.DATASETS.TEST = (dataset_name,)  # note the trailing comma: this must be a tuple
    # print("cfg: {}".format(cfg.dump()))

    # Predict on every image in a directory.
    metadata = MetadataCatalog.get(dataset_name).set(
        thing_classes=['signage', 'traffic_light', 'traffic_sign'])
    # print("Metadata: {}".format(metadata))

    predictor = DefaultPredictor(cfg)

    for image in os.listdir(BASE_IMAGE_PATH):
        image_filepath = os.path.join(BASE_IMAGE_PATH, image)
        output_path = os.path.join(PREDICTION_OUTPUT_PATH, image)

        # image_filepath = "/aimldl-dat/data-gaze/AIML_Annotation/ods_job_230119/images/images-p2-050219_AT2/291018_114342_16718_zed_l_057.jpg"
        # image_filepath = "/aimldl-dat/data-gaze/AIML_Annotation/ods_job_230119/images/images-p2-050219_AT2/291018_114252_16716_zed_l_099.jpg"
        # print("image_filepath: {}".format(image_filepath))

        im = cv2.imread(image_filepath)

        outputs = predictor(im)

        # visualize_predictions(im, outputs, metadata)
        v = visualizer.Visualizer(im[:, :, ::-1],
                                  metadata=metadata,
                                  instance_mode=ColorMode.SEGMENTATION)
        v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        # cv2.imwrite(output_path, v.get_image()[:, :, ::-1])
        cv2.imshow('', v.get_image()[:, :, ::-1])
        cv2.waitKey(0)
                "category_id": 0,
                "iscrowd": 0
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts


for d in ["train", "val"]:
    DatasetCatalog.register(
        "balloon_" + d,
        # d=d binds the current loop value; see the late-binding sketch below.
        lambda d=d: get_balloon_dicts(
            "/aimldl-dat/data-public/balloon_dataset/balloon/" + d))
    MetadataCatalog.get("balloon_" + d).set(thing_classes=["balloon"])
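
# Why the d=d default above matters: a minimal late-binding illustration
# (not from the source). Without the default, every lambda would see the
# final loop value and both dataset names would load the "val" split.
fns_late = [lambda: d for d in ["train", "val"]]
fns_bound = [lambda d=d: d for d in ["train", "val"]]
assert [f() for f in fns_late] == ["val", "val"]
assert [f() for f in fns_bound] == ["train", "val"]
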
balloon_metadata = MetadataCatalog.get("balloon_train")

dataset_dicts = get_balloon_dicts(
    "/aimldl-dat/data-public/balloon_dataset/balloon/train")
for d in random.sample(dataset_dicts, 3):
    img = cv2.imread(d["file_name"])
    viz = visualizer.Visualizer(img[:, :, ::-1],
                                metadata=balloon_metadata,
                                scale=0.5)
    vis = viz.draw_dataset_dict(d)
    # vis.save("/aimldl-dat/temp/det02.jpg")
    # cv2.imwrite("/aimldl-dat/temp/det01.jpg", vis.get_image()[:, :, ::-1])
    cv2.imshow('', vis.get_image()[:, :, ::-1])
    # cv2.imshow('', img)
    cv2.waitKey(0)
Example #8
# (Fragment: the lines below are the tail of a commented-out COCO
# thing_classes list; the head is missing in the source.)
#                "surfboard", "tennis racket", "bottle", "wine glass", "cup",
#                "fork", "knife", "spoon", "bowl", "banana", "apple",
#                "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza",
#                "donut", "cake", "chair", "couch", "potted plant", "bed",
#                "dining table", "toilet", "tv", "laptop", "mouse", "remote",
#                "keyboard", "cell phone", "microwave", "oven", "toaster",
#                "sink", "refrigerator", "book", "clock", "vase", "scissors",
#                "teddy bear", "hair drier", "toothbrush"])

metadata = MetadataCatalog.get(dataset_name)
print("metadata: {}".format(metadata))
# MetadataCatalog.get(dataset_name).set(json_file=json_file, image_root=image_root, evaluator_type=dataset_name, **metadata)

# Map COCO category ids 1-5 onto the contiguous ids 0-4 expected by detectron2.
metadata.thing_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane']
metadata.thing_dataset_id_to_contiguous_id = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4}

# MetadataCatalog.get(dataset_name).set(thing_classes=thing_classes, metadata=thing_dataset_id_to_contiguous_id)

# MetadataCatalog.get(dataset_name).set(json_file=json_file, image_root=image_root, evaluator_type=dataset_name, metadata={})

N = 2
# dataset_dicts_train is assumed to come from earlier (elided) loading code.
for d in random.sample(dataset_dicts_train, N):
    img = cv2.imread(d["file_name"])
    viz = visualizer.Visualizer(img[:, :, ::-1], metadata=metadata, scale=1)
    vis = viz.draw_dataset_dict(d)
    # vis.save("/aimldl-dat/temp/det02.jpg")
    # cv2.imwrite("/aimldl-dat/temp/det01.jpg", vis.get_image()[:, :, ::-1])
    cv2.imshow('', vis.get_image()[:, :, ::-1])
    # cv2.imshow('', img)
    cv2.waitKey(0)