Example #1
import numpy as np

# Renderer and Model3D are provided by the surrounding project's rendering and
# model-loading modules.
def draw_detections_3D(image, detections, cam, model_map):
    """Draw 6D detections onto the (resized) input image.

    Parameters
    ----------
    image: Numpy array, normalized to [0, 1]
    detections: A list of detections for this image, coming from SSD.detect() in the form
        [l, t, r, b, name, confidence, 6D_pose0, ..., 6D_poseN]
    cam: Intrinsics for rendering
    model_map: Mapping of model name to Model3D instance, e.g. {'obj': model3D}

    Returns
    -------
    A copy of the input image with the detected models and their 3D bounding
    boxes rendered on top.
    """
    if not detections:
        return np.copy(image)

    # Offscreen renderer with the same resolution as the input image
    ren = Renderer((image.shape[1], image.shape[0]), cam)
    ren.clear()
    out = np.copy(image)
    for det in detections:
        model = model_map[det[4]]           # det[4] holds the model name
        for pose in det[6:]:                # all 6D pose hypotheses for this detection
            ren.draw_model(model, pose)
            ren.draw_boundingbox(model, pose)
    col, dep = ren.finish()

    # Copy the rendering over into the scene wherever the depth buffer was hit
    mask = np.dstack((dep, dep, dep)) > 0
    out[mask] = col[mask]
    return out
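
For context, a minimal and hypothetical usage sketch follows. It assumes the renderer accepts a 3x3 intrinsic matrix, that Model3D can be constructed from a mesh path, and that a detector exposing the SSD.detect() interface mentioned in the docstring is available; detector, Model3D and the file paths are illustrative placeholders, not part of the example above.

# Hypothetical usage sketch -- detector, Model3D and the paths below are
# placeholders for whatever the surrounding project provides.
import cv2
import numpy as np

cam = np.array([[572.4, 0.0, 325.3],
                [0.0, 573.6, 242.0],
                [0.0, 0.0, 1.0]])           # example pinhole intrinsics (3x3)

image = cv2.imread('scene.png').astype(np.float32) / 255.0   # normalize to [0, 1]

model_map = {'obj_01': Model3D('models/obj_01.ply')}         # name -> Model3D
detections = detector.detect(image)   # [[l, t, r, b, name, conf, pose0, ...], ...]

overlay = draw_detections_3D(image, detections, cam, model_map)
cv2.imshow('6D detections', overlay)
cv2.waitKey(0)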
Example #2
import cv2
from timeit import default_timer as timer

# Excerpt from the evaluation/demo loop: gt_pose, obj, col, bench, croppings,
# dataset_name, max_rot_pert, max_trans_pert, iterations, refiner and ren, as
# well as perturb_pose and Refinable, are provided by the surrounding script.
perturbed_pose = perturb_pose(gt_pose, max_rot_pert, max_trans_pert)

refinable = Refinable(model=bench.models[str(int(obj))], label=0, hypo_pose=perturbed_pose,
                      metric_crop_shape=croppings[dataset_name]['obj_{:02d}'.format(int(obj))],
                      input_col=col)

for i in range(iterations):
    refinable.input_col = col.copy()

    # One contour-alignment step, timed to report the refinement rate
    start = timer()
    refiner.iterative_contour_alignment(refinable=refinable, max_iterations=1)
    end = timer()

    # Render the current pose hypothesis on top of the input image
    ren.clear()
    ren.draw_background(col)
    ren.draw_boundingbox(refinable.model, refinable.hypo_pose)
    ren.draw_model(refinable.model, refinable.hypo_pose, ambient=0.5, specular=0, shininess=100,
                   light_col=[1, 1, 1], light=[0, 0, -1])
    render_col, _ = ren.finish()
    render_col = render_col.copy()

    cv2.imshow("Input Image", col)

    # Draw FPS in the top-left corner (images are normalized to [0, 1], so 1. is white)
    fps = "FPS: " + str(int(1 / max(end - start, 1e-6)))
    cv2.rectangle(render_col, (0, 0), (133, 40), (1., 1., 1.), -1)
    cv2.putText(render_col, fps, (3, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)

    cv2.imshow("Refined Output", render_col)
    cv2.waitKey(500)
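
The loop above starts from a pose that perturb_pose has pushed away from the ground truth. The repository's own implementation is not shown here; the sketch below is only an illustrative stand-in, assuming 4x4 homogeneous pose matrices and uniform rotation/translation noise, to show the kind of starting error the contour-alignment refiner has to correct.

import numpy as np

def perturb_pose_sketch(pose, max_rot_pert, max_trans_pert):
    """Illustrative stand-in for perturb_pose: jitter a 4x4 pose by a random
    axis-angle rotation (up to max_rot_pert radians) and a random translation
    offset (up to max_trans_pert per axis, in the pose's translation unit)."""
    axis = np.random.uniform(-1, 1, 3)
    axis /= np.linalg.norm(axis)
    angle = np.random.uniform(-max_rot_pert, max_rot_pert)
    K = np.array([[0, -axis[2], axis[1]],
                  [axis[2], 0, -axis[0]],
                  [-axis[1], axis[0], 0]])
    R = np.eye(3) + np.sin(angle) * K + (1 - np.cos(angle)) * (K @ K)  # Rodrigues' formula
    perturbed = pose.copy()
    perturbed[:3, :3] = R @ pose[:3, :3]
    perturbed[:3, 3] += np.random.uniform(-max_trans_pert, max_trans_pert, 3)
    return perturbed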