Example 1
import bpycv
import cv2


def bpycv_test():
    # render RGB image plus instance and semantic annotation in one call
    result = bpycv.render_data()

    cv2.imwrite(
        r"D:\tmp\rgb.jpg",
        result["image"][..., ::-1])  # convert RGB image to OpenCV's BGR order

    # save the instance map losslessly (PNG compression disabled)
    cv2.imwrite(r"D:\tmp\inst.png", result["inst"],
                [cv2.IMWRITE_PNG_COMPRESSION, 0])

    # save the semantic map losslessly (PNG compression disabled)
    cv2.imwrite(r"D:\tmp\sem.png", result["sem"],
                [cv2.IMWRITE_PNG_COMPRESSION, 0])
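
The maps written above can be read back without losing their bit depth by passing cv2.IMREAD_UNCHANGED. A minimal sketch, reusing the paths from the function and assuming bpycv_test() has already been run:

import cv2

# read the annotation maps back exactly as written (no dtype or channel conversion)
inst = cv2.imread(r"D:\tmp\inst.png", cv2.IMREAD_UNCHANGED)
sem = cv2.imread(r"D:\tmp\sem.png", cv2.IMREAD_UNCHANGED)
print(inst.dtype, inst.shape)
print(sem.dtype, sem.shape)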
Example 2
import logging
import os

import bpy
import bpycv
import cv2

import post_processing  # project-local module (import path may differ)


def render_gt(current_run_base_dir, data_dir, rendering_frames):
    """Renders GT for all frames of the current run and saves it under current_run_base_dir."""
    # request 16-bit color depth for the saved images
    bpy.context.scene.render.image_settings.color_depth = "16"
    # gt rendering
    for i, frame in enumerate(rendering_frames):
        logging.info(
            f"Render GT frame {frame}  ({i+1}/{len(rendering_frames)})")
        bpy.context.scene.frame_set(frame)
        # render only the annotations, skip the RGB image
        result = bpycv.render_data(render_image=False, render_annotation=True)
        # save the instance map as the semantic segmentation GT for this frame
        cv2.imwrite(
            os.path.join(current_run_base_dir, "all", "semantic_segmentation",
                         "semantic_segmentation" + str(frame) + ".png"),
            result["inst"])
        # derive a disparity map from the rendered depth and instance maps
        disparity = post_processing.generate_disparity(result["depth"],
                                                       result["inst"],
                                                       data_dir)
        cv2.imwrite(
            os.path.join(current_run_base_dir, "all", "disparity",
                         "disparity" + str(frame) + ".png"), disparity)
Example 3
import random

import bpy
import bpycv
import cv2
import numpy as np

for index in range(1, 20):
    # create a cube or a sphere instance at a random location
    location = [random.uniform(-2, 2) for _ in range(3)]
    if index % 2:
        bpy.ops.mesh.primitive_cube_add(size=0.5, location=location)
        categories_id = 1
    else:
        bpy.ops.mesh.primitive_uv_sphere_add(radius=0.5, location=location)
        categories_id = 2
    obj = bpy.context.active_object
    # assign each instance a unique inst_id, which is used to generate the instance annotation
    obj["inst_id"] = categories_id * 1000 + index

# render image, instance annotation and depth in one line of code
# result["ycb_meta"] is the 6D pose GT
result = bpycv.render_data()

# save result
cv2.imwrite(
    "demo-rgb.jpg", result["image"][..., ::-1]
)  # convert RGB image to OpenCV's BGR order

# save the instance map as a 16-bit PNG
# the value of each pixel represents the inst_id of the object to which the pixel belongs
cv2.imwrite("demo-inst.png", np.uint16(result["inst"]))

# convert depth units from meters to millimeters
depth_in_mm = result["depth"] * 1000
cv2.imwrite("demo-depth.png", np.uint16(depth_in_mm))  # save as 16bit png

# visualization of inst_rgb_depth for humans
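
Since each inst_id above is encoded as categories_id * 1000 + index, the category and per-category index can be recovered from the saved 16-bit instance map with integer division and modulo. A minimal sketch, reusing the file names written above; treating pixel value 0 as background is an assumption.

import cv2
import numpy as np

# read the 16-bit maps back without conversion
inst = cv2.imread("demo-inst.png", cv2.IMREAD_UNCHANGED)
depth_mm = cv2.imread("demo-depth.png", cv2.IMREAD_UNCHANGED)

# inst_id = categories_id * 1000 + index, so division and modulo invert the encoding
category_map = inst // 1000  # 1 for cubes, 2 for spheres, 0 assumed to be background
index_map = inst % 1000      # per-object index within its category

depth_m = depth_mm.astype(np.float32) / 1000.0  # convert back from millimeters to meters
print(np.unique(inst), depth_m.max())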