# Example #1 (score: 0)
def get_albedo_by_ray_intersection(tmesh,
                                   blender_cam,
                                   reso,
                                   ortho_view_scale=1.):
    """Render a per-pixel albedo image by ray-casting an orthographic view.

    Casts one ray per pixel of a reso x reso orthographic view, transforms
    the rays into world space using the Blender camera's RT matrix, and
    paints each hit pixel with the color of the intersected mesh face.

    Args:
        tmesh: mesh exposing ``ray.intersects_id`` and ``visual.face_colors``
            (trimesh-style interface -- project type, not constructible here).
        blender_cam: Blender camera object defining the view transform.
        reso: output image resolution (image is reso x reso).
        ortho_view_scale: unused here; kept for interface compatibility.
            NOTE(review): rays are generated with a hard-coded scale of 1.0
            below -- confirm whether this parameter should be passed through.

    Returns:
        (reso, reso, 3) float array; pixels with no hit stay 1.0 (white).
    """
    def get_arr_index_from_flat_index(flat_index):
        # Map a flat (row-major) ray index back to (row, col);
        # None signals an out-of-range index.
        if flat_index < 0 or flat_index >= reso * reso:
            return None
        # divmod is exact integer arithmetic; the original
        # int(flat_index / reso) round-tripped through float.
        return divmod(flat_index, reso)

    r_locations, r_dirs = get_rays_from_ori_ortho_view(reso, 1.0)
    r_locations = np.reshape(r_locations, (-1, 3))
    r_dirs = np.reshape(r_dirs, (-1, 3))

    # Move ray origins and directions from Blender-camera space to world space.
    RT_bcam2world = blender_camera_util.get_bcam2world_RT_matrix_from_blender(
        blender_cam)
    r_locations, r_dirs = transform_points(r_locations,
                                           RT_bcam2world), transform_points(
                                               r_dirs, RT_bcam2world)
    # ray testing
    print('Ray intersection testing...')
    tri_indices, r_indices = tmesh.ray.intersects_id(r_locations, r_dirs)
    print('Ray intersection testing done.')
    albedo_arr = np.ones((reso, reso, 3))
    all_mesh_tri_colors = tmesh.visual.face_colors
    for hit_idx, tri_idx in enumerate(tri_indices):
        albedo_arr_idx = get_arr_index_from_flat_index(r_indices[hit_idx])
        if albedo_arr_idx is None:
            # BUGFIX: previously a None index reached albedo_arr[None],
            # which is numpy newaxis indexing and broadcast the hit color
            # over the ENTIRE image. Skip invalid indices instead.
            continue
        # Drop the alpha channel of the RGBA face color.
        albedo_arr[albedo_arr_idx] = all_mesh_tri_colors[tri_idx][:3]

    return albedo_arr
# Example #2 (score: 0)
def bcam2world_RT_matrixes(rot_angles_list):
    """Compute the Blender-camera-to-world RT matrix for each rotation.

    Sets up an orthographic camera orbiting the origin at radius 0.5,
    tracking an empty object at the origin. For every Euler angle triple
    the camera is repositioned, the scene is updated, and the resulting
    RT matrix is recorded.

    Args:
        rot_angles_list: iterable of (x, y, z) rotation angles in degrees.

    Returns:
        dict mapping tuple(xyz_angle) -> RT matrix as returned by
        blender_camera_util.get_bcam2world_RT_matrix_from_blender.
    """
    # (The original fetched bpy.context.scene into an unused local; removed.)

    # Set up an orthographic camera that always looks at the origin.
    cam_init_location = (0., 0.5, 0.)
    cam = get_default_camera()
    cam.location = cam_init_location
    cam.data.type = 'ORTHO'
    cam.data.ortho_scale = 1
    cam.data.clip_start = 0
    cam.data.clip_end = 100  # a value that is large enough
    cam_constraint = cam.constraints.new(type='TRACK_TO')
    cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
    cam_constraint.up_axis = 'UP_Y'
    b_empty = get_lookat_target(cam)
    cam_constraint.target = b_empty  # track an empty object at the origin

    RT_dict = {}
    for xyz_angle in rot_angles_list:
        # Rotate the camera position around the origin by the Euler angles.
        euler_rot_mat = euler2mat(radians(xyz_angle[0]),
                                  radians(xyz_angle[1]),
                                  radians(xyz_angle[2]), 'sxyz')
        cam.location = np.dot(euler_rot_mat, np.array(cam_init_location))

        # Important: we are not rendering (which would refresh the scene),
        # so force a scene update after moving the camera.
        bpy.context.scene.update()

        RT_dict[tuple(xyz_angle)] = (
            blender_camera_util.get_bcam2world_RT_matrix_from_blender(cam))

    return RT_dict
def scan_point_cloud(depth_file_output, normal_file_output, albedo_file_output,
                     matidx_file_output, args):
    """Scan the loaded shape into a fused point cloud with per-point features.

    Renders the scene from orthographic cameras rotated around the x, y and
    z axes (every 20 degrees), reads back the depth / normal / albedo /
    material-index render passes from disk, back-projects depth to 3D, and
    accumulates all views into one (N, 10) array:
    columns [0:3]=xyz, [3:6]=normal, [6:9]=rgb albedo, [9]=material index.

    Args:
        depth_file_output, normal_file_output, albedo_file_output,
        matidx_file_output: compositor file-output nodes whose file_slots
            paths are redirected per view before each render.
        args: needs .obj (input path), .output_folder, .orth_scale.

    Returns:
        (N, 10) numpy array of all scanned points, or None if nothing
        was accumulated.
    """
    scene = bpy.context.scene
    ######### filename for output ##############
    # ShapeNetCore paths encode the model id three levels up; such models
    # also need their normals corrected when read back.
    if 'ShapeNetCore' not in args.obj:
        model_identifier = args.obj.split('/')[-1].split('.')[0]
        correct_normal = False
    else:
        model_identifier = args.obj.split('/')[-3]
        correct_normal = True
    fp = os.path.join(args.output_folder, model_identifier)
    scene.render.image_settings.file_format = 'PNG'  # set output format to .png

    # scan shapenet shape into point cloud with features
    all_points_normals_colors_mindices = None
    # One outer iteration per rotation axis (i=0: x, i=1: y, i=2: z).
    for i in range(0, 3):
        cam = get_default_camera()
        # For y-axis rotation the camera starts on the z axis; otherwise on
        # the y axis (rotating about the axis the camera sits on is a no-op).
        if i == 1: cam_init_location = (0, 0, 0.5)
        else: cam_init_location = (0, 0.5, 0)
        cam.data.type = 'ORTHO'
        cam.data.ortho_scale = args.orth_scale
        cam.data.clip_start = 0
        cam.data.clip_end = 100  # a value that is large enough
        # NOTE(review): a new TRACK_TO constraint is created on the same
        # camera in each of the 3 outer iterations -- presumably harmless
        # duplicates, but confirm against Blender's constraint stacking.
        cam_constraint = cam.constraints.new(type='TRACK_TO')
        cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
        cam_constraint.up_axis = 'UP_Y'
        b_empty = get_lookat_target(cam)
        cam_constraint.target = b_empty  # track to a empty object at the origin

        # Orbit the camera in 20-degree steps around the current axis.
        for rot_angle in range(0, 359, 20):
            if i == 0:
                xyz_angle = [rot_angle, 0, 0]
            elif i == 1:
                xyz_angle = [0, rot_angle, 0]
            elif i == 2:
                xyz_angle = [0, 0, rot_angle]

            # rotate camera
            euler_rot_mat = euler2mat(radians(xyz_angle[0]),
                                      radians(xyz_angle[1]),
                                      radians(xyz_angle[2]), 'sxyz')
            new_cam_location = np.dot(euler_rot_mat,
                                      np.array(cam_init_location))
            cam.location = new_cam_location

            # Point every compositor output node at a per-view file prefix.
            scene.render.filepath = fp + '-rotx=%.2f_roty=%.2f_rotz=%.2f' % (
                xyz_angle[0], xyz_angle[1], xyz_angle[2])
            depth_file_output.file_slots[
                0].path = scene.render.filepath + "_depth"
            normal_file_output.file_slots[
                0].path = scene.render.filepath + "_normal"
            albedo_file_output.file_slots[
                0].path = scene.render.filepath + "_albedo"
            matidx_file_output.file_slots[
                0].path = scene.render.filepath + "_matidx"

            # render and write out
            bpy.ops.render.render(write_still=True)  # render still

            # Read the render passes back from the EXR files Blender wrote
            # ("0001" is the frame-number suffix Blender appends).
            depth_arr, hard_mask_arr = util.read_depth_and_get_mask(
                scene.render.filepath + "_depth0001.exr")
            normal_arr = util.read_and_correct_normal(
                scene.render.filepath + "_normal0001.exr",
                correct_normal=correct_normal,
                mask_arr=hard_mask_arr)
            albedo_arr = util.read_exr_image(scene.render.filepath +
                                             "_albedo0001.exr")
            matidx_arr = util.read_exr_image(scene.render.filepath +
                                             "_matidx0001.exr")[:, :, 0]
            # and the clip value range
            depth_arr = np.clip(depth_arr, a_min=0, a_max=1)
            normal_arr = np.clip(normal_arr, a_min=-1, a_max=1)
            albedo_arr = np.clip(albedo_arr, a_min=0, a_max=1)

            # process renderings to get the point cloud
            # Back-project the ortho depth map to camera-space 3D points,
            # then stack [xyz, normal, rgb, matidx] into (N, 10) rows.
            xyz_arr = util.get_3D_points_from_ortho_depth(
                depth_arr, args.orth_scale)
            xyz_normal_rgb_midx = np.reshape(
                np.concatenate([
                    xyz_arr, normal_arr, albedo_arr,
                    np.expand_dims(matidx_arr, -1)
                ],
                               axis=-1), (-1, 10))
            xyz_normal_rgb_midx = util.remove_bg_points(xyz_normal_rgb_midx)
            # transform from depth to 3D world point cloud
            RT_bcam2world = blender_camera_util.get_bcam2world_RT_matrix_from_blender(
                cam)
            # matrix for switching back axis of the obj file when output
            # Both positions and normals get the same Blender->ShapeNet
            # axis switch composed with the camera-to-world transform.
            xyz_normal_rgb_midx[:, :3] = util.transform_points(
                xyz_normal_rgb_midx[:, :3],
                np.dot(R_axis_switching_BtoS, RT_bcam2world))
            xyz_normal_rgb_midx[:, 3:6] = util.transform_points(
                xyz_normal_rgb_midx[:, 3:6],
                np.dot(R_axis_switching_BtoS, RT_bcam2world))
            if all_points_normals_colors_mindices is None:
                all_points_normals_colors_mindices = xyz_normal_rgb_midx
            else:
                all_points_normals_colors_mindices = np.concatenate(
                    [all_points_normals_colors_mindices, xyz_normal_rgb_midx],
                    axis=0)

            # remove renderings
            # Clean up the per-view render artifacts once consumed.
            os.remove(scene.render.filepath + '.png')
            os.remove(scene.render.filepath + "_normal0001.exr")
            os.remove(scene.render.filepath + "_depth0001.exr")
            os.remove(scene.render.filepath + "_albedo0001.exr")
            os.remove(scene.render.filepath + "_matidx0001.exr")

    return all_points_normals_colors_mindices