Example #1
    # Relies on module-level imports not shown in this snippet: numpy as np,
    # pyrender, and the project helper get_smpl_faces.
    def __init__(self, resolution=(224, 224), orig_img=False, wireframe=False):
        self.resolution = resolution

        self.faces = get_smpl_faces()
        self.orig_img = orig_img
        self.wireframe = wireframe
        self.renderer = pyrender.OffscreenRenderer(
            viewport_width=self.resolution[0],
            viewport_height=self.resolution[1],
            point_size=1.0)

        # set the scene
        self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                                    ambient_light=(0.3, 0.3, 0.3))

        # three white point lights placed around the subject
        light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1)

        light_pose = np.eye(4)
        light_pose[:3, 3] = [0, -1, 1]
        self.scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [0, 1, 1]
        self.scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [1, 1, 2]
        self.scene.add(light, pose=light_pose)
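The constructor above only builds the scene, lights, and offscreen renderer. The following self-contained sketch shows how the same pyrender pieces fit together end to end; the icosphere, the perspective camera, and the final render call are assumptions added for illustration and are not part of the snippet above, which uses an SMPL mesh and a weak-perspective camera instead.

import numpy as np
import pyrender
import trimesh

# Stand-in geometry: an icosphere instead of the SMPL mesh.
sphere = trimesh.creation.icosphere(radius=0.5)
scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                       ambient_light=(0.3, 0.3, 0.3))
scene.add(pyrender.Mesh.from_trimesh(sphere))

# Same three-point-light arrangement as in the constructor above.
light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1)
for translation in ([0, -1, 1], [0, 1, 1], [1, 1, 2]):
    pose = np.eye(4)
    pose[:3, 3] = translation
    scene.add(light, pose=pose)

# A simple perspective camera; the original code uses a custom weak-perspective camera.
camera_pose = np.eye(4)
camera_pose[:3, 3] = [0.0, 0.0, 2.5]
scene.add(pyrender.PerspectiveCamera(yfov=np.pi / 3.0), pose=camera_pose)

renderer = pyrender.OffscreenRenderer(viewport_width=224,
                                      viewport_height=224,
                                      point_size=1.0)
color, depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)
renderer.delete()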
Example #2
    def __init__(self, resolution=(224, 224), orig_img=False, wireframe=False):

        # set renderer
        self.resolution = resolution
        self.renderer = pyrender.OffscreenRenderer(
            viewport_width=self.resolution[0],
            viewport_height=self.resolution[1],
            point_size=3.0)
        self.faces = get_smpl_faces()
        self.orig_img = orig_img
        self.wireframe = wireframe
        self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                                    ambient_light=(0.3, 0.3, 0.3))
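Both constructors create a pyrender.OffscreenRenderer, which needs an OpenGL context. On a headless machine the usual workaround is to select the EGL or OSMesa platform before pyrender is imported; a minimal sketch, assuming EGL drivers are available:

import os

# Must be set before `import pyrender` so PyOpenGL picks a headless GL backend.
os.environ['PYOPENGL_PLATFORM'] = 'egl'   # or 'osmesa'

import pyrender  # noqa: E402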
Example #3
# Relies on module-level imports not shown here: math, numpy as np, cv2, trimesh,
# pyrender, plus the project helpers get_smpl_faces and WeakPerspectiveCamera.
def render_image(img,
                 verts,
                 cam,
                 faces=None,
                 angle=None,
                 axis=None,
                 resolution=224,
                 output_fn=None):
    if faces is None:
        faces = get_smpl_faces()

    mesh = trimesh.Trimesh(vertices=verts, faces=faces)

    # rotate 180 degrees about the x-axis to flip the mesh into the renderer's
    # camera convention
    Rx = trimesh.transformations.rotation_matrix(math.radians(180), [1, 0, 0])
    mesh.apply_transform(Rx)

    if angle is not None and axis is not None:
        R = trimesh.transformations.rotation_matrix(math.radians(angle), axis)
        mesh.apply_transform(R)

    if output_fn:
        mesh.export(output_fn)
        # convert the weak-perspective camera (s, tx, ty) into a 3D translation,
        # assuming a fixed focal length of 5000 px
        camera_translation = np.array(
            [-cam[1], cam[2], 2 * 5000. / (img.shape[0] * cam[0] + 1e-9)])
        np.save(output_fn.replace('.obj', '.npy'), camera_translation)

        # Save the rotated mesh
        # R = trimesh.transformations.rotation_matrix(math.radians(270), [0,1,0])
        # rotated_mesh = mesh.copy()
        # rotated_mesh.apply_transform(R)
        # rotated_mesh.export(output_fn.replace('.obj', '_rot.obj'))

    scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                           ambient_light=(0.3, 0.3, 0.3))

    material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.0,
                                                  alphaMode='OPAQUE',
                                                  baseColorFactor=(1.0, 1.0,
                                                                   0.9, 1.0))
    mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
    scene.add(mesh, 'mesh')

    camera_pose = np.eye(4)

    # cam = [scale, tx, ty]; WeakPerspectiveCamera is defined elsewhere in the
    # project (it is not a pyrender built-in)
    camera = WeakPerspectiveCamera(scale=cam[0],
                                   translation=cam[1:],
                                   zfar=1000.)
    scene.add(camera, pose=camera_pose)

    light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1)

    light_pose = np.eye(4)
    light_pose[:3, 3] = [0, -1, 1]
    scene.add(light, pose=light_pose)

    light_pose[:3, 3] = [0, 1, 1]
    scene.add(light, pose=light_pose)

    light_pose[:3, 3] = [1, 1, 2]
    scene.add(light, pose=light_pose)

    r = pyrender.OffscreenRenderer(viewport_width=resolution,
                                   viewport_height=resolution,
                                   point_size=1.0)

    color, _ = r.render(scene, flags=pyrender.RenderFlags.RGBA)
    # release the GL context; a new OffscreenRenderer is created on every call
    r.delete()
    # color = color[:, ::-1, :]
    # keep rendered pixels where the alpha channel is non-zero, otherwise fall
    # back to the input image
    valid_mask = (color[:, :, -1] > 0)[:, :, np.newaxis]

    output_img = color[:, :, :-1] * valid_mask + (1 - valid_mask) * img

    image = output_img.astype(np.uint8)
    # overlay the weak-perspective camera parameters in the top-left corner
    text = f's: {cam[0]:.2f}, tx: {cam[1]:.2f}, ty: {cam[2]:.2f}'
    cv2.putText(image, text, (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                color=(0, 255, 0))

    return image
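A hedged usage sketch for render_image with placeholder inputs; real vertices would come from an SMPL model, so the random points below only make the call shape-correct and will not produce a meaningful render:

import numpy as np

img = np.zeros((224, 224, 3), dtype=np.uint8)               # background image (H, W, 3)
verts = 0.1 * np.random.randn(6890, 3).astype(np.float32)   # SMPL meshes have 6890 vertices
cam = np.array([0.9, 0.0, 0.0])                             # weak-perspective [scale, tx, ty]

overlay = render_image(img=img, verts=verts, cam=cam, resolution=224)
print(overlay.shape, overlay.dtype)                         # (224, 224, 3) uint8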
Example #4
# Relies on names not shown here: numpy as np, torch, and the project helpers
# torch2numpy, draw_skeleton, get_smpl_faces, and render_image (Example #3).
def visualize_preds(image,
                    preds,
                    target=None,
                    target_exists=True,
                    dataset='common',
                    vis_hmr=False):
    with torch.no_grad():
        if isinstance(image, torch.Tensor):
            image = torch2numpy(image)
            # import random
            # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # cv2.imwrite(f'sample_images/{random.randint(0,100)}.jpg', image)

    pred_theta = preds['theta']
    pred_camera = pred_theta[:3]
    # pred_pose   = pred_theta[3:75]
    # pred_shape  = pred_theta[75:]
    pred_kp_2d = preds['kp_2d']
    pred_verts = preds['verts']

    if target_exists:
        target_kp_2d = target['kp_2d']

    # append a constant confidence column so the keypoints are (x, y, conf)
    pred_kp_2d = np.concatenate(
        [pred_kp_2d, np.ones((pred_kp_2d.shape[0], 1))], axis=-1)

    faces = get_smpl_faces()

    pred_image = draw_skeleton(image.copy(), pred_kp_2d, dataset=dataset)
    if target_exists:
        if vis_hmr:
            target_verts = target['verts']
            target_cam = target['cam']

            target_image = render_image(img=image.copy(),
                                        verts=target_verts,
                                        faces=faces,
                                        cam=target_cam)
        else:
            target_image = draw_skeleton(image.copy(),
                                         target_kp_2d,
                                         dataset=dataset)

    render = render_image(img=image.copy(),
                          verts=pred_verts,
                          faces=faces,
                          cam=pred_camera)

    # blank (black) background for a 90-degree side view of the predicted mesh
    white_img = np.zeros_like(image)
    render_side = render_image(img=white_img.copy(),
                               verts=pred_verts,
                               faces=faces,
                               cam=pred_camera,
                               angle=90,
                               axis=[0, 1, 0])

    if target_exists:
        result_image = np.hstack(
            [image, pred_image, target_image, render, render_side])
    else:
        result_image = np.hstack([image, pred_image, render, render_side])

    return result_image
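A hedged usage sketch for visualize_preds with dummy inputs; the keys and shapes below are inferred from the function body, and the keypoint count (14 here) is a guess that must match whatever draw_skeleton expects for dataset='common':

import numpy as np

image = np.zeros((224, 224, 3), dtype=np.uint8)
theta = np.zeros(85, dtype=np.float32)               # [cam(3), pose(72), shape(10)]
theta[0] = 0.9                                       # non-zero weak-perspective scale
preds = {
    'theta': theta,
    'kp_2d': np.zeros((14, 2), dtype=np.float32),    # hypothetical keypoint count
    'verts': 0.1 * np.random.randn(6890, 3).astype(np.float32),
}

grid = visualize_preds(image, preds, target_exists=False)
print(grid.shape)   # (224, 224 * 4, 3): input | skeleton | overlay | side view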