Code Example #1
File: main.py Project: SkyVault/STLAnimator
    def __init__(self, models, models_ui, parent=None, size=(640, 480)):
        super().__init__(parent)

        self.width = int(size[0])
        self.height = int(size[1])

        self.resizeGL(self.width, self.height)

        self.models = models
        self.models_ui = models_ui

        self.timer = 0
        timer = QTimer(self)
        timer.timeout.connect(self.update)
        timer.start(int(1000 / 60))  # QTimer.start expects an integer interval in ms

        self.app = parent
        self.dist = 12
        self.angle = 0.0
        self.lastpos = (-1, -1)
        self.mouseWithin = False

        self.scene = pyrender.Scene(bg_color=[0.2, 0.2, 0.2, 1])

        self.camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0,
                                                 aspectRatio=1.0)
        s = np.sqrt(2) / 2

        camera_pose = np.array([
            [0.0, -s, s, 1.3],
            [1.0, 0.0, 0.0, 0.0],
            [0.0, s, s, 1.35],
            [0.0, 0.0, 0.0, 1.0],
        ])

        self.scene.add(self.camera, pose=camera_pose)

        self.side_light_1 = pyrender.PointLight(color=np.ones(3),
                                                intensity=22,
                                                name="side-light-1",
                                                range=100)

        self.side_light_2 = pyrender.PointLight(color=np.ones(3),
                                                intensity=22,
                                                name="side-light-2",
                                                range=100)

        self.scene.add(self.side_light_1, pose=np.array(translate((2, 1, 1))))
        self.scene.add(self.side_light_2,
                       pose=np.array(translate((-1, 1, -2))))

        self.offscreenRenderer = pyrender.OffscreenRenderer(
            self.width, self.height)
        self.color, depth = self.offscreenRenderer.render(self.scene)
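Note: this snippet calls a translate helper that is not shown. A minimal sketch, assuming it simply builds a 4x4 homogeneous translation matrix for use as a scene.add pose, might look like:

def translate(offset):
    # Build a 4x4 homogeneous transform translating by offset = (x, y, z)
    pose = np.eye(4)
    pose[:3, 3] = offset
    return pose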
Code Example #2
File: renderer.py Project: liuguoyou/CenterHMR
    def __init__(self, faces, resolution=(224,224), orig_img=False, wireframe=False):
        self.resolution = resolution

        self.faces = faces
        self.orig_img = orig_img
        self.wireframe = wireframe
        self.renderer = pyrender.OffscreenRenderer(
            viewport_width=self.resolution[0],
            viewport_height=self.resolution[1],
            point_size=1.0)

        # set the scene
        self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3))

        light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1)

        light_pose = np.eye(4)
        light_pose[:3, 3] = [0, -1, 1]
        self.scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [0, 1, 1]
        self.scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [1, 1, 2]
        self.scene.add(light, pose=light_pose)
Code Example #3
def render_mesh(mesh, height, width):
    scene = pyrender.Scene(ambient_light=[.3, .3, .3], bg_color=[255, 255, 255])

    rgb_per_v = np.zeros_like(mesh.v)
    rgb_per_v[:, 0] = 0.53
    rgb_per_v[:, 1] = 0.81
    rgb_per_v[:, 2] = 0.98

    tri_mesh = trimesh.Trimesh(vertices=0.001*mesh.v, faces=mesh.f, vertex_colors=rgb_per_v)
    render_mesh = pyrender.Mesh.from_trimesh(tri_mesh, smooth=True)
    scene.add(render_mesh, pose=np.eye(4))

    camera = pyrender.camera.OrthographicCamera(xmag=0.001*0.5*width, ymag=0.001*0.5*height, znear=0.01, zfar=10)
    camera_pose = np.eye(4)
    camera_pose[:3, 3] = np.array([0.001*0.5*width, 0.001*0.5*height, 1.0])
    scene.add(camera, pose=camera_pose)

    light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1.0)
    light_pose = np.eye(4)
    light_pose[:3, 3] = np.array([1.0, 1.0, 1.0])
    scene.add(light, pose=light_pose.copy())

    light_pose[:3, 3] = np.array([0.0, 1.0, 1.0])
    scene.add(light, pose=light_pose.copy())

    r = pyrender.OffscreenRenderer(viewport_width=width, viewport_height=height)
    color, _ = r.render(scene)
    return color[..., ::-1].copy()
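A possible usage sketch for render_mesh (hedged: it assumes a mesh object exposing .v vertices and .f faces, e.g. a psbody Mesh; the 0.001 factor above suggests vertices in millimetres). Since the function returns a BGR image, cv2.imwrite can save it directly:

import cv2
img = render_mesh(mesh, height=480, width=640)  # `mesh` is assumed to be loaded elsewhere
cv2.imwrite('mesh_render.png', img)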
Code Example #4
def rendering(mat):
    pcd = open3d.geometry.PointCloud()
    pcd.points = open3d.utility.Vector3dVector(mat["verts"])
    pcd.normals = open3d.utility.Vector3dVector(mat["normal_vec"])
    # pcd.estimate_normals()

    # estimate radius for rolling ball
    distances = pcd.compute_nearest_neighbor_distance()
    avg_dist = np.mean(distances)
    radius = 1.5 * avg_dist

    mesh = open3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
        pcd, open3d.utility.DoubleVector([radius, radius * 5])
    )

    # mesh, density = open3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd)

    # create the triangular mesh with the vertices and faces from open3d
    tri_mesh = trimesh.Trimesh(
        np.asarray(mesh.vertices),
        np.asarray(mesh.triangles),
        vertex_normals=np.asarray(mesh.vertex_normals),
        process=False,
    )
    # tri_mesh.export("test.stl")

    # tri_mesh.convex.is_convex(tri_mesh)

    mesh = pyrender.Mesh.from_trimesh(tri_mesh, smooth=False)

    # mesh = pyrender.Mesh.from_points(pcd.points, normals=pcd.normals)

    center = np.zeros(3)
    # center = (mat["verts"].min(axis=0) + mat["verts"].max(axis=0)) / 2
    # center[1] = mat["verts"][:, 1].min() + 10
    center[1] = -5
    center[2] = mat["verts"][:, 2].min()

    # compose scene
    scene = pyrender.Scene(ambient_light=[0.1, 0.1, 0.1], bg_color=[0.7, 0.7, 0.7])
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 4.0)
    # camera = pyrender.IntrinsicsCamera(1, 1, 0, 0)
    # light = pyrender.DirectionalLight(color=[1, 1, 1], intensity=1)
    light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=2.5e3)

    scene.add(mesh, pose=np.eye(4))
    scene.add(light, pose=[[1, 0, 0, center[0]], [0, 1, 0, center[1]], [0, 0, -1, center[2] - 20], [0, 0, 0, 1]])
    scene.add(camera, pose=[[1, 0, 0, center[0]], [0, -1, 0, center[1]], [0, 0, -1, center[2] - 40], [0, 0, 0, 1]])

    # render scene offscreen first so that `color` is defined for the return value
    r = pyrender.OffscreenRenderer(800, 768)
    color, _ = r.render(scene)
    r.delete()
    pyrender.Viewer(scene)

    return color
Code Example #5
File: train.py Project: t-walker-21/DenseFusion
def visualize_prediction(target_r, target_t, pred_r, pred_t, model_id):
    """

    Function to visualize prediction of object pose
    """

    #rotation = np.eye(3)

    # Get random translation
    translation = np.random.uniform(-0.07, 0.07, 3)
    translation[2] += 1
    translation[2] *= -1

    # Build transformation matrix
    mat = np.eye(4)
    mat[:3, :3] = rotation
    mat[:3, 3] = translation

    # Create light object
    light = pyrender.PointLight(color=[1.0, 1.0, 1.0],
                                intensity=np.random.normal(10, 5))

    # Create a scene
    scene = pyrender.Scene()

    # Create camera node object
    nc = pyrender.Node(camera=camera, matrix=np.eye(4))

    # Create object node object
    no = pyrender.Node(mesh=model, matrix=mat)

    # Create light node object
    nl = pyrender.Node(light=light, matrix=np.eye(4))

    # Add camera to scene
    scene.add_node(nc)

    # Add object to scene
    scene.add_node(no, parent_node=nc)

    # Add light to scene
    scene.add_node(nl, parent_node=nc)

    # Create object renderer
    render = pyrender.OffscreenRenderer(image_shape[0], image_shape[1])

    # Render images
    color, depth = render.render(scene)

    # pyrender renders in RGB; convert to BGR for OpenCV display
    color = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)

    if show_image:
        # Show image
        cv2.imshow("image", color)
        cv2.waitKey(0)
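The function above relies on module-level globals (camera, model, image_shape and show_image) defined elsewhere in train.py. A hypothetical setup with assumed intrinsics and a placeholder mesh path might be:

camera = pyrender.IntrinsicsCamera(fx=572.4, fy=573.6, cx=325.3, cy=242.0)  # assumed values
model = pyrender.Mesh.from_trimesh(trimesh.load('object.ply'))  # hypothetical path
image_shape = (640, 480)
show_image = True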
Code Example #6
def render(obj):
    import pyrender, os

    m = pyrender.Mesh.from_trimesh(obj.mesh, smooth=False)

    scene = pyrender.Scene(ambient_light=[0.1, 0.1, 0.1, 0.1])
    light = pyrender.PointLight(intensity=500)
    scene.add(m)
    scene.add(light)  # attach the point light so it actually illuminates the mesh
    pyrender.Viewer(scene)
Code Example #7
    def __init__(self, z=0, x=0, y=0, width=1024, light_color=None, f=None):
        if light_color is None:
            light_color = np.array([1.0, 1.0, 1.0])

        if f is None:
            f = np.array([4754.97941935 / 2, 4754.97941935 / 2])

        self.mesh = None
        frustum = {"near": 0.01, "far": 100.0, "height": 1024, "width": width}
        camera_params = {
            "c": np.array([x, y]),
            # Lens distortion coefficients, kept for reference only; pyrender's
            # IntrinsicsCamera does not model distortion
            "k": np.array([-0.19816071, 0.92822711, 0, 0, 0]),
            "f": f,
        }
        intensity = 1.5
        self.rgb_per_v = None

        self.scene = pyrender.Scene(ambient_light=[0.2, 0.2, 0.2],
                                    bg_color=[255, 255, 255])
        camera = pyrender.IntrinsicsCamera(
            fx=camera_params["f"][0],
            fy=camera_params["f"][1],
            cx=camera_params["c"][0],
            cy=camera_params["c"][1],
            znear=frustum["near"],
            zfar=frustum["far"],
        )

        camera_pose = np.eye(4)
        camera_pose[:3, 3] = np.array([0, 0, 1.0 - z])
        self.scene.add(camera, pose=camera_pose)

        angle = np.pi / 6.0
        pos = [0, 0, 1]

        light = pyrender.PointLight(color=light_color, intensity=intensity)

        light_pose = np.eye(4)
        light_pose[:3, 3] = pos
        self.scene.add(light, pose=light_pose.copy())

        light_pose[:3, 3] = cv2.Rodrigues(np.array([angle, 0, 0]))[0].dot(pos)
        self.scene.add(light, pose=light_pose.copy())

        light_pose[:3, 3] = cv2.Rodrigues(np.array([-angle, 0, 0]))[0].dot(pos)
        self.scene.add(light, pose=light_pose.copy())

        light_pose[:3, 3] = cv2.Rodrigues(np.array([0, -angle, 0]))[0].dot(pos)
        self.scene.add(light, pose=light_pose.copy())

        light_pose[:3, 3] = cv2.Rodrigues(np.array([0, angle, 0]))[0].dot(pos)
        self.scene.add(light, pose=light_pose.copy())

        self.r = pyrender.OffscreenRenderer(viewport_width=frustum["width"],
                                            viewport_height=frustum["height"])
Code Example #8
File: watchdog.py Project: WBrandes/3DErrorDetection
    def add_light(self, light_pos, light_color, light_intensity, light_range):
        translation = np.array([
            [1.0, 0.0, 0.0, light_pos[0]],
            [0.0, 1.0, 0.0, light_pos[1]],
            [0.0, 0.0, 1.0, light_pos[2]],
            [0.0, 0.0, 0.0, 1.0],
        ])
        self.lights.append([
            pyrender.PointLight(color=light_color,
                                intensity=light_intensity,
                                range=light_range), translation
        ])
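add_light only stores (light, pose) pairs in self.lights; nothing is attached to a scene here. A minimal sketch of a companion method (assumed, not from the source) that adds the stored lights to a pyrender scene:

    def apply_lights(self, scene):
        # Attach every stored point light to the scene at its saved pose
        for light, pose in self.lights:
            scene.add(light, pose=pose)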
Code Example #9
File: rendering.py Project: zivzone/latentfusion
    def _update_light_nodes(self, num_lights):
        delta = num_lights - len(self.light_nodes)
        if delta < 0:
            for _ in range(abs(delta)):
                self.scene.remove_node(self.light_nodes.pop())
        elif delta > 0:
            for _ in range(delta):
                light_node = self.scene.add(pyrender.PointLight(
                    color=np.ones(3), intensity=0.0),
                    pose=np.eye(4),
                    name='point_light')
                self.light_nodes.append(light_node)
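_update_light_nodes only grows or shrinks a pool of zero-intensity lights. A plausible companion step (assumed, not shown in the source) would configure each pooled node before rendering; pyrender exposes the wrapped light as node.light and poses via Scene.set_pose:

    def _set_lights(self, intensities, poses):
        # Configure the pooled light nodes for the next render
        for node, intensity, pose in zip(self.light_nodes, intensities, poses):
            node.light.intensity = intensity
            self.scene.set_pose(node, pose=pose)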
Code Example #10
    def __init__(self, width, height, objects):
        self.scene = pyrender.Scene(bg_color=np.zeros(4))
        for tmesh in objects:
            # Swap the y and z axes (note: this matrix has determinant -1,
            # so it also mirrors the mesh)
            tmesh.apply_transform(np.array([
                [1.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 0.0],
                [0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 1.0]
            ]))
            self.scene.add(pyrender.Mesh.from_trimesh(tmesh))
        light = pyrender.PointLight(color=np.ones(3), intensity=30.0)
        self.scene.add(light)
        self.renderer = pyrender.OffscreenRenderer(width, height)
Code Example #11
    def __init__(self, options):
        """Constructor."""

        # RGB frames path
        self.rgb_path = os.path.join('datasets', 'ycb-video', options.video_id,
                                     'rgb')

        # Estimates path
        self.estimates_path = os.path.join('results', options.algorithm, 'nrt',
                                           options.mask_set, 'validation',
                                           options.object, options.video_id,
                                           'object-tracking_estimate.txt')

        # Mesh path
        object_mesh_path = os.path.join('models', 'YCB_models', 'models',
                                        options.object, 'textured')
        if options.mesh_type == 'low-quality':
            object_mesh_path += '_simple'
        object_mesh_path += '.obj'

        # Mesh
        trimesh_mesh = trimesh.load(object_mesh_path)
        mesh = pyrender.Mesh.from_trimesh(trimesh_mesh)

        # Scene
        self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0])

        # Camera
        fx = 1066.8
        fy = 1067.5
        cx = 312.99
        cy = 241.31
        width = 640
        height = 480
        camera_transform = Quaternion(axis=[1.0, 0.0, 0.0],
                                      angle=numpy.pi).transformation_matrix

        self.camera = pyrender.IntrinsicsCamera(fx=fx, fy=fy, cx=cx, cy=cy)
        self.scene.add(self.camera, pose=camera_transform)

        # Light
        self.light = pyrender.PointLight(intensity=20.0)
        self.scene.add(self.light)

        # Object node
        self.mesh_node = pyrender.Node(mesh=mesh, matrix=numpy.eye(4))
        self.scene.add_node(self.mesh_node)

        # Renderer
        self.renderer = pyrender.OffscreenRenderer(width, height)
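The constructor above only builds the scene; per-frame rendering presumably happens elsewhere in the class. A hedged sketch of such a step, assuming each estimate arrives as a 4x4 object pose matrix:

    def render_estimate(self, object_pose):
        # Move the object node to the estimated pose and render the frame
        self.scene.set_pose(self.mesh_node, pose=object_pose)
        color, depth = self.renderer.render(self.scene)
        return color, depth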
Code Example #12
File: renderer.py Project: suddhu/tacto
    def _init_light(self):
        """
        Set up light
        """

        # Load light from config file
        light = self.conf.sensor.lights

        origin = np.array(light.origin)

        xyz = []
        if light.polar:
            # Apply polar coordinates
            thetas = light.xrtheta.thetas
            rs = light.xrtheta.rs
            xs = light.xrtheta.xs
            for i in range(len(thetas)):
                theta = np.pi / 180 * thetas[i]
                xyz.append(
                    [xs[i], rs[i] * np.cos(theta), rs[i] * np.sin(theta)])
        else:
            # Apply cartesian coordinates
            xyz = np.array(light.xyz.coords)

        colors = np.array(light.colors)
        intensities = light.intensities

        # Save light nodes
        self.light_nodes = []
        self.light_poses0 = []

        for i in range(len(colors)):

            color = colors[i]
            light_pose_0 = euler2matrix(angles=[0, 0, 0],
                                        translation=xyz[i] + origin)

            light = pyrender.PointLight(color=color, intensity=intensities[i])
            light_node = pyrender.Node(light=light, matrix=light_pose_0)

            self.scene.add_node(light_node)
            self.light_nodes.append(light_node)
            self.light_poses0.append(light_pose_0)
            self.current_light_nodes.append(light_node)

            # Add extra light node into scene_depth
            light_node_depth = pyrender.Node(light=light, matrix=light_pose_0)
            self.scene_depth.add_node(light_node_depth)
Code Example #13
def render(obsinfo, bg_color=[0.0, 0.0, 0.0], wireframe=False):
    scene = pyrender.Scene(bg_color=bg_color)
    camera = pyrender.PerspectiveCamera(
        yfov=np.radians(obsinfo.fov.fovy), aspectRatio=obsinfo.fov.aspect
    )

    # rot_z(180) . rot_y(180)
    camera_pose = np.array(
        [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]],
        dtype=np.float64,
    )
    scene.add(camera, pose=camera_pose)

    star_image = np.zeros(
        shape=(obsinfo.height, obsinfo.width, 4), dtype=np.uint8
    )

    for star in obsinfo.stars:
        star_image += render_star(star, obsinfo.width, obsinfo.height)

    for solar_object in obsinfo.solar_objects:
        mesh, pose = render_solar_object(solar_object, wireframe)
        scene.add(mesh, pose=pose)

    # light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=3.8e27)
    light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=3.8e17)
    pose = np.identity(4)
    pose[0:3, 3] = -obsinfo.pos
    scene.add(light, pose=pose)

    # Render the scene
    r = pyrender.OffscreenRenderer(obsinfo.width, obsinfo.height)
    flags = pyrender.RenderFlags.RGBA | pyrender.RenderFlags.SHADOWS_DIRECTIONAL
    foreground, _ = r.render(scene, flags=flags)

    # Background layer: star_image; foreground layer: foreground
    # Build the RGBA background colour without mutating the caller's bg_color
    # (the original appended to the mutable default argument, a classic bug)
    bg_color_int = (np.array(bg_color + [1.0]) * 255).astype(int)
    return np.where(
        foreground != bg_color_int,
        foreground,
        np.where(star_image != [0, 0, 0, 0], star_image, bg_color_int),
    )
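The final composite compares pixels to the background colour channel by channel, which can blend channels from different layers within a single pixel. A sketch of a per-pixel alternative (an assumption, not the source's method):

fg_mask = np.any(foreground != bg_color_int, axis=-1, keepdims=True)
star_mask = np.any(star_image != 0, axis=-1, keepdims=True)
composite = np.where(fg_mask, foreground, np.where(star_mask, star_image, bg_color_int))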
Code Example #14
File: renderer.py Project: zeta1999/SSP-3D
    def __init__(self, faces, img_res=512):
        self.renderer = pyrender.OffscreenRenderer(viewport_width=img_res,
                                                   viewport_height=img_res,
                                                   point_size=1.0)
        self.camera_center = [img_res // 2, img_res // 2]
        self.faces = faces
        self.img_res = img_res

        # set the scene
        self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                                    ambient_light=(0.3, 0.3, 0.3))

        light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1.)

        light_pose = np.eye(4)
        light_pose[:3, 3] = [0, -1, 1]
        self.scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [0, 1, 1]
        self.scene.add(light, pose=light_pose)
Code Example #15
def _add_lighting(scene, light_type, random_range=(1, 4)):
    '''Takes a scene and adds a random number of lights.
    random_range:   Half-open range [low, high) to pick the number of lights from.'''
    n = random.randrange(  # Number of lights
        random_range[0], random_range[1])

    for _ in range(n):  # Add lights of the requested type
        d = None
        if 'directional_lights' == light_type:
            d = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=2)
        elif 'point_lights' == light_type:
            d = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=2)
        else:
            raise Exception(
                'Light type not recognized, should be "directional_lights" or "point_lights", not {}'
                .format(light_type))

        _add_model(scene, d)

    return scene
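_add_model is not shown in this snippet. Judging from the call site, a minimal sketch (assumed implementation) would simply attach the light to the scene, optionally at a supplied pose:

def _add_model(scene, model, pose=None):
    # Attach a light (or mesh) node to the scene; the default pose is the origin
    scene.add(model, pose=pose if pose is not None else np.eye(4))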
Code Example #16
    def get_frame(self, phi, theta):
        scene = pyrender.Scene(bg_color=np.zeros(4))
        scene.add(self.mesh)
        camera = pyrender.PerspectiveCamera(yfov=v_fov * pi / 180)
        camera_rot_y = np.array([  # rotation about the x-axis by phi (despite the name)
            [1.0, 0.0, 0.0, 0.0],
            [0.0, cos(phi), -sin(phi), 0.0],
            [0.0, sin(phi), cos(phi), 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ])
        camera_rot_x = np.array([  # rotation about the y-axis by theta (despite the name)
            [cos(theta), 0.0, sin(theta), 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [-sin(theta), 0.0, cos(theta), 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ])
        camera_rot_z = np.eye(4)
        # camera_rot_z = np.array([
        #        [1, 0.0, 0.0, 0.0],
        #        [0.0, cos(gamma), sin(gamma), 0.0],
        #        [0.0, -sin(gamma), cos(gamma), 0.0],
        #        [0.0, 0.0, 0.0, 1.0],
        #     ])
        camera_pose = np.matmul(np.matmul(camera_rot_y, camera_rot_x),
                                camera_rot_z)
        camera_pose[0][3] = 0.0
        camera_pose[1][3] = 0.0
        camera_pose[2][3] = 7.0
        # camera_pose =  np.array([
        #        [1.0, 0.0, 0.0, -20],
        #        [0.0, 1.0, 0.0, 3],
        #        [0.0, 0.0, 1.0, 150],
        #        [0.0, 0.0, 0.0, 1.0],
        #     ])
        scene.add(camera, pose=camera_pose)

        # Set up the light
        light = pyrender.PointLight(color=np.ones(3), intensity=30.0)
        scene.add(light, pose=camera_pose)
        color, depth = self.renderer.render(scene)
        return color, depth
Code Example #17
    def __init__(self, resolution=(256, 256)):
        self.resolution = resolution

        self.faces = np.load(config.SMPL_FACES_PATH)
        self.renderer = pyrender.OffscreenRenderer(
            viewport_width=self.resolution[0],
            viewport_height=self.resolution[1],
            point_size=1.0)

        # set the scene
        self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                                    ambient_light=(0.3, 0.3, 0.3))

        light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1.)

        light_pose = np.eye(4)
        light_pose[:3, 3] = [0, -1, 1]
        self.scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [0, 1, 1]
        self.scene.add(light, pose=light_pose)
Code Example #18
    def __init__(self, resolution=(224, 224), orig_img=False, wireframe=False):
        self.resolution = resolution

        self.faces = get_smpl_faces()
        self.orig_img = orig_img
        self.wireframe = wireframe
        self.renderer = pyrender.OffscreenRenderer(
            viewport_width=self.resolution[0],
            viewport_height=self.resolution[1],
            point_size=1.0)

        # set the scene
        self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                                    ambient_light=(0.3, 0.3, 0.3))

        light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1)

        light_pose = np.eye(4)
        light_pose[:3, 3] = [0, -1, 1]
        self.scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [0, 1, 1]
        self.scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [1, 1, 2]
        self.scene.add(light, pose=light_pose)

        self.colors_dict = {
            'red': np.array([0.5, 0.2, 0.2]),
            'pink': np.array([0.7, 0.5, 0.5]),
            'neutral': np.array([0.7, 0.7, 0.6]),
            'purple': np.array([0.5, 0.5, 0.7]),
            'green': np.array([0.5, 0.55, 0.3]),
            'sky': np.array([0.3, 0.5, 0.55]),
            'white': np.array([1.0, 0.98, 0.94]),
        }
Code Example #19
    def __init__(self, synthetic=False):
        self.synthetic = synthetic
        os.environ['PYOPENGL_PLATFORM'] = 'egl'
        tscene = trimesh.load('/cvlabdata2/cvlab/datasets_protopap/deepim/data/models/swisscube/swisscube.obj')
        mesh = pyrender.Mesh.from_trimesh(list(tscene.geometry.values()), smooth=False)

        if synthetic:
            width, height = 1024, 1024
        else:
            width, height = 2048, 2048


        self.renderer = pyrender.OffscreenRenderer(viewport_width=width, viewport_height=height, point_size=1.0)
        scene = pyrender.Scene(ambient_light=[0.02, 0.02, 0.02], bg_color=[0, 0, 0])

        light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1000000.0)
        
        if synthetic:
            fx, fy, cx, cy = 607, 607, 512, 512
        else:
            fx, fy, cx, cy = 4000, 4000, 1024, 1024

        self.intrinsic = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape((3, 3))
        cam = pyrender.IntrinsicsCamera(fx, fy, cx, cy, zfar=2000)
        cam_rot = R.from_euler('y', 180, degrees=True).as_matrix()
        cam_matrix = to_homo(cam_rot, np.zeros((3,)))

        self.nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))
        nl = pyrender.Node(light=light, matrix=np.eye(4))
        nc = pyrender.Node(camera=cam, matrix=cam_matrix)

        scene.add_node(self.nm)
        scene.add_node(nl)
        scene.add_node(nc)

        self.scene = scene
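to_homo is not defined in this snippet. A plausible helper, assuming it packs a 3x3 rotation and a translation vector into a 4x4 homogeneous matrix:

def to_homo(rot, trans):
    # Compose a 4x4 homogeneous transform from a rotation matrix and a translation
    mat = np.eye(4)
    mat[:3, :3] = rot
    mat[:3, 3] = trans
    return mat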
Code Example #20
# pyrender.Viewer(scene)

import trimesh
import pyrender
import numpy as np
import math
import collada

red = [1.0, 0.0, 0.0]
green = [0.0, 1.0, 0.0]
blue = [0.0, 0.0, 1.0]
black = [0.0, 0.0, 0.0]
gray = [0.5, 0.5, 0.5]
white = [1.0, 1.0, 1.0]

point_light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=2.0)
spot_light = pyrender.SpotLight(color=[1.0, 1.0, 1.0],
                                intensity=2.0,
                                innerConeAngle=0.05,
                                outerConeAngle=0.5)
directional_light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0],
                                              intensity=2.0)

perspective_camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0,
                                                aspectRatio=1.414)
ortho_camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0)


def display(mesh, light=None, camera=None):
    scene = pyrender.Scene(ambient_light=white)
    scene.add(mesh)
    # (the original snippet is truncated here; a plausible continuation adds the
    # optional light and camera, then opens the interactive viewer)
    if light is not None:
        scene.add(light)
    if camera is not None:
        scene.add(camera, pose=np.eye(4))
    pyrender.Viewer(scene)
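A usage sketch tying the helpers above together (the model path is hypothetical):

tmesh = trimesh.load('bunny.obj')  # hypothetical model file
display(pyrender.Mesh.from_trimesh(tmesh), light=point_light, camera=perspective_camera)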
Code Example #21
def render_image(img,
                 verts,
                 cam,
                 faces=None,
                 angle=None,
                 axis=None,
                 resolution=224,
                 output_fn=None):
    if faces is None:
        faces = get_smpl_faces()

    mesh = trimesh.Trimesh(vertices=verts, faces=faces)

    Rx = trimesh.transformations.rotation_matrix(math.radians(180), [1, 0, 0])
    mesh.apply_transform(Rx)

    if angle and axis:
        R = trimesh.transformations.rotation_matrix(math.radians(angle), axis)
        mesh.apply_transform(R)

    if output_fn:
        mesh.export(output_fn)
        camera_translation = np.array(
            [-cam[1], cam[2], 2 * 5000. / (img.shape[0] * cam[0] + 1e-9)])
        np.save(output_fn.replace('.obj', '.npy'), camera_translation)

        # Save the rotated mesh
        # R = trimesh.transformations.rotation_matrix(math.radians(270), [0,1,0])
        # rotated_mesh = mesh.copy()
        # rotated_mesh.apply_transform(R)
        # rotated_mesh.export(output_fn.replace('.obj', '_rot.obj'))

    scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                           ambient_light=(0.3, 0.3, 0.3))

    material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.0,
                                                  alphaMode='OPAQUE',
                                                  baseColorFactor=(1.0, 1.0,
                                                                   0.9, 1.0))
    mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
    scene.add(mesh, 'mesh')

    camera_pose = np.eye(4)

    camera = WeakPerspectiveCamera(scale=cam[0],
                                   translation=cam[1:],
                                   zfar=1000.)
    scene.add(camera, pose=camera_pose)

    light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1)

    light_pose = np.eye(4)
    light_pose[:3, 3] = [0, -1, 1]
    scene.add(light, pose=light_pose)

    light_pose[:3, 3] = [0, 1, 1]
    scene.add(light, pose=light_pose)

    light_pose[:3, 3] = [1, 1, 2]
    scene.add(light, pose=light_pose)

    r = pyrender.OffscreenRenderer(viewport_width=resolution,
                                   viewport_height=resolution,
                                   point_size=1.0)

    color, _ = r.render(scene, flags=pyrender.RenderFlags.RGBA)
    # color = color[:, ::-1, :]
    valid_mask = (color[:, :, -1] > 0)[:, :, np.newaxis]

    output_img = color[:, :, :-1] * valid_mask + (1 - valid_mask) * img

    image = output_img.astype(np.uint8)
    text = f's: {cam[0]:.2f}, tx: {cam[1]:.2f}, ty: {cam[2]:.2f}'
    cv2.putText(image, text, (5, 10), 0, 0.4, color=(0, 255, 0))

    return image
Code Example #22
def render_mesh_helper(mesh, t_center, rot=np.zeros(3), v_colors=None, errors=None, error_unit='m', min_dist_in_mm=0.0, max_dist_in_mm=3.0, z_offset=0):
    camera_params = {'c': np.array([400, 400]),
                     'k': np.array([-0.19816071, 0.92822711, 0, 0, 0]),
                     'f': np.array([4754.97941935 / 2, 4754.97941935 / 2])}

    frustum = {'near': 0.01, 'far': 3.0, 'height': 800, 'width': 800}

    mesh_copy = Mesh(mesh.v, mesh.f)
    # mesh_copy.v[:] = np.matmul(mesh_copy.v[:], rot)
    # mesh_copy.v[:] = cv2.Rodrigues(rot)[0].dot((mesh_copy.v-t_center).T).T+t_center

    if errors is not None:
        intensity = 0.5
        unit_factor = get_unit_factor('mm')/get_unit_factor(error_unit)
        errors = unit_factor*errors

        norm = mpl.colors.Normalize(vmin=min_dist_in_mm, vmax=max_dist_in_mm)
        cmap = cm.get_cmap(name='jet')
        colormapper = cm.ScalarMappable(norm=norm, cmap=cmap)
        rgba_per_v = colormapper.to_rgba(errors)
        rgb_per_v = rgba_per_v[:, 0:3]
    elif v_colors is not None:
        intensity = 0.5
        rgb_per_v = v_colors
    else:
        intensity = 1.5
        rgb_per_v = None

    tri_mesh = trimesh.Trimesh(vertices=mesh_copy.v, faces=mesh_copy.f, vertex_colors=rgb_per_v)
    render_mesh = pyrender.Mesh.from_trimesh(tri_mesh, smooth=True)

    scene = pyrender.Scene(ambient_light=[.2, .2, .2], bg_color=[255, 255, 255])
    camera = pyrender.IntrinsicsCamera(fx=camera_params['f'][0],
                                      fy=camera_params['f'][1],
                                      cx=camera_params['c'][0],
                                      cy=camera_params['c'][1],
                                      znear=frustum['near'],
                                      zfar=frustum['far'])

    scene.add(render_mesh, pose=np.eye(4))

    camera_pose = np.eye(4)
    camera_pose[:3,3] = np.array([0, 0, 1.0-z_offset])
    scene.add(camera, pose=[[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 1],
                            [0, 0, 0, 1]])

    angle = np.pi / 6.0
    pos = camera_pose[:3,3]
    light_color = np.array([1., 1., 1.])
    light = pyrender.PointLight(color=light_color, intensity=intensity)

    light_pose = np.eye(4)
    light_pose[:3,3] = pos
    scene.add(light, pose=light_pose.copy())

    light_pose[:3,3] = cv2.Rodrigues(np.array([angle, 0, 0]))[0].dot(pos)
    scene.add(light, pose=light_pose.copy())

    light_pose[:3,3] =  cv2.Rodrigues(np.array([-angle, 0, 0]))[0].dot(pos)
    scene.add(light, pose=light_pose.copy())

    light_pose[:3,3] = cv2.Rodrigues(np.array([0, -angle, 0]))[0].dot(pos)
    scene.add(light, pose=light_pose.copy())

    light_pose[:3,3] = cv2.Rodrigues(np.array([0, angle, 0]))[0].dot(pos)
    scene.add(light, pose=light_pose.copy())

    flags = pyrender.RenderFlags.SKIP_CULL_FACES
    r = pyrender.OffscreenRenderer(viewport_width=frustum['width'], viewport_height=frustum['height'])
    color, _ = r.render(scene, flags=flags)

    return color[..., ::-1]
Code Example #23
        T_cam1_cam2[0:3, 3] = t
        T_cam2 = T_cam1 @ T_cam1_cam2
        setup = [T_cam1, T_cam2, R, t]
        setups.append(setup)

    return setups


R_cam1_cam2 = np.eye(3)
t_cam1_cam2 = np.array((0.5, 0, 0))

setups = get_cam_setups(R_cam1_cam2, t_cam1_cam2,
                        1000)  #[[cam1_pose, cam2_pose]]

scene = pyrender.Scene(ambient_light=np.array([0.02, 0.02, 0.02, 0.1]))
point_l = pyrender.PointLight(color=np.ones(3), intensity=50.0)

l_pose = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0, 2.0], [0.0, 0.0, 0.0, 1.0]])
point_l_node = scene.add(point_l, pose=l_pose)

drillpink_trimesh = trimesh.load('Models/drillpink.obj')
drillpink_obj = pyrender.Mesh.from_trimesh(drillpink_trimesh)

drillred_trimesh = trimesh.load('Models/drillred.obj')
drillred_obj = pyrender.Mesh.from_trimesh(drillred_trimesh)

drillyellow_trimesh = trimesh.load('Models/drillyellow.obj')
drillyellow_obj = pyrender.Mesh.from_trimesh(drillyellow_trimesh)

obj_list = [drillpink_obj, drillred_obj, drillyellow_obj]
Code Example #24
File: master.py Project: rileyalankirk/PerlinNoise
def main():

    cube_tasks = 10
    perlin_iterations = 10
    size = [20,20,20]

    # Create tasks to do banded Perlin noise, then go through all results and average them together to finish the banded Perlin noise
    environment = np.zeros((size[0],size[1],size[2]))
    data = [[size[0], size[1], size[2], 2.0, 1.0]]*perlin_iterations
    for result in ~cave_gen_worker.generate_perlin_noise.starmap(zip(data)):
        environment += np.array(pickle.loads(result)) / perlin_iterations

    # Getting all data to be above 0 for marching cubes program
    environment = environment + np.abs(np.min(environment))
    environment = environment.reshape(size[0],size[1],size[2])


    cubes_data = []

    # Getting threshold to render marching cubes at.
    threshold = np.mean(environment)

    # Now we need to split the data up into slices for sending out as separate tasks
    group_size = size[0]//cube_tasks
    # Want to make sure the group size is at least 2, since if it were 1 or 0 we wouldn't be sending any actual cubes
    if group_size < 2:
        group_size = 2
    for i in range(cube_tasks):
        start_index = i*group_size
        end_index = (i+1)*group_size + 1

        '''
        It's possible that the way the slicing is done, we could end up with a single 2D
        slice as the last thing sent to a worker. This of course wouldn't work too well
        with marching cubes, so we check for this below and simply add that would-be 1-layer
        slice on to the slice that comes before it
        '''
        if end_index >= size[0]-2:
            cubes_data.append([environment[start_index:], threshold, [start_index, 0, 0]])
            break
        cubes_data.append([environment[start_index:end_index], threshold, [start_index, 0, 0]])

    vertices = []
    indices = []

    # Create marching cubes tasks and go through all results
    for result in ~cave_gen_worker.build_marching_cube_mesh.starmap(zip(cubes_data)):
        # Get vertices and indices data generated
        new_vertices, new_indices = pickle.loads(result)
        new_indices = np.array(new_indices)

        # Add on the vertices and indices to our lists
        new_indices += len(vertices)
        vertices += list(new_vertices)
        indices += list(new_indices)

    # Now render the scene

    # Setting up meshes
    colors = [[0.25, 0.5, 0.25]] * len(vertices)
    tri_mesh = trimesh.Trimesh(vertices=vertices, faces=indices, vertex_colors=colors)
    mesh = pyrender.Mesh.from_trimesh(tri_mesh)

    scene = pyrender.Scene()
    scene.add(mesh)


    # Setting up camera object
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    s = np.sqrt(2)/2
    camera_pose = np.array([
       [0.0, -s,   s,   size[0]*1.25],
       [1.0,  0.0, 0.0, size[1]/2.5],
       [0.0,  s,   s,   size[2]*1.25],
       [0.0,  0.0, 0.0, 1.0],
    ])

    scene.add(camera, pose=camera_pose)


    # Setting up lighting
    lights = []

    for a in range(2):
        for b in range(2):
            for c in range(2):
                # Place a light near each corner; note (-1) ** a, since -1 ** a
                # always evaluates to -1 (** binds tighter than unary minus)
                lights.append(np.array([
                   [0.0, -s,   s,   size[0] * (-1) ** a],
                   [1.0,  0.0, 0.0, size[1] * (-1) ** b],
                   [0.0,  s,   s,   size[2] * (-1) ** c],
                   [0.0,  0.0, 0.0, 1.0],
                ]))

    light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1000.0, range=1000.0)

    for l in lights:
        scene.add(light, pose=l)

    # Finally, render. This will open an interactive viewer.
    pyrender.Viewer(scene, render_flags={"cull_faces":False})
Code Example #25
bounds = bunny_tmesh.bounds
bunny_tmesh.vertices /= (bounds[1] - bounds[0]).max()

# assume an opengl camera model: +x/y/z -> right/up/back.
camera = pyrender.PerspectiveCamera(yfov=np.pi / 3, aspectRatio=1)

# put the camera at (0, 0, 1.5) in world frame with up +y facing -z.
start_c2w = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1.5], [0, 0, 0, 1]])
# and rotate it around +y.
c2ws = circular_c2ws_around_y(start_c2w, num_poses=N)

# put the light at (0, 1.5, 0) in world frame with up +z facing -y.
light_c2w = np.array(
    [[1, 0, 0, 0], [0, 0, 1, 1.5], [0, -1, 0, 0], [0, 0, 0, 1]]
)
light = pyrender.PointLight(color=np.ones(3), intensity=5.0)

r = pyrender.OffscreenRenderer(*img_wh)
scene = pyrender.Scene()
mesh_node = pyrender.Node(
    mesh=pyrender.Mesh.from_trimesh(bunny_tmesh), matrix=np.eye(4)
)
camera_node = pyrender.Node(camera=camera, matrix=np.eye(4))
light_node = pyrender.Node(light=light, matrix=light_c2w)
scene.add_node(mesh_node)
scene.add_node(camera_node)
scene.add_node(light_node)

plot_dict = dict(bunny=dict(mesh=bunny_tmesh))

for i, c2w in enumerate(c2ws):
    # (snippet truncated here; a plausible continuation moves the camera along
    # the circular path and renders each frame)
    scene.set_pose(camera_node, pose=c2w)
    color, depth = r.render(scene)
Code Example #26
def render_mesh_helper(mesh,
                       t_center,
                       rot=np.zeros(3),
                       tex_img=None,
                       v_colors=None,
                       errors=None,
                       error_unit='m',
                       min_dist_in_mm=0.0,
                       max_dist_in_mm=3.0,
                       z_offset=0):
    camera_params = {
        'c': np.array([400, 400]),
        'k': np.array([-0.19816071, 0.92822711, 0, 0, 0]),
        'f': np.array([4754.97941935 / 2, 4754.97941935 / 2])
    }

    frustum = {'near': 0.01, 'far': 3.0, 'height': 800, 'width': 800}

    mesh_copy = Mesh(mesh.v, mesh.f)
    mesh_copy.v[:] = cv2.Rodrigues(rot)[0].dot(
        (mesh_copy.v - t_center).T).T + t_center

    texture_rendering = tex_img is not None and hasattr(
        mesh, 'vt') and hasattr(mesh, 'ft')
    if texture_rendering:
        intensity = 0.5
        tex = pyrender.Texture(source=tex_img, source_channels='RGB')
        material = pyrender.material.MetallicRoughnessMaterial(
            baseColorTexture=tex)

        # Workaround as pyrender requires number of vertices and uv coordinates to be the same
        temp_filename = '%s.obj' % next(tempfile._get_candidate_names())
        mesh.write_obj(temp_filename)
        tri_mesh = trimesh.load(temp_filename, process=False)
        try:
            os.remove(temp_filename)
        except OSError:
            print('Failed deleting temporary file - %s' % temp_filename)
        render_mesh = pyrender.Mesh.from_trimesh(tri_mesh, material=material)
    elif errors is not None:
        intensity = 0.5
        unit_factor = get_unit_factor('mm') / get_unit_factor(error_unit)
        errors = unit_factor * errors

        norm = mpl.colors.Normalize(vmin=min_dist_in_mm, vmax=max_dist_in_mm)
        cmap = cm.get_cmap(name='jet')
        colormapper = cm.ScalarMappable(norm=norm, cmap=cmap)
        rgba_per_v = colormapper.to_rgba(errors)
        rgb_per_v = rgba_per_v[:, 0:3]
    elif v_colors is not None:
        intensity = 0.5
        rgb_per_v = v_colors
    else:
        intensity = 1.5
        rgb_per_v = None

    if not texture_rendering:
        tri_mesh = trimesh.Trimesh(vertices=mesh_copy.v,
                                   faces=mesh_copy.f,
                                   vertex_colors=rgb_per_v)
        render_mesh = pyrender.Mesh.from_trimesh(tri_mesh, smooth=True)

    scene = pyrender.Scene(ambient_light=[.2, .2, .2],
                           bg_color=[255, 255, 255])
    camera = pyrender.IntrinsicsCamera(fx=camera_params['f'][0],
                                       fy=camera_params['f'][1],
                                       cx=camera_params['c'][0],
                                       cy=camera_params['c'][1],
                                       znear=frustum['near'],
                                       zfar=frustum['far'])

    scene.add(render_mesh, pose=np.eye(4))

    camera_pose = np.eye(4)
    camera_pose[:3, 3] = np.array([0, 0, 1.0 - z_offset])
    scene.add(camera,
              pose=[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]])

    angle = np.pi / 6.0
    pos = camera_pose[:3, 3]
    light_color = np.array([1., 1., 1.])
    light = pyrender.PointLight(color=light_color, intensity=intensity)

    light_pose = np.eye(4)
    light_pose[:3, 3] = pos
    scene.add(light, pose=light_pose.copy())

    light_pose[:3, 3] = cv2.Rodrigues(np.array([angle, 0, 0]))[0].dot(pos)
    scene.add(light, pose=light_pose.copy())

    light_pose[:3, 3] = cv2.Rodrigues(np.array([-angle, 0, 0]))[0].dot(pos)
    scene.add(light, pose=light_pose.copy())

    light_pose[:3, 3] = cv2.Rodrigues(np.array([0, -angle, 0]))[0].dot(pos)
    scene.add(light, pose=light_pose.copy())

    light_pose[:3, 3] = cv2.Rodrigues(np.array([0, angle, 0]))[0].dot(pos)
    scene.add(light, pose=light_pose.copy())

    flags = pyrender.RenderFlags.SKIP_CULL_FACES
    try:
        r = pyrender.OffscreenRenderer(viewport_width=frustum['width'],
                                       viewport_height=frustum['height'])
        color, _ = r.render(scene, flags=flags)
    except Exception:
        print('pyrender: Failed rendering frame')
        color = np.zeros((frustum['height'], frustum['width'], 3),
                         dtype='uint8')

    return color[..., ::-1]
Code Example #27
File: generate_dummy.py Project: crs904620522/idr
    Pr = np.eye(4)
    Pr[:3,:3] = euler_to_rotation_matrix(pitch, yaw, roll)

    Pt = np.eye(4)
    Pt[2,3] = -z

    return Pr.dot(Pt)

def to_gl(pose):
    return pose.dot(np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]))

# Define geometry and properties
mesh = pyrender.Mesh.from_trimesh(get_colored_cube(), smooth=False)

# Define light
lights = [pyrender.PointLight(intensity=500.0) for _ in range(VIEWS)]
light_poses = [to_gl(get_random_pose()) for _ in range(VIEWS)]

# Define camera
intrinsics = get_intrinsics()

images = []
masks = []
poses = []
for view in range(VIEWS):

    # Define scene
    scene = pyrender.Scene()
    scene.add(mesh)
    for l, lp in zip(lights, light_poses): scene.add(l, pose=lp)
    pose = get_random_pose()
Code Example #28
    def render_multiview(self, vertices, K, R, T, imglist, trackId=0, return_depth=False, return_color=False,
        bg_color=[0.0, 0.0, 0.0, 0.0], camera=None):
        # List to store rendered scenes
        output_images, output_colors, output_depths = [], [], []
        # Need to flip x-axis
        rot = trimesh.transformations.rotation_matrix(
            np.radians(180), [1, 0, 0])
        nViews = len(imglist)
        for nv in range(nViews):
            img = imglist[nv]
            self.renderer.viewport_height = img.shape[0]
            self.renderer.viewport_width = img.shape[1]
            # Create a scene for each image and render all meshes
            scene = pyrender.Scene(bg_color=bg_color,
                                   ambient_light=(0.3, 0.3, 0.3))
            camera_pose = np.eye(4)

            # for every person in the scene
            if isinstance(vertices, dict):
                for trackId, data in vertices.items():
                    vert = data['vertices'].copy()
                    faces = data['faces']
                    col = data.get('col', trackId)
                    vert = vert @ R[nv].T + T[nv]
                    mesh = trimesh.Trimesh(vert, faces)
                    mesh.apply_transform(rot)
                    trans = [0, 0, 0]

                    material = pyrender.MetallicRoughnessMaterial(
                        metallicFactor=0.0,
                        alphaMode='OPAQUE',
                        baseColorFactor=colors[col % len(colors)])
                    mesh = pyrender.Mesh.from_trimesh(
                        mesh,
                        material=material)
                    scene.add(mesh, 'mesh')
            else:
                verts = vertices @ R[nv].T + T[nv]
                mesh = trimesh.Trimesh(verts, self.faces)
                mesh.apply_transform(rot)
                trans = [0, 0, 0]

                material = pyrender.MetallicRoughnessMaterial(
                    metallicFactor=0.0,
                    alphaMode='OPAQUE',
                    baseColorFactor=colors[trackId % len(colors)])
                mesh = pyrender.Mesh.from_trimesh(
                    mesh,
                    material=material)
                scene.add(mesh, 'mesh')

            if camera is not None:
                light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=70)
                light_pose = np.eye(4)
                light_pose[:3, 3] = [0, 0, 4.5]
                scene.add(light, pose=light_pose)

                light_pose[:3, 3] = [0, 1, 4]
                scene.add(light, pose=light_pose)

                light_pose[:3, 3] = [0, -1, 4]
                scene.add(light, pose=light_pose)
            else:
                trans = [0, 0, 0]
                # Use 3 directional lights
                # Create light source
                light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)
                light_pose = np.eye(4)
                light_pose[:3, 3] = np.array([0, -1, 1]) + trans
                scene.add(light, pose=light_pose)
                light_pose[:3, 3] = np.array([0, 1, 1]) + trans
                scene.add(light, pose=light_pose)
                light_pose[:3, 3] = np.array([1, 1, 2]) + trans
                scene.add(light, pose=light_pose)
            if camera is None:
                if K is None:
                    camera_center = np.array([img.shape[1] / 2., img.shape[0] / 2.])
                    camera = pyrender.camera.IntrinsicsCamera(fx=self.focal_length, fy=self.focal_length, cx=camera_center[0], cy=camera_center[1])
                else:
                    camera = pyrender.camera.IntrinsicsCamera(fx=K[nv][0, 0], fy=K[nv][1, 1], cx=K[nv][0, 2], cy=K[nv][1, 2])
            scene.add(camera, pose=camera_pose)
            # Alpha channel was not working previously; needs to be checked again.
            # Until this is fixed, use a hack with the depth image to get the opacity.
            flags = pyrender.RenderFlags.RGBA  # `flags` was undefined in this snippet; RGBA is an assumed value
            color, rend_depth = self.renderer.render(scene, flags=flags)
            # color = color[::-1,::-1]
            # rend_depth = rend_depth[::-1,::-1]
            output_depths.append(rend_depth)
            color = color.astype(np.uint8)
            valid_mask = (rend_depth > 0)[:, :, None]
            if color.shape[2] == 3:  # alpha channel failed on the server
                color = np.dstack((color, (valid_mask*255).astype(np.uint8)))
            output_colors.append(color)
            output_img = (color[:, :, :3] * valid_mask +
                          (1 - valid_mask) * img)
            
            output_img = output_img.astype(np.uint8)
            output_images.append(output_img)
        if return_depth:
            return output_images, output_depths
        elif return_color:
            return output_colors
        else:
            return output_images
Code Example #29
def fit_single_frame(img,
                     keypoints,
                     body_model,
                     camera,
                     joint_weights,
                     body_pose_prior,
                     jaw_prior,
                     left_hand_prior,
                     right_hand_prior,
                     shape_prior,
                     expr_prior,
                     angle_prior,
                     result_fn='out.pkl',
                     mesh_fn='out.obj',
                     out_img_fn='output.png',
                     overlay_img_fn='overlay.png',
                     loss_type='smplify',
                     use_cuda=True,
                     init_joints_idxs=(9, 12, 2, 5),
                     use_face=True,
                     use_hands=True,
                     data_weights=None,
                     body_pose_prior_weights=None,
                     hand_pose_prior_weights=None,
                     jaw_pose_prior_weights=None,
                     shape_weights=None,
                     expr_weights=None,
                     hand_joints_weights=None,
                     face_joints_weights=None,
                     depth_loss_weight=1e2,
                     interpenetration=True,
                     coll_loss_weights=None,
                     df_cone_height=0.5,
                     penalize_outside=True,
                     max_collisions=8,
                     point2plane=False,
                     part_segm_fn='',
                     focal_length=5000.,
                     side_view_thsh=25.,
                     rho=100,
                     vposer_latent_dim=32,
                     vposer_ckpt='',
                     use_joints_conf=False,
                     interactive=True,
                     visualize=False,
                     save_viz=False,
                     pyopengl_platform='osmesa',
                     save_meshes=True,
                     degrees=None,
                     batch_size=1,
                     dtype=torch.float32,
                     ign_part_pairs=None,
                     left_shoulder_idx=2,
                     right_shoulder_idx=5,
                     **kwargs):
    assert batch_size == 1, 'PyTorch L-BFGS only supports batch_size == 1'

    device = torch.device('cuda') if use_cuda else torch.device('cpu')

    if degrees is None:
        degrees = [0, 90, 180, 270]

    if data_weights is None:
        data_weights = [
            1,
        ] * 5

    if body_pose_prior_weights is None:
        body_pose_prior_weights = [4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78]

    msg = ('Number of Body pose prior weights {}'.format(
        len(body_pose_prior_weights)) +
           ' does not match the number of data term weights {}'.format(
               len(data_weights)))
    assert (len(data_weights) == len(body_pose_prior_weights)), msg

    if use_hands:
        if hand_pose_prior_weights is None:
            hand_pose_prior_weights = [1e2, 5 * 1e1, 1e1, .5 * 1e1]
        msg = ('Number of Body pose prior weights does not match the' +
               ' number of hand pose prior weights')
        assert (
            len(hand_pose_prior_weights) == len(body_pose_prior_weights)), msg
        if hand_joints_weights is None:
            hand_joints_weights = [0.0, 0.0, 0.0, 1.0]
        # Check unconditionally (the original only checked when the default was used)
        msg = ('Number of Body pose prior weights does not match the' +
               ' number of hand joint distance weights')
        assert (
            len(hand_joints_weights) == len(body_pose_prior_weights)), msg

    if shape_weights is None:
        shape_weights = [1e2, 5 * 1e1, 1e1, .5 * 1e1]
    msg = ('Number of Body pose prior weights = {} does not match the' +
           ' number of Shape prior weights = {}')
    assert (len(shape_weights) == len(body_pose_prior_weights)), msg.format(
        len(body_pose_prior_weights), len(shape_weights))

    if use_face:
        if jaw_pose_prior_weights is None:
            jaw_pose_prior_weights = [[x] * 3 for x in shape_weights]
        else:
            jaw_pose_prior_weights = map(lambda x: map(float, x.split(',')),
                                         jaw_pose_prior_weights)
            jaw_pose_prior_weights = [list(w) for w in jaw_pose_prior_weights]
        msg = ('Number of Body pose prior weights does not match the' +
               ' number of jaw pose prior weights')
        assert (
            len(jaw_pose_prior_weights) == len(body_pose_prior_weights)), msg

        if expr_weights is None:
            expr_weights = [1e2, 5 * 1e1, 1e1, .5 * 1e1]
        msg = ('Number of Body pose prior weights = {} does not match the' +
               ' number of Expression prior weights = {}')
        assert (len(expr_weights) == len(body_pose_prior_weights)), msg.format(
            len(body_pose_prior_weights), len(expr_weights))

        if face_joints_weights is None:
            face_joints_weights = [0.0, 0.0, 0.0, 1.0]
        msg = ('Number of Body pose prior weights does not match the' +
               ' number of face joint distance weights')
        assert (len(face_joints_weights) == len(body_pose_prior_weights)), msg

    if coll_loss_weights is None:
        coll_loss_weights = [0.0] * len(body_pose_prior_weights)
    msg = ('Number of Body pose prior weights does not match the' +
           ' number of collision loss weights')
    assert (len(coll_loss_weights) == len(body_pose_prior_weights)), msg

    use_vposer = kwargs.get('use_vposer', True)
    vposer, pose_embedding = [
        None,
    ] * 2
    if use_vposer:
        pose_embedding = torch.zeros([batch_size, 32],
                                     dtype=dtype,
                                     device=device,
                                     requires_grad=True)

        vposer_ckpt = osp.expandvars(vposer_ckpt)
        vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')
        vposer = vposer.to(device=device)
        vposer.eval()

    if use_vposer:
        body_mean_pose = torch.zeros([batch_size, vposer_latent_dim],
                                     dtype=dtype)
    else:
        body_mean_pose = body_pose_prior.get_mean().detach().cpu()

    keypoint_data = torch.tensor(keypoints, dtype=dtype)
    gt_joints = keypoint_data[:, :, :2]
    if use_joints_conf:
        joints_conf = keypoint_data[:, :, 2].reshape(1, -1)

    # Transfer the data to the correct device
    gt_joints = gt_joints.to(device=device, dtype=dtype)
    if use_joints_conf:
        joints_conf = joints_conf.to(device=device, dtype=dtype)

    # Create the search tree
    search_tree = None
    pen_distance = None
    filter_faces = None
    if interpenetration:
        from mesh_intersection.bvh_search_tree import BVH
        import mesh_intersection.loss as collisions_loss
        from mesh_intersection.filter_faces import FilterFaces

        assert use_cuda, 'Interpenetration term can only be used with CUDA'
        assert torch.cuda.is_available(), \
            'No CUDA Device! Interpenetration term can only be used' + \
            ' with CUDA'

        search_tree = BVH(max_collisions=max_collisions)

        pen_distance = \
            collisions_loss.DistanceFieldPenetrationLoss(
                sigma=df_cone_height, point2plane=point2plane,
                vectorized=True, penalize_outside=penalize_outside)

        if part_segm_fn:
            # Read the part segmentation
            part_segm_fn = os.path.expandvars(part_segm_fn)
            with open(part_segm_fn, 'rb') as faces_parents_file:
                face_segm_data = pickle.load(faces_parents_file,
                                             encoding='latin1')
            faces_segm = face_segm_data['segm']
            faces_parents = face_segm_data['parents']
            # Create the module used to filter invalid collision pairs
            filter_faces = FilterFaces(
                faces_segm=faces_segm,
                faces_parents=faces_parents,
                ign_part_pairs=ign_part_pairs).to(device=device)

    # Weights used for the pose prior and the shape prior
    opt_weights_dict = {
        'data_weight': data_weights,
        'body_pose_weight': body_pose_prior_weights,
        'shape_weight': shape_weights
    }
    if use_face:
        opt_weights_dict['face_weight'] = face_joints_weights
        opt_weights_dict['expr_prior_weight'] = expr_weights
        opt_weights_dict['jaw_prior_weight'] = jaw_pose_prior_weights
    if use_hands:
        opt_weights_dict['hand_weight'] = hand_joints_weights
        opt_weights_dict['hand_prior_weight'] = hand_pose_prior_weights
    if interpenetration:
        opt_weights_dict['coll_loss_weight'] = coll_loss_weights

    keys = opt_weights_dict.keys()
    opt_weights = [
        dict(zip(keys, vals))
        for vals in zip(*(opt_weights_dict[k] for k in keys
                          if opt_weights_dict[k] is not None))
    ]
    for weight_list in opt_weights:
        for key in weight_list:
            weight_list[key] = torch.tensor(weight_list[key],
                                            device=device,
                                            dtype=dtype)

    # The indices of the joints used for the initialization of the camera
    init_joints_idxs = torch.tensor(init_joints_idxs, device=device)

    edge_indices = kwargs.get('body_tri_idxs')
    init_t = fitting.guess_init(body_model,
                                gt_joints,
                                edge_indices,
                                use_vposer=use_vposer,
                                vposer=vposer,
                                pose_embedding=pose_embedding,
                                model_type=kwargs.get('model_type', 'smpl'),
                                focal_length=focal_length,
                                dtype=dtype)

    camera_loss = fitting.create_loss('camera_init',
                                      trans_estimation=init_t,
                                      init_joints_idxs=init_joints_idxs,
                                      depth_loss_weight=depth_loss_weight,
                                      dtype=dtype).to(device=device)
    camera_loss.trans_estimation[:] = init_t

    loss = fitting.create_loss(loss_type=loss_type,
                               joint_weights=joint_weights,
                               rho=rho,
                               use_joints_conf=use_joints_conf,
                               use_face=use_face,
                               use_hands=use_hands,
                               vposer=vposer,
                               pose_embedding=pose_embedding,
                               body_pose_prior=body_pose_prior,
                               shape_prior=shape_prior,
                               angle_prior=angle_prior,
                               expr_prior=expr_prior,
                               left_hand_prior=left_hand_prior,
                               right_hand_prior=right_hand_prior,
                               jaw_prior=jaw_prior,
                               interpenetration=interpenetration,
                               pen_distance=pen_distance,
                               search_tree=search_tree,
                               tri_filtering_module=filter_faces,
                               dtype=dtype,
                               **kwargs)
    loss = loss.to(device=device)

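    # Choose the PyOpenGL backend (e.g. 'egl' for headless rendering) before
    # any OpenGL context is created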
    os.environ['PYOPENGL_PLATFORM'] = pyopengl_platform
    with fitting.FittingMonitor(batch_size=batch_size,
                                visualize=visualize,
                                **kwargs) as monitor:

        img = torch.tensor(img, dtype=dtype)

        H, W, _ = img.shape

        # Normalize the 2D data term by the image height so the weighting does
        # not depend on the input resolution
        data_weight = 1000 / H
        camera_loss.reset_loss_weights({'data_weight': data_weight})

        # Reset the parameters to estimate the initial translation of the
        # body model
        body_model.reset_params(body_pose=body_mean_pose)

        # If the distance between the 2D shoulders is smaller than a
        # predefined threshold, try two fits: the initial orientation and a
        # 180 degree rotation
        shoulder_dist = torch.dist(gt_joints[:, left_shoulder_idx],
                                   gt_joints[:, right_shoulder_idx])
        try_both_orient = shoulder_dist.item() < side_view_thsh

        # Update the value of the translation of the camera as well as
        # the image center.
        with torch.no_grad():
            camera.translation[:] = init_t.view_as(camera.translation)
            camera.center[:] = torch.tensor([W, H], dtype=dtype) * 0.5

        # Re-enable gradient calculation for the camera translation
        camera.translation.requires_grad = True

        camera_opt_params = [camera.translation, body_model.global_orient]

        camera_optimizer, camera_create_graph = optim_factory.create_optimizer(
            camera_opt_params, **kwargs)

        # The closure passed to the optimizer
        fit_camera = monitor.create_fitting_closure(
            camera_optimizer,
            body_model,
            camera,
            gt_joints,
            camera_loss,
            create_graph=camera_create_graph,
            use_vposer=use_vposer,
            vposer=vposer,
            pose_embedding=pose_embedding,
            return_full_pose=False,
            return_verts=False)

        # Step 1: Optimize the camera translation using only the torso joints.
        # Initialize the computational graph by feeding the initial translation
        # of the camera and the initial pose of the body model.
        camera_init_start = time.time()
        cam_init_loss_val = monitor.run_fitting(camera_optimizer,
                                                fit_camera,
                                                camera_opt_params,
                                                body_model,
                                                use_vposer=use_vposer,
                                                pose_embedding=pose_embedding,
                                                vposer=vposer)

        if interactive:
            if use_cuda and torch.cuda.is_available():
                torch.cuda.synchronize()
            tqdm.write('Camera initialization done after {:.4f} seconds'.format(
                time.time() - camera_init_start))
            tqdm.write('Camera initialization final loss {:.4f}'.format(
                cam_init_loss_val))

        # If the 2D detections of the shoulder joints are too close together,
        # also fit with the body rotated by 180 degrees and keep the better
        # orientation
        if try_both_orient:
            body_orient = body_model.global_orient.detach().cpu().numpy()
            flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
                cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
            flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()

            flipped_orient = torch.tensor(flipped_orient,
                                          dtype=dtype,
                                          device=device).unsqueeze(dim=0)
            orientations = [body_orient, flipped_orient]
        else:
            orientations = [body_model.global_orient.detach().cpu().numpy()]

        # Store the final error for each orientation and pick the orientation
        # resulting in the lowest error
        results = []

        # Step 2: Optimize the full model
        final_loss_val = 0
        for or_idx, orient in enumerate(tqdm(orientations,
                                             desc='Orientation')):
            opt_start = time.time()

            new_params = dict(global_orient=orient,
                              body_pose=body_mean_pose)
            body_model.reset_params(**new_params)
            if use_vposer:
                with torch.no_grad():
                    pose_embedding.fill_(0)

            for opt_idx, curr_weights in enumerate(
                    tqdm(opt_weights, desc='Stage')):

                body_params = list(body_model.parameters())

                final_params = list(
                    filter(lambda x: x.requires_grad, body_params))

                if use_vposer:
                    final_params.append(pose_embedding)

                body_optimizer, body_create_graph = optim_factory.create_optimizer(
                    final_params, **kwargs)
                body_optimizer.zero_grad()

                curr_weights['data_weight'] = data_weight
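                # Anneal the knee/elbow bending prior together with the body
                # pose prior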
                curr_weights['bending_prior_weight'] = (
                    3.17 * curr_weights['body_pose_weight'])
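                # Entries 25:76 of the per-joint weight vector correspond to
                # the hand keypoints and 76 onwards to the face keypoints;
                # scale them with the stage-specific weights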
                if use_hands:
                    joint_weights[:, 25:76] = curr_weights['hand_weight']
                if use_face:
                    joint_weights[:, 76:] = curr_weights['face_weight']
                loss.reset_loss_weights(curr_weights)

                closure = monitor.create_fitting_closure(
                    body_optimizer,
                    body_model,
                    camera=camera,
                    gt_joints=gt_joints,
                    joints_conf=joints_conf,
                    joint_weights=joint_weights,
                    loss=loss,
                    create_graph=body_create_graph,
                    use_vposer=use_vposer,
                    vposer=vposer,
                    pose_embedding=pose_embedding,
                    return_verts=True,
                    return_full_pose=True)

                if interactive:
                    if use_cuda and torch.cuda.is_available():
                        torch.cuda.synchronize()
                    stage_start = time.time()
                final_loss_val = monitor.run_fitting(
                    body_optimizer,
                    closure,
                    final_params,
                    body_model,
                    pose_embedding=pose_embedding,
                    vposer=vposer,
                    use_vposer=use_vposer)

                if interactive:
                    if use_cuda and torch.cuda.is_available():
                        torch.cuda.synchronize()
                    elapsed = time.time() - stage_start
                    tqdm.write(
                        'Stage {:03d} done after {:.4f} seconds'.format(
                            opt_idx, elapsed))

            if interactive:
                if use_cuda and torch.cuda.is_available():
                    torch.cuda.synchronize()
                elapsed = time.time() - opt_start
                tqdm.write(
                    'Body fitting Orientation {} done after {:.4f} seconds'.
                    format(or_idx, elapsed))
                tqdm.write(
                    'Body final loss val = {:.5f}'.format(final_loss_val))

            # Get the result of the fitting process and store the final loss
            # with it, so that multiple orientations can be compared
            result = {
                'camera_' + str(key): val.detach().cpu().numpy()
                for key, val in camera.named_parameters()
            }
            result.update({
                key: val.detach().cpu().numpy()
                for key, val in body_model.named_parameters()
            })
            if use_vposer:
                result['body_pose'] = pose_embedding.detach().cpu().numpy()

            if save_meshes or save_viz:
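                # Decode the VPoser latent embedding back into axis-angle
                # ('aa') body pose parameters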
                body_pose = vposer.decode(pose_embedding,
                                          output_type='aa').view(
                                              1, -1) if use_vposer else None

                model_type = kwargs.get('model_type', 'smpl')
                append_wrists = model_type == 'smpl' and use_vposer
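                # VPoser decodes only the 21 body joints (63 parameters),
                # while SMPL's body_pose expects 23 joints; pad the two wrist
                # joints with zeros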
                if append_wrists:
                    wrist_pose = torch.zeros([body_pose.shape[0], 6],
                                             dtype=body_pose.dtype,
                                             device=body_pose.device)
                    body_pose = torch.cat([body_pose, wrist_pose], dim=1)
                model_output = body_model(return_verts=False,
                                          body_pose=body_pose)
                joints = model_output.joints.detach().cpu().numpy()
                result['body_joints'] = joints
                result['body_pose_aa'] = body_pose.detach().cpu().numpy()

            results.append({'loss': final_loss_val, 'result': result})

        with open(result_fn, 'wb') as result_file:
            # Keep the orientation that reached the lowest final loss
            min_idx = min(range(len(results)),
                          key=lambda idx: results[idx]['loss'])
            pickle.dump(results[min_idx]['result'], result_file, protocol=2)

    if save_meshes or save_viz:
        body_pose = vposer.decode(pose_embedding, output_type='aa').view(
            1, -1) if use_vposer else None

        model_type = kwargs.get('model_type', 'smpl')
        append_wrists = model_type == 'smpl' and use_vposer
        if append_wrists:
            wrist_pose = torch.zeros([body_pose.shape[0], 6],
                                     dtype=body_pose.dtype,
                                     device=body_pose.device)
            body_pose = torch.cat([body_pose, wrist_pose], dim=1)

        model_output = body_model(return_verts=True, body_pose=body_pose)
        vertices = model_output.vertices.detach().cpu().numpy().squeeze()

        import trimesh

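        # The fitted camera follows a vision convention (y down, z towards the
        # scene); rotate the mesh 180 degrees about x so the export is upright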
        out_mesh = trimesh.Trimesh(vertices, body_model.faces)
        rot = trimesh.transformations.rotation_matrix(np.radians(180),
                                                      [1, 0, 0])
        out_mesh.apply_transform(rot)
        out_mesh.export(mesh_fn)

    if save_viz:
        import pyrender

        camera_center = camera.center.detach().cpu().numpy().squeeze()
        camera_transl = camera.translation.detach().cpu().numpy().squeeze()
        # The x-axis is already flipped because of the difference in
        # coordinate systems; flip y and z so that the transform can be
        # applied to the mesh rather than the camera
        camera_transl[1] *= -1.0
        camera_transl[2] *= -1.0

        # set up the scene
        material = pyrender.MetallicRoughnessMaterial(
            metallicFactor=0.0,
            alphaMode='BLEND',
            baseColorFactor=(1.0, 1.0, 0.9, 1.0))
        scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                               ambient_light=(0.3, 0.3, 0.3))

        # Always keep the camera at the origin and transform the mesh instead
        camera_pose = np.eye(4)
        # camera_pose[:3, 3] = camera_transl

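        # Render with the same pinhole intrinsics used during fitting so the
        # mesh lines up with the input image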
        camera = pyrender.camera.IntrinsicsCamera(fx=focal_length,
                                                  fy=focal_length,
                                                  cx=camera_center[0],
                                                  cy=camera_center[1])
        main_cam_node = scene.add(camera, pose=camera_pose)

        # add lights for front view
        light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1)

        light_pose = np.eye(4)
        light_pose[:3, 3] = [0, -1, camera_transl[2] + 1]
        light0_node = scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [0, 1, camera_transl[2] + 1]
        light1_node = scene.add(light, pose=light_pose)

        light_pose[:3, 3] = [1, 1, camera_transl[2] + 2]
        light2_node = scene.add(light, pose=light_pose)

        r = pyrender.OffscreenRenderer(viewport_width=W,
                                       viewport_height=H,
                                       point_size=1.0)

        # FRONT VIEW
        trans = trimesh.transformations.translation_matrix(
            camera_transl)  # move mesh, not camera
        front_mesh = out_mesh.copy()
        front_mesh.apply_transform(trans)
        mesh = pyrender.Mesh.from_trimesh(front_mesh, material=material)
        mesh_node = scene.add(mesh, 'mesh')

        color, _ = r.render(scene)
        tqdm.write('Front rendering done!')

        color = color.astype(np.float32) / 255

        output_img = color.copy()  # (H, W, 3)
        og_img = img.cpu().numpy()

        # Save the mesh-only render from the front view
        front_img = pil_img.fromarray((output_img * 255).astype(np.uint8))
        tok = out_img_fn.split('.')
        front_out_path = '.'.join(tok[:-1]) + '_front.' + tok[-1]
        front_img.save(front_out_path)

        # and overlaid on original image
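        # (every non-black rendered pixel is treated as foreground)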
        fg_pixels = np.linalg.norm(output_img, axis=2) > 0.0
        og_img[fg_pixels, :] = output_img[fg_pixels, :]
        overlay_img = pil_img.fromarray((og_img * 255).astype(np.uint8))
        overlay_img.save(overlay_img_fn)

        # SIDE VIEW
        scene.remove_node(mesh_node)
        # update transformation for side view
        rot = trimesh.transformations.rotation_matrix(np.radians(90),
                                                      [0, 1, 0])
        side_transl = np.copy(camera_transl)  # currently unused, see TODO
        # TODO: for now reuse the front-view translation and only rotate the
        # mesh; a proper side placement can be rendered after the fact
        # trans = trimesh.transformations.translation_matrix([camera_transl[2], camera_transl[1], camera_transl[0]])
        side_transform = trimesh.transformations.concatenate_matrices(
            trans, rot)

        side_mesh = out_mesh.copy()
        side_mesh.apply_transform(side_transform)
        mesh = pyrender.Mesh.from_trimesh(side_mesh, material=material)
        mesh_node = scene.add(mesh, 'mesh')

        # TODO move lights

        color, _ = r.render(scene)
        tqdm.write('Side rendering done!')
        color = color.astype(np.float32) / 255
        output_img = color.copy()  # (H, W, 3)
        # Save the mesh-only render from the side view
        side_img = pil_img.fromarray((output_img * 255).astype(np.uint8))
        side_out_path = '.'.join(tok[:-1]) + '_side.' + tok[-1]
        side_img.save(side_out_path)