Example #1
    def __init__(self,
                 width,
                 height,
                 camera_distance=0.05,
                 pose_y=0.0,
                 focal_length=None):

        self.camera = pyrender.OrthographicCamera(xmag=1.0,
                                                  ymag=1.0,
                                                  znear=0.05)

        if focal_length:
            focal_length = focal_length * height
            self.camera = pyrender.IntrinsicsCamera(focal_length, focal_length,
                                                    width / 2, height / 2,
                                                    0.05, 5.05)
        self.width = width
        self.height = height
        self.global_tr = np.array([[1.0, 0.0, 0.0, 0.0],
                                   [0.0, 1.0, 0.0, pose_y],
                                   [0.0, 0.0, 1.0, 0.0],
                                   [0.0, 0.0, 0.0, 1.0]])

        self.camera_pose = np.array([[1.0, 0.0, 0.0, 0.0],
                                     [0.0, 1.0, 0.0, 0.0],
                                     [0.0, 0.0, 1.0, camera_distance],
                                     [0.0, 0.0, 0.0, 1.0]])

        self.tri_mesh = self.py_mesh = self.vertices = self.faces = self.render = self.py_scene = None
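The constructor above assumes module-level imports of numpy as np and pyrender. A minimal standalone sketch of its camera-selection logic, with placeholder width/height values not taken from the source:

import pyrender

width, height, focal_length = 512, 512, None  # placeholder values
# Default: orthographic camera, as in the snippet above
camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0, znear=0.05)
if focal_length:
    # focal_length is interpreted as a fraction of the image height
    f = focal_length * height
    camera = pyrender.IntrinsicsCamera(f, f, width / 2, height / 2, 0.05, 5.05)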
Example #2
def render_face_transparent(mesh, background=None):
    """
    mesh location should be normalized
    :param mesh:
    :param background:
    :return:
    """
    mesh.visual.face_colors = np.array([0.5, 0.5, 0.5, 1])  # np.array([0.05, 0.1, 0.2, 1])

    mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
    # mesh = pyrender.Mesh.from_trimesh(mesh)

    scene.add(mesh, pose=np.eye(4))
    camera_pose = np.eye(4)
    # camera_pose[0, 3] = 1
    # camera_pose[1, 3] = 1
    # camera_pose[2, 3] = -10
    # camera_pose[0, 0] = 1
    # camera_pose[1, 1] = -1
    # camera_pose[2, 2] = -1
    #
    # camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
    camera_pose[0, 3] = 1
    camera_pose[1, 3] = 1
    camera_pose[2, 3] = 10
    camera_pose[0, 0] = 1
    camera_pose[1, 1] = 1
    camera_pose[2, 2] = 1

    camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
    scene.add(camera, pose=camera_pose)
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=5.0)
    scene.add(light, pose=camera_pose)
    color, depth = r.render(scene, flags=pyrender.RenderFlags.RGBA)
    scene.clear()

    # print(color.shape)
    color = np.array(color)
    color = color[::-1]
    if background is not None:
        bg_rgba = Image.fromarray(np.array(background)).convert('RGBA')

        color[color[:, :, 0] == 255, 3] = 0
        color[color[:, :, 3] == 255, 3] = 166

        color = Image.fromarray(color).convert('RGBA')
        final = Image.alpha_composite(bg_rgba, color)
        color = np.array(final)

        #
        # new_color = np.array(background)
        # new_color[color != 255] = color[color != 255]
        # color = new_color

    return color
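render_face_transparent (and the similar render_face_orthographic and renderLight further below) relies on module-level objects that the excerpt does not show: a pyrender.Scene named scene and an OffscreenRenderer named r. A minimal sketch of that assumed setup, with a guessed viewport size:

import numpy as np
import pyrender
from PIL import Image

scene = pyrender.Scene(ambient_light=[0.5, 0.5, 0.5])  # ambient level is an assumption
r = pyrender.OffscreenRenderer(viewport_width=256, viewport_height=256)  # size is an assumption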
Example #3
def renderVertices(vertices):
    mesh = pyrender.Mesh.from_points([[v[0], v[1], -1.0] for v in vertices])
    cam = pyrender.OrthographicCamera(xmag=ORTHO_CAM[0], ymag=ORTHO_CAM[1])
    pos = np.eye(4)
    scene = pyrender.Scene()
    scene.add(mesh)
    scene.add(cam, pose=pos)
    pyrender.Viewer(scene,
                    VIEWPORT_SIZE,
                    all_wireframe=True,
                    cull_faces=False,
                    use_perspective_cam=False)
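renderVertices, renderMesh, and renderMeshWithCircumcenters depend on module-level constants ORTHO_CAM and VIEWPORT_SIZE (and, for the mesh variants, a trimeshFromMesh helper) that are not part of the excerpt. Plausible placeholder definitions:

ORTHO_CAM = (1.0, 1.0)      # (xmag, ymag) for the orthographic camera; assumed values
VIEWPORT_SIZE = (640, 480)  # viewer window size in pixels; assumed value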
Example #4
def renderMesh(mesh):
    tm = trimeshFromMesh(mesh)
    sceneMesh = pyrender.Mesh.from_trimesh(tm, smooth=False)
    cam = pyrender.OrthographicCamera(xmag=ORTHO_CAM[0], ymag=ORTHO_CAM[1])
    pos = np.eye(4)
    scene = pyrender.Scene()
    scene.add(sceneMesh)
    scene.add(cam, pose=pos)
    pyrender.Viewer(scene,
                    VIEWPORT_SIZE,
                    all_wireframe=True,
                    cull_faces=False,
                    use_perspective_cam=False)
Example #5
    def _get_projection(self, tm_mesh, render_size):
        tm_scene = tm_mesh.scene()

        tm_scene = tm_scene.scaled(0.8 / np.max(tm_scene.extents))
        scene = pyrender.Scene.from_trimesh_scene(tm_scene)

        camera = pyrender.OrthographicCamera(xmag=0.5, ymag=0.5)
        camera_position = tm_mesh.scene().camera_transform
        scene.add(camera, pose=camera_position)

        r = pyrender.OffscreenRenderer(render_size, render_size)
        depth_buffer = r.render(scene, flags=RF.DEPTH_ONLY)
        r.delete()

        return depth_buffer
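The RF.DEPTH_ONLY flag above implies an aliased import of pyrender's render flags; with DEPTH_ONLY set, render() returns only the depth buffer, which is why the result is not unpacked into a color/depth pair. The assumed import:

from pyrender.constants import RenderFlags as RF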
Example #6
def get_image_array(model_path, camera_pose):

    mesh = pyrender.Mesh.from_trimesh(trimesh.load(model_path))

    camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0)

    scene = pyrender.Scene()
    scene.add(mesh)
    scene.add(camera, pose=camera_pose)

    renderer = pyrender.OffscreenRenderer(resolution, resolution)
    color, depth = renderer.render(scene)

    depth_uint8 = (fitted_normalize(depth) * 255).astype(np.uint8)

    return depth_uint8
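get_image_array uses two module-level names the excerpt does not define: resolution (the square render size) and fitted_normalize (which scales the depth map into [0, 1] before the uint8 conversion). A hypothetical stand-in:

import numpy as np

resolution = 256  # assumed square render size in pixels

def fitted_normalize(depth):
    # Hypothetical helper: scale a depth map to [0, 1], ignoring empty (zero) pixels.
    out = np.zeros_like(depth, dtype=np.float64)
    valid = depth > 0
    if valid.any():
        d_min, d_max = depth[valid].min(), depth[valid].max()
        out[valid] = (depth[valid] - d_min) / max(d_max - d_min, 1e-9)
    return out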
Example #7
def projectMeshCachedDebug(scene, f, R, t, sensorSize, ortho, mag, debug):
    # In OpenGL, camera points toward -z by default, hence we don't need rFix like in the MATLAB code
    sensorWidth = sensorSize[0]
    sensorHeight = sensorSize[1]
    fovHorizontal = 2 * np.arctan((sensorWidth / 2) / f)
    fovVertical = 2 * np.arctan((sensorHeight / 2) / f)

    if ortho:
        camera = pyrender.OrthographicCamera(xmag=mag, ymag=mag)
    else:
        camera = pyrender.PerspectiveCamera(fovVertical)

    camera_pose = np.eye(4)
    camera_pose[0:3, 0:3] = R
    camera_pose[0:3, 3] = t
    cameraNode = scene.add(camera, pose=camera_pose)

    scene._ambient_light = np.ones((3, ))

    r = pyrender.OffscreenRenderer(sensorWidth, sensorHeight)
    meshProjection, depth = r.render(scene)  # TODO: this thing consumes ~14 GB RAM!!!
    r.delete()  # this releases that; but it should not require so much RAM in the first place

    # XYZ cut
    scaling = 1.0 / f

    spaceCoordinateSystem = np.eye(3)

    sensorCoordinateSystem = np.matmul(R, spaceCoordinateSystem)
    sensorXAxis = sensorCoordinateSystem[:, 0]
    sensorYAxis = -sensorCoordinateSystem[:, 1]
    # make camera point toward -z by default, as in OpenGL
    cameraDirection = -sensorCoordinateSystem[:, 2]  # unit vector

    xyzCut, pts = buildXYZcut(sensorWidth, sensorHeight, t, cameraDirection,
                              scaling, sensorXAxis, sensorYAxis, depth)

    XYZpc = -1
    if debug:
        XYZpc = o3d.geometry.PointCloud()
        XYZpc.points = o3d.utility.Vector3dVector(pts)

    scene.remove_node(cameraNode)

    return meshProjection, xyzCut, depth, XYZpc
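projectMeshCachedDebug additionally assumes the imports below and a buildXYZcut helper from the same codebase which, judging by its arguments and the point cloud built from its output, back-projects the depth buffer into 3D points; the helper itself is not reproduced here.

import numpy as np
import open3d as o3d
import pyrender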
Example #8
def renderMeshWithCircumcenters(mesh, circumcenters):
    tm = trimeshFromMesh(mesh)
    sceneMesh = pyrender.Mesh.from_trimesh(tm, smooth=False)
    scenePoints = pyrender.Mesh.from_points([[v[0], v[1], -1.0]
                                             for v in circumcenters])
    cam = pyrender.OrthographicCamera(xmag=ORTHO_CAM[0], ymag=ORTHO_CAM[1])
    pos = np.eye(4)
    scene = pyrender.Scene()
    scene.add(sceneMesh)
    scene.add(scenePoints)
    scene.add(cam, pose=pos)
    pyrender.Viewer(scene,
                    VIEWPORT_SIZE,
                    all_wireframe=True,
                    cull_faces=False,
                    use_perspective_cam=False,
                    point_size=5)
Example #9
def render_face_orthographic(mesh, background=None):
    """
    mesh location should be normalized
    :param mesh:
    :param background:
    :return:
    """
    mesh.visual.face_colors = np.array([0.05, 0.1, 0.2, 1])

    mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
    # mesh = pyrender.Mesh.from_trimesh(mesh)

    scene.add(mesh, pose=np.eye(4))
    camera_pose = np.eye(4)
    # camera_pose[0, 3] = 1
    # camera_pose[1, 3] = 1
    # camera_pose[2, 3] = -10
    # camera_pose[0, 0] = 1
    # camera_pose[1, 1] = -1
    # camera_pose[2, 2] = -1
    #
    # camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
    camera_pose[0, 3] = 1
    camera_pose[1, 3] = 1
    camera_pose[2, 3] = 10
    camera_pose[0, 0] = 1
    camera_pose[1, 1] = 1
    camera_pose[2, 2] = 1

    camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
    scene.add(camera, pose=camera_pose)
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=5.0)
    scene.add(light, pose=camera_pose)
    color, depth = r.render(scene)
    scene.clear()

    # print(color.shape)
    color = np.array(color)
    color = color[::-1]
    if background is not None:
        new_color = np.array(background)
        new_color[color != 255] = color[color != 255]
        color = new_color
    return color
Example #10
def renderLight(posmap, init_image=None, is_render=True):
    tex = np.ones((256, 256, 3)) / 2
    mesh = UVmap2Mesh(posmap, tex, is_extra_triangle=False)
    vertices = mesh['vertices']
    triangles = mesh['triangles']
    colors = mesh['colors'] / np.max(mesh['colors'])
    file = 'tmp/light/test.obj'
    write_obj_with_colors(file, vertices, triangles, colors)

    obj = trimesh.load(file)
    # obj.visual.vertex_colors = np.random.uniform(size=obj.vertices.shape)
    obj.visual.face_colors = np.array([0.05, 0.1, 0.2])

    mesh = pyrender.Mesh.from_trimesh(obj, smooth=False)

    scene.add(mesh, pose=np.eye(4))

    camera_pose = np.eye(4)
    camera_pose[0, 3] = 128
    camera_pose[1, 3] = 128
    camera_pose[2, 3] = 300
    camera = pyrender.OrthographicCamera(xmag=128, ymag=128, zfar=1000)

    scene.add(camera, pose=camera_pose)
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=8.0)
    scene.add(light, pose=camera_pose)
    color, depth = r.render(scene)
    if is_render:
        plt.imshow(color)
        plt.show()

    if init_image is not None:
        sum_mask = np.mean(color, axis=-1)
        fuse_img = color.copy()
        fuse_img[sum_mask > 128] = init_image[sum_mask > 128]
        if is_render:
            plt.imshow(fuse_img)
            plt.show()
        scene.clear()
        return fuse_img

    scene.clear()
    return color
Example #11
def render(obj, cam_pos):
    # create scene
    scene = pyrender.Scene()

    # MESHES
    obj_trimesh = trimesh.load(obj)
    obj_trimesh.vertices = scaleVertices(obj_trimesh.vertices,
                                         feature_range=(-1, 1))

    mesh = pyrender.Mesh.from_trimesh(obj_trimesh)
    nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))
    scene.add_node(nm)

    # CAMERA
    cam = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0)
    nc = pyrender.Node(camera=cam, matrix=cam_pos)
    scene.add_node(nc)

    # RENDER
    flags = RenderFlags.DEPTH_ONLY
    r = pyrender.OffscreenRenderer(400, 400)
    depth = r.render(scene, flags=flags)
    return (depth)
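The render function above uses RenderFlags unqualified and a scaleVertices helper that is not part of the excerpt. The import below is what pyrender provides; the helper is a hypothetical per-axis min-max rescaler matching the feature_range keyword used above:

import numpy as np
from pyrender.constants import RenderFlags

def scaleVertices(vertices, feature_range=(-1, 1)):
    # Hypothetical helper: rescale each coordinate axis into feature_range.
    v = np.asarray(vertices, dtype=np.float64)
    lo, hi = feature_range
    v_min, v_max = v.min(axis=0), v.max(axis=0)
    span = np.where(v_max > v_min, v_max - v_min, 1.0)
    return lo + (v - v_min) * (hi - lo) / span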
Example #12
def evaluate(resnet50, regression, flamelayer, dataloader):

    NoWDataLoader = dataloader
    for batch_idx, data_batched in enumerate(NoWDataLoader):
        # print(batch_idx, data_batched['images'].shape)
        cur_batch, cur_batch_shape = data_batched['images'], data_batched['images'].shape
        cur_facepos = data_batched['faceposes']
        cur_facepos = cur_facepos.cuda()
        reshaped_batch = cur_batch.permute(1, 0, 2, 3, 4)
        # reshaped_facepos = cur_facepos.permute(1, 0, 2, 3, 4)
        # print(cur_facepos.size())
        # print(reshaped_facepos.size())

        # Output for each image in the ring
        # regress_outputs, flame_vertices, flame_lmks, flame_cams, flame_proj_lmks = [], [], [], [], []
        regress_outputs, flame_vertices, flame_proj_lmks = [], [], []
        shape_norms, exp_norms = 0.0, 0.0
        # print(reshaped_batch.size())
        for img_batch in reshaped_batch:
            # ResNet50
            # res_output = resnet50(img_batch.float())
            res_output = resnet50(img_batch.float().cuda())

            # Empty estimates as the initial value for concatenation
            regress_estimates = torch.zeros(
                [res_output.shape[0], regress_out_size]).cuda()
            # Regression model
            for _ in range(regress_iteration_cnt):
                # Preprocess regression input - concatenation
                regress_input = torch.cat([res_output, regress_estimates], 1)
                regress_estimates = regression(regress_input)
            regress_output = regress_estimates
            regress_outputs.append(regress_output)

            # FLAME model
            cam_params, pose_params = regress_output[:, 0:3], regress_output[:, 3:9]
            shape_params, exp_params = regress_output[:, 9:109], regress_output[:, 109:159]
            # print(cam_params.shape, pose_params.shape, shape_params.shape, exp_params.shape)
            flame_vert, flame_lmk = flamelayer(shape_params, exp_params,
                                               pose_params)
            flame_vertices.append(flame_vert)
            # flame_lmks.append(flame_lmk)
            # flame_cams.append(cam_params)
            # print("raw flame landmarks")
            # print(flame_lmk)
            # print("cam ")
            # print(cam_params)
            flame_proj_lmk = project_points(flame_lmk, cam_params)

            faces = flamelayer.faces
            vertices = flame_vert.cpu().detach().numpy()[0]
            joints = flame_lmk.cpu().detach().numpy()[0]
            # vertex_colors = np.ones([vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8]

            # print("All info about faces: 1. length 2. itself")
            # print(len(faces))
            # print(faces)
            tri_mesh = trimesh.Trimesh(vertices, faces)
            mesh = pyrender.Mesh.from_trimesh(tri_mesh)
            scene = pyrender.Scene()
            scene.add(mesh)
            sm = trimesh.creation.uv_sphere(radius=0.005)
            sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0]
            tfs = np.tile(np.eye(4), (len(joints), 1, 1))
            tfs[:, :3, 3] = joints
            joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)
            scene.add(joints_pcl)
            scale = cam_params[0, 0]
            oc = pyrender.OrthographicCamera(scale, scale)
            pyrender.Viewer(scene, use_raymond_lighting=True)

            flame_proj_lmks.append(flame_proj_lmk)
            shape_norms += torch.norm(shape_params)
            exp_norms += torch.norm(exp_params)
Example #13
    def render_mesh_pc_bed_pyrender_everything_synth(self, smpl_verts, smpl_faces, camera_point, bedangle, RESULTS_DICT,
                                    smpl_verts_gt = None, pmat = None, smpl_render_points = False, markers = None,
                                    dropout_variance=None, tf_corners = None, save_name = 'test_synth'):

        pmat *= 0.75
        pmat[pmat>0] += 10

        viz_popup = False

        #print np.min(smpl_verts[:, 0])
        #print np.min(smpl_verts[:, 1])

        shift_estimate_sideways = np.min([-0.15, np.min(smpl_verts[:, 1])])
        #print shift_estimate_sideways
        shift_estimate_sideways = 0.8 - shift_estimate_sideways

        top_smpl_vert = np.max(smpl_verts[:, 0])
        extend_top_bottom = np.max([np.max(smpl_verts[:, 0]), 64*.0286]) - 64*.0286
        print(extend_top_bottom, 'extend top bot')


        shift_both_amount = np.max([0.9, np.max(smpl_verts[:, 1])]) #if smpl is bigger than 0.9 shift less
        shift_both_amount = 1.5 - shift_both_amount + (0.15 + np.min([-0.15, np.min(smpl_verts[:, 1])]))

        smpl_verts_quad = np.concatenate((smpl_verts, np.ones((smpl_verts.shape[0], 1))), axis = 1)
        smpl_verts_quad = np.swapaxes(smpl_verts_quad, 0, 1)


        smpl_verts_quad_gt = np.concatenate((smpl_verts_gt, np.ones((smpl_verts_gt.shape[0], 1))), axis = 1)
        smpl_verts_quad_gt = np.swapaxes(smpl_verts_quad_gt, 0, 1)

        #print smpl_verts_quad.shape

        shift_ground_truth = 1.3

        transform_A = np.identity(4)
        transform_A[1, 3] = shift_both_amount

        transform_B = np.identity(4)
        transform_B[1, 3] = shift_estimate_sideways + shift_both_amount#4.0 #move things over
        smpl_verts_B = np.swapaxes(np.matmul(transform_B, smpl_verts_quad), 0, 1)[:, 0:3]

        transform_C = np.identity(4)
        transform_C[1, 3] = shift_estimate_sideways + shift_both_amount+shift_ground_truth #move things over
        smpl_verts_C = np.swapaxes(np.matmul(transform_C, smpl_verts_quad_gt), 0, 1)[:, 0:3]



        from matplotlib import cm


        human_mesh_vtx_all, human_mesh_face_all = self.get_human_mesh_parts(smpl_verts_B, smpl_faces, segment_limbs=False)

        #GET MESH WITH PMAT
        tm_curr = trimesh.base.Trimesh(vertices=np.array(human_mesh_vtx_all[0]), faces = np.array(human_mesh_face_all[0]))
        tm_list = [tm_curr]
        original_mesh = [tm_curr]

        mesh_list = []
        mesh_list.append(pyrender.Mesh.from_trimesh(tm_list[0], material = self.human_mat, smooth=True))#wireframe = False)) #this is for the main human


        human_mesh_vtx_all_gt, human_mesh_face_all_gt = self.get_human_mesh_parts(smpl_verts_C, smpl_faces, segment_limbs=False)

        #GET MESH GT WITH PMAT
        tm_curr_gt = trimesh.base.Trimesh(vertices=np.array(human_mesh_vtx_all_gt[0]), faces = np.array(human_mesh_face_all_gt[0]))
        tm_list_gt = [tm_curr_gt]
        original_mesh_gt = [tm_curr_gt]

        mesh_list_gt = []
        mesh_list_gt.append(pyrender.Mesh.from_trimesh(tm_list_gt[0], material = self.human_mat_gt, smooth=True))#wireframe = False)) #this is for the main human


        fig = plt.figure()
        if self.render == True:


            artag_meshes = []
            artag_tm = trimesh.base.Trimesh(vertices=self.artag_r + [0.0, shift_estimate_sideways + shift_both_amount, 0.0], faces=self.artag_f, face_colors = self.artag_facecolors)
            artag_meshes.append(pyrender.Mesh.from_trimesh(artag_tm, smooth = False))


            artag_meshes_gt = []
            artag_tm_gt = trimesh.base.Trimesh(vertices=self.artag_r + [0.0, shift_estimate_sideways + shift_both_amount+shift_ground_truth, 0.0], faces=self.artag_f, face_colors = self.artag_facecolors_gt)
            artag_meshes_gt.append(pyrender.Mesh.from_trimesh(artag_tm_gt, smooth = False))



            if pmat is not None:
                pmat_verts, pmat_faces, pmat_facecolors = self.get_3D_pmat_markers(pmat, bedangle)
                pmat_verts = np.array(pmat_verts)
                pmat_verts = np.concatenate((np.swapaxes(pmat_verts, 0, 1), np.ones((1, pmat_verts.shape[0]))), axis = 0)
                pmat_verts = np.swapaxes(np.matmul(transform_A, pmat_verts), 0, 1)[:, 0:3]
                pmat_tm = trimesh.base.Trimesh(vertices=pmat_verts, faces=pmat_faces, face_colors = pmat_facecolors)
                pmat_mesh = pyrender.Mesh.from_trimesh(pmat_tm, smooth = False)

                pmat_verts2, _, pmat_facecolors2 = self.get_3D_pmat_markers(pmat, bedangle, solidcolor = True)
                pmat_verts2 = np.array(pmat_verts2)
                pmat_verts2 = np.concatenate((np.swapaxes(pmat_verts2, 0, 1), np.ones((1, pmat_verts2.shape[0]))), axis = 0)
                pmat_verts2 = np.swapaxes(np.matmul(transform_B, pmat_verts2), 0, 1)[:, 0:3]
                pmat_tm2 = trimesh.base.Trimesh(vertices=pmat_verts2, faces=pmat_faces, face_colors = pmat_facecolors2)
                pmat_mesh2 = pyrender.Mesh.from_trimesh(pmat_tm2, smooth = False)

            else:
                pmat_mesh = None
                pmat_mesh2 = None


            #print "Viewing"
            if self.first_pass == True:

                for mesh_part in mesh_list:
                    self.scene.add(mesh_part)

                for mesh_part_gt in mesh_list_gt:
                    self.scene.add(mesh_part_gt)

                if pmat_mesh is not None:
                    self.scene.add(pmat_mesh)

                if pmat_mesh2 is not None:
                    self.scene.add(pmat_mesh2)

                for artag_mesh in artag_meshes:
                    if artag_mesh is not None:
                        self.scene.add(artag_mesh)

                for artag_mesh_gt in artag_meshes_gt:
                    if artag_mesh_gt is not None:
                        self.scene.add(artag_mesh_gt)


                lighting_intensity = 20.

                #self.viewer = pyrender.Viewer(self.scene, use_raymond_lighting=True, lighting_intensity=lighting_intensity,
                #                              point_size=2, run_in_thread=True, viewport_size=(1200, 1200))



                self.first_pass = False

                self.node_list = []
                for mesh_part in mesh_list:
                    for node in self.scene.get_nodes(obj=mesh_part):
                        self.node_list.append(node)

                self.node_list_gt = []
                for mesh_part_gt in mesh_list_gt:
                    for node in self.scene.get_nodes(obj=mesh_part_gt):
                        self.node_list_gt.append(node)



                self.artag_nodes = []
                for artag_mesh in artag_meshes:
                    if artag_mesh is not None:
                        for node in self.scene.get_nodes(obj=artag_mesh):
                            self.artag_nodes.append(node)
                self.artag_nodes_gt = []
                for artag_mesh_gt in artag_meshes_gt:
                    if artag_mesh_gt is not None:
                        for node in self.scene.get_nodes(obj=artag_mesh_gt):
                            self.artag_nodes_gt.append(node)
                if pmat_mesh is not None:
                    for node in self.scene.get_nodes(obj=pmat_mesh):
                        self.pmat_node = node
                if pmat_mesh2 is not None:
                    for node in self.scene.get_nodes(obj=pmat_mesh2):
                        self.pmat_node2 = node

                camera_pose = np.eye(4)
                # camera_pose[0,0] = -1.0
                # camera_pose[1,1] = -1.0

                camera_pose[0, 0] = np.cos(np.pi/2)
                camera_pose[0, 1] = np.sin(np.pi/2)
                camera_pose[1, 0] = -np.sin(np.pi/2)
                camera_pose[1, 1] = np.cos(np.pi/2)
                rot_udpim = np.eye(4)

                rot_y = 180*np.pi/180.
                rot_udpim[1,1] = np.cos(rot_y)
                rot_udpim[2,2] = np.cos(rot_y)
                rot_udpim[1,2] = np.sin(rot_y)
                rot_udpim[2,1] = -np.sin(rot_y)
                camera_pose = np.matmul(rot_udpim,  camera_pose)

                camera_pose[0, 3] = 64*0.0286/2  # -1.0
                camera_pose[1, 3] = 1.2 + 0.8
                camera_pose[2, 3] = -1.0


                if viz_popup == True:
                    self.viewer = pyrender.Viewer(self.scene, use_raymond_lighting=True,
                                                  lighting_intensity=10.,
                                                  point_size=5, run_in_thread=True, viewport_size=(1000, 1000))
                    #camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)

                magnify =(64*.0286)

                camera = pyrender.OrthographicCamera(xmag=magnify, ymag = magnify)

                self.scene.add(camera, pose=camera_pose)


                light = pyrender.SpotLight(color=np.ones(3), intensity=250.0, innerConeAngle=np.pi / 10.0,
                                           outerConeAngle=np.pi / 2.0)
                light_pose = np.copy(camera_pose)
                # light_pose[1, 3] = 2.0
                light_pose[0, 3] = 0.8
                light_pose[1, 3] = -0.5
                light_pose[2, 3] = -2.5

                light_pose2 = np.copy(camera_pose)
                light_pose2[0, 3] = 2.5
                light_pose2[1, 3] = 1.0
                light_pose2[2, 3] = -5.0

                light_pose3 = np.copy(camera_pose)
                light_pose3[0, 3] = 1.0
                light_pose3[1, 3] = 5.0
                light_pose3[2, 3] = -4.0

                #light_pose2[0, 3] = 1.0
                #light_pose2[1, 3] = 2.0 #across
                #light_pose2[2, 3] = -1.5
                # light_pose[1, ]

                self.scene.add(light, pose=light_pose)
                self.scene.add(light, pose=light_pose2)
                self.scene.add(light, pose=light_pose3)




            else:
                if viz_popup == True:
                    self.viewer.render_lock.acquire()

                #reset the human mesh
                for idx in range(len(mesh_list)):
                    self.scene.remove_node(self.node_list[idx])
                    self.scene.add(mesh_list[idx])
                    for node in self.scene.get_nodes(obj=mesh_list[idx]):
                        self.node_list[idx] = node

                #reset the human mesh
                for idx in range(len(mesh_list_gt)):
                    self.scene.remove_node(self.node_list_gt[idx])
                    self.scene.add(mesh_list_gt[idx])
                    for node in self.scene.get_nodes(obj=mesh_list_gt[idx]):
                        self.node_list_gt[idx] = node

                #reset the artag meshes
                for artag_node in self.artag_nodes:
                    self.scene.remove_node(artag_node)
                for artag_mesh in artag_meshes:
                    if artag_mesh is not None:
                        self.scene.add(artag_mesh)
                self.artag_nodes = []
                for artag_mesh in artag_meshes:
                    if artag_mesh is not None:
                        for node in self.scene.get_nodes(obj=artag_mesh):
                            self.artag_nodes.append(node)

                #reset the artag meshes
                for artag_node_gt in self.artag_nodes_gt:
                    self.scene.remove_node(artag_node_gt)
                for artag_mesh_gt in artag_meshes_gt:
                    if artag_mesh_gt is not None:
                        self.scene.add(artag_mesh_gt)
                self.artag_nodes_gt = []
                for artag_mesh_gt in artag_meshes_gt:
                    if artag_mesh_gt is not None:
                        for node in self.scene.get_nodes(obj=artag_mesh_gt):
                            self.artag_nodes_gt.append(node)


                #reset the pmat mesh
                if pmat_mesh is not None:
                    self.scene.remove_node(self.pmat_node)
                    self.scene.add(pmat_mesh)
                    for node in self.scene.get_nodes(obj=pmat_mesh):
                        self.pmat_node = node


                #reset the pmat mesh
                if pmat_mesh2 is not None:
                    self.scene.remove_node(self.pmat_node2)
                    self.scene.add(pmat_mesh2)
                    for node in self.scene.get_nodes(obj=pmat_mesh2):
                        self.pmat_node2 = node



                #print self.scene.get_nodes()
                if viz_popup == True:
                    self.viewer.render_lock.release()
            #time.sleep(100)


        if viz_popup == False:
            r = pyrender.OffscreenRenderer(880, 880)
            # r.render(self.scene)
            color_render, depth = r.render(self.scene)
            # plt.subplot(1, 2, 1)
            plt.axis('off')


            #im_to_show = np.concatenate((color_render, color_im), axis = 1)
            im_to_show = np.copy(color_render)


            im_to_show = im_to_show[130-int(extend_top_bottom*300):750+int(extend_top_bottom*300), :, :]

            #plt.imshow(color)
            plt.imshow(im_to_show)
            # plt.subplot(1, 2, 2)
            # plt.axis('off')
            # plt.imshow(depth, cmap=plt.cm.gray_r) >> > plt.show()

            fig.set_size_inches(15., 10.)
            fig.tight_layout()
            #save_name = 'f_hbh_'+'{:04}'.format(self.pic_num)


            print "saving!"
            fig.savefig('/media/henry/multimodal_data_2/CVPR2020_study/'+save_name+'_v2.png', dpi=300)


            self.pic_num += 1
            #plt.show()
            #if self.pic_num == 20:
            #    print "DONE"
            #    time.sleep(1000000)
            #print "got here"

            #print X.shape


        return RESULTS_DICT
Example #14
def render_orthcam(
        model_in,  # model name or trimesh
        xy_mag,
        rend_size,
        flat_shading=False,
        zfar=10000,
        znear=0.05):

    # Mesh creation
    if isinstance(model_in, str) is True:
        mesh = trimesh.load(model_in, process=False)
    else:
        mesh = model_in.copy()
    pr_mesh = pyrender.Mesh.from_trimesh(mesh)

    # Scene creation
    scene = pyrender.Scene()

    # Adding objects to the scene
    face_node = scene.add(pr_mesh)

    # Camera Creation
    if type(xy_mag) == float:
        cam = pyrender.OrthographicCamera(xmag=xy_mag,
                                          ymag=xy_mag,
                                          znear=znear,
                                          zfar=zfar)
    elif type(xy_mag) == tuple:
        cam = pyrender.OrthographicCamera(xmag=xy_mag[0],
                                          ymag=xy_mag[1],
                                          znear=znear,
                                          zfar=zfar)
    else:
        print("Error: xy_mag should be float or tuple")
        return False

    scene.add(cam, pose=np.eye(4))

    # Set up the light
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=10.0)
    scene.add(light, pose=np.eye(4))

    # Rendering offscreen from that camera
    r = pyrender.OffscreenRenderer(viewport_width=rend_size[1],
                                   viewport_height=rend_size[0],
                                   point_size=1.0)
    if flat_shading is True:
        color, depth = r.render(scene,
                                flags=pyrender.constants.RenderFlags.FLAT)
    else:
        color, depth = r.render(scene)

    # rgb to bgr for cv2
    color = color[:, :, [2, 1, 0]]

    # fix pyrender BUG of depth rendering, pyrender version: 0.1.43
    depth[depth != 0] = (zfar + znear - (
        (2.0 * znear * zfar) / depth[depth != 0])) / (zfar - znear)
    depth[depth != 0] = ((depth[depth != 0] + (zfar + znear) /
                          (zfar - znear)) * (zfar - znear)) / 2.0

    return depth, color
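A hypothetical call to render_orthcam above, using a generated trimesh primitive pushed back along -z so it sits in front of the identity-pose camera (all values chosen for illustration only):

import trimesh

sphere = trimesh.creation.icosphere(radius=0.5)
sphere.apply_translation([0.0, 0.0, -2.0])  # place the mesh in front of the camera at the origin
depth, color = render_orthcam(sphere, xy_mag=1.0, rend_size=(480, 640))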
Example #15
def main(opt):
    model = body_models.create(model_path='../3d_data/models',
                               model_type='smpl',
                               gender='male',
                               ext='pkl')
    smpl = pickle.load(open('../3d_data/densepose_uv.pkl', 'rb'))
    faces = np.array(smpl['f_extended'], dtype=np.int64).reshape((-1, 3))
    uv_faceid = io.loadmat(
        '../3d_data/DensePoseData/UV_data/UV_Processed.mat')['All_FaceIndices']
    uv = smpl['uv']

    # with open('../3d_data/nongrey_male_0110.jpg', 'rb') as file:
    texture = cv2.imread('../3d_data/nongrey_male_0110.jpg')

    global_tr = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0],
                          [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])

    # set up the rendering objects
    focal_length = opt.focal_length * opt.image_height
    # mesh_camera = pyrender.IntrinsicsCamera(focal_length, focal_length, opt.image_width / 2, opt.image_height / 2,
    #                                    opt.znear, opt.zfar)
    mesh_camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0, znear=0.05)
    camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0, znear=0.05)
    camera_pose = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, 0.05], [0.0, 0.0, 0.0, 1.0]])

    mesh_tr = np.array([[1.0, 0.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0, opt.global_y + 0.11],
                        [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])

    mesh_camera_pose = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0],
                                 [0.0, 0.0, 1.0, opt.camera_distance],
                                 [0.0, 0.0, 0.0, 1.0]])

    render = pyrender.OffscreenRenderer(opt.image_width, opt.image_height)

    output = model(return_verts=True)
    vertices = output.vertices.detach().cpu().numpy().squeeze()

    mesh_verts = np.array([vertices[i] for i in smpl['v_extended']])
    visual_check = trimesh.visual.TextureVisuals(uv=uv, image=texture)

    tri_mesh_scene = trimesh.Trimesh(vertices=mesh_verts,
                                     faces=faces,
                                     visual=visual_check)

    mesh_body = pyrender.Mesh.from_trimesh(tri_mesh_scene)
    mesh_scene = pyrender.Scene(ambient_light=[0.5, 0.5, 0.5],
                                bg_color=[-1.0, -1.0, -1.0])

    mesh_scene.add(mesh_body, pose=mesh_tr)
    mesh_scene.add(mesh_camera, pose=mesh_camera_pose)

    rendered_uv, depth = render.render(scene=mesh_scene,
                                       flags=pyrender.RenderFlags.UV_RENDERING)
    rendered_uv = rendered_uv.copy()

    mask = rendered_uv[:, :, 2] != -1.
    temp_2 = rendered_uv[:, :, 2]
    temp_2[mask] = np.take(uv_faceid, temp_2[mask].astype('int'))
    rendered_uv[:, :, 2] = temp_2

    cv2.imshow('UV', rendered_uv)
    bounds = tri_mesh_scene.bounding_box_oriented.extents

    mesh_verts -= mesh_scene.centroid
    mesh_verts /= bounds
    # mesh_verts *= 2
    mesh_verts = mesh_verts + 1 / 2

    face_select = faces[uv_faceid[:, 0] == 1]

    # verts = np.concatenate((uv, np.ones(uv.shape[:2] + (1,))), axis=2)

    # uv[:, 2] = 1
    verts = (uv * 2) - 1
    visual = trimesh.visual.ColorVisuals(vertex_colors=uv)
    tri_mesh = trimesh.Trimesh(vertices=verts,
                               faces=face_select,
                               visual=visual)
    # tri_mesh

    mesh = pyrender.Mesh.from_trimesh(tri_mesh)

    # tri_mesh.show()

    scene = pyrender.Scene(ambient_light=[0.5, 0.5, 0.5],
                           bg_color=[-1.0, -1.0, -1.0])
    scene.add(mesh, pose=global_tr)
    scene.add(camera, pose=camera_pose)

    rendered_color_visual, depth = render.render(
        scene=scene, flags=pyrender.RenderFlags.SKIP_CULL_FACES)
    # pyrender.Viewer(scene, render_flags={'cull_faces': False})
    cv2.imshow('Part UV', rendered_color_visual)
    # cv2.waitKey(0)

    rendered_interp, _ = render.render(
        scene=scene,
        flags=pyrender.RenderFlags.BARYCENTRIC_COORDINATES
        | pyrender.RenderFlags.SKIP_CULL_FACES)
    tri_id, _ = render.render(scene=scene,
                              flags=pyrender.RenderFlags.TRIANGLE_ID_RENDERING
                              | pyrender.RenderFlags.SKIP_CULL_FACES)

    vertex_stream = np.take(mesh_verts, face_select, axis=0)
    tri_id = tri_id[:, :, 0]

    rendered_interp = rendered_interp.reshape(rendered_interp.shape +
                                              (1, )).repeat([3], axis=-1)
    out_view = vertex_stream[tri_id.astype('int')] * rendered_interp
    out_view = out_view.sum(axis=-2)

    # rendered_uv[rendered_uv == -1] = 0
    # rendered_uv[:, :, 2] /= 255
    out_view[rendered_color_visual < 0] = 0

    # cv2.imwrite('../saves/checks/mesh_normalized_uv.jpg', (rendered_uv * 255).astype('uint8'))
    cv2.imshow('Coords', out_view)
    cv2.imwrite('../saves/checks/mesh_uv_render.jpg',
                (out_view * 255).astype('uint8'))
    cv2.waitKey(0)
Example #16
# +X
# -X

df = []

for dist in range(0, 200, 5):

    scene = pyrender.Scene()
    scene.add(mesh)

    # TODO: figure out how to render correctly

    # camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)

    camera = pyrender.OrthographicCamera(xmag=10.0, ymag=10.0)

    camera_pose = np.array([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, dist],
        [0.0, 0.0, 0.0, 1.0],
    ])

    scene.add(camera, pose=camera_pose)
    light = pyrender.SpotLight(color=np.ones(3),
                               intensity=3.0,
                               innerConeAngle=np.pi / 16.0)
    # scene.add(light, pose=camera_pose)

    r = pyrender.OffscreenRenderer(100, 100)
Example #17
    def render_mesh_pc_bed_pyrender_everything(self,
                                               smpl_verts,
                                               smpl_faces,
                                               camera_point,
                                               bedangle,
                                               RESULTS_DICT,
                                               pc=None,
                                               pmat=None,
                                               smpl_render_points=False,
                                               markers=None,
                                               dropout_variance=None,
                                               color_im=None,
                                               tf_corners=None,
                                               current_pose_type_ct=None,
                                               participant=None):

        #print np.min(smpl_verts[:, 0])
        #print np.min(smpl_verts[:, 1])

        shift_estimate_sideways = np.min([-0.15, np.min(smpl_verts[:, 1])])
        #print shift_estimate_sideways
        shift_estimate_sideways = 0.8 - shift_estimate_sideways

        top_smpl_vert = np.max(smpl_verts[:, 0])
        extend_top_bottom = np.max([np.max(smpl_verts[:, 0]), 64 * .0286]) - 64 * .0286
        print(extend_top_bottom, 'extend top bot')

        shift_both_amount = np.max([0.9, np.max(smpl_verts[:, 1])
                                    ])  #if smpl is bigger than 0.9 shift less
        shift_both_amount = 1.5 - shift_both_amount + (
            0.15 + np.min([-0.15, np.min(smpl_verts[:, 1])]))

        #print np.max(smpl_verts[:, 1]), 'max smpl'

        #shift_both_amount = 0.6
        #smpl_verts[:, 2] += 0.5
        #pc[:, 2] += 0.5

        pc[:, 0] = pc[:, 0]  # - 0.17 - 0.036608
        pc[:, 1] = pc[:, 1]  # + 0.09

        #adjust the point cloud

        #segment_limbs = True

        #if pmat is not None:
        #   if np.sum(pmat) < 5000:
        #       smpl_verts = smpl_verts * 0.001

        smpl_verts_quad = np.concatenate(
            (smpl_verts, np.ones((smpl_verts.shape[0], 1))), axis=1)
        smpl_verts_quad = np.swapaxes(smpl_verts_quad, 0, 1)

        #print smpl_verts_quad.shape

        transform_A = np.identity(4)
        transform_A[1, 3] = shift_both_amount

        transform_B = np.identity(4)
        transform_B[1, 3] = shift_estimate_sideways + shift_both_amount  #4.0 #move things over
        smpl_verts_B = np.swapaxes(np.matmul(transform_B, smpl_verts_quad), 0,
                                   1)[:, 0:3]

        transform_C = np.identity(4)
        transform_C[1, 3] = 2.0  #2.0 #move things over
        smpl_verts_C = np.swapaxes(np.matmul(transform_C, smpl_verts_quad), 0,
                                   1)[:, 0:3]

        from matplotlib import cm

        human_mesh_vtx_all, human_mesh_face_all = self.get_human_mesh_parts(
            smpl_verts_B, smpl_faces, segment_limbs=False)

        #GET MESH WITH PMAT
        tm_curr = trimesh.base.Trimesh(vertices=np.array(
            human_mesh_vtx_all[0]),
                                       faces=np.array(human_mesh_face_all[0]))
        tm_list = [tm_curr]
        original_mesh = [tm_curr]

        mesh_list = []
        mesh_list.append(
            pyrender.Mesh.from_trimesh(tm_list[0],
                                       material=self.human_mat,
                                       smooth=False)
        )  #wireframe = False)) #this is for the main human

        print(np.shape(color_im))
        print(tf_corners)
        top_idx = float(tf_corners[0, 1])
        bot_idx = float(tf_corners[2, 1])
        perc_total = (bot_idx - top_idx) / 880.
        print(perc_total)

        fig = plt.figure()
        if self.render == True:

            #print m.r
            #print artag_r
            #create mini meshes for AR tags
            artag_meshes = []
            if markers is not None:
                for marker in markers:
                    if markers[2] is None:
                        artag_meshes.append(None)
                    elif marker is None:
                        artag_meshes.append(None)
                    else:
                        #print marker - markers[2]
                        if marker is markers[2]:
                            print "is markers 2", marker
                            #artag_tm = trimesh.base.Trimesh(vertices=self.artag_r, faces=self.artag_f, face_colors = self.artag_facecolors_root)
                            #artag_meshes.append(pyrender.Mesh.from_trimesh(artag_tm, smooth = False))
                        else:
                            artag_tm = trimesh.base.Trimesh(
                                vertices=self.artag_r + [
                                    0.0, shift_estimate_sideways +
                                    shift_both_amount, 0.0
                                ],
                                faces=self.artag_f,
                                face_colors=self.artag_facecolors)
                            artag_meshes.append(
                                pyrender.Mesh.from_trimesh(artag_tm,
                                                           smooth=False))

            if pmat is not None:
                pmat_verts, pmat_faces, pmat_facecolors = self.get_3D_pmat_markers(
                    pmat, bedangle)
                pmat_verts = np.array(pmat_verts)
                pmat_verts = np.concatenate((np.swapaxes(
                    pmat_verts, 0, 1), np.ones((1, pmat_verts.shape[0]))),
                                            axis=0)
                pmat_verts = np.swapaxes(np.matmul(transform_A, pmat_verts), 0,
                                         1)[:, 0:3]
                pmat_tm = trimesh.base.Trimesh(vertices=pmat_verts,
                                               faces=pmat_faces,
                                               face_colors=pmat_facecolors)
                pmat_mesh = pyrender.Mesh.from_trimesh(pmat_tm, smooth=False)

                pmat_verts2, _, pmat_facecolors2 = self.get_3D_pmat_markers(
                    pmat, bedangle, solidcolor=True)
                pmat_verts2 = np.array(pmat_verts2)
                pmat_verts2 = np.concatenate((np.swapaxes(
                    pmat_verts2, 0, 1), np.ones((1, pmat_verts2.shape[0]))),
                                             axis=0)
                pmat_verts2 = np.swapaxes(np.matmul(transform_B, pmat_verts2),
                                          0, 1)[:, 0:3]
                pmat_tm2 = trimesh.base.Trimesh(vertices=pmat_verts2,
                                                faces=pmat_faces,
                                                face_colors=pmat_facecolors2)
                pmat_mesh2 = pyrender.Mesh.from_trimesh(pmat_tm2, smooth=False)

            else:
                pmat_mesh = None
                pmat_mesh2 = None

            #print "Viewing"
            if self.first_pass == True:

                for mesh_part in mesh_list:
                    self.scene.add(mesh_part)
                if pmat_mesh is not None:
                    self.scene.add(pmat_mesh)

                if pmat_mesh2 is not None:
                    self.scene.add(pmat_mesh2)

                for artag_mesh in artag_meshes:
                    if artag_mesh is not None:
                        self.scene.add(artag_mesh)

                lighting_intensity = 20.

                #self.viewer = pyrender.Viewer(self.scene, use_raymond_lighting=True, lighting_intensity=lighting_intensity,
                #                              point_size=2, run_in_thread=True, viewport_size=(1200, 1200))

                self.first_pass = False

                self.node_list = []
                for mesh_part in mesh_list:
                    for node in self.scene.get_nodes(obj=mesh_part):
                        self.node_list.append(node)

                self.artag_nodes = []
                for artag_mesh in artag_meshes:
                    if artag_mesh is not None:
                        for node in self.scene.get_nodes(obj=artag_mesh):
                            self.artag_nodes.append(node)
                if pmat_mesh is not None:
                    for node in self.scene.get_nodes(obj=pmat_mesh):
                        self.pmat_node = node
                if pmat_mesh2 is not None:
                    for node in self.scene.get_nodes(obj=pmat_mesh2):
                        self.pmat_node2 = node

                camera_pose = np.eye(4)
                # camera_pose[0,0] = -1.0
                # camera_pose[1,1] = -1.0

                camera_pose[0, 0] = np.cos(np.pi / 2)
                camera_pose[0, 1] = np.sin(np.pi / 2)
                camera_pose[1, 0] = -np.sin(np.pi / 2)
                camera_pose[1, 1] = np.cos(np.pi / 2)
                rot_udpim = np.eye(4)

                rot_y = 180 * np.pi / 180.
                rot_udpim[1, 1] = np.cos(rot_y)
                rot_udpim[2, 2] = np.cos(rot_y)
                rot_udpim[1, 2] = np.sin(rot_y)
                rot_udpim[2, 1] = -np.sin(rot_y)
                camera_pose = np.matmul(rot_udpim, camera_pose)

                camera_pose[0, 3] = 64 * 0.0286 / 2  # -1.0
                camera_pose[1, 3] = 1.2
                camera_pose[2, 3] = -1.0

                # self.viewer = pyrender.Viewer(self.scene, use_raymond_lighting=True,
                #                              lighting_intensity=10.,
                #                              point_size=5, run_in_thread=True, viewport_size=(1000, 1000))
                # camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)

                magnify = (64 * .0286) * 0.5 / perc_total

                camera = pyrender.OrthographicCamera(xmag=magnify,
                                                     ymag=magnify)

                self.scene.add(camera, pose=camera_pose)
                light = pyrender.SpotLight(color=np.ones(3),
                                           intensity=200.0,
                                           innerConeAngle=np.pi / 10.0,
                                           outerConeAngle=np.pi / 2.0)
                light_pose = np.copy(camera_pose)
                # light_pose[1, 3] = 2.0
                light_pose[0, 3] = 0.8
                light_pose[1, 3] = -0.5
                light_pose[2, 3] = -1.5

                light_pose2 = np.copy(camera_pose)
                light_pose2[0, 3] = 2.5
                light_pose2[1, 3] = 1.0
                light_pose2[2, 3] = -4.0
                # light_pose[1, ]

                self.scene.add(light, pose=light_pose)
                self.scene.add(light, pose=light_pose2)

            else:
                #self.viewer.render_lock.acquire()

                #reset the human mesh
                for idx in range(len(mesh_list)):
                    self.scene.remove_node(self.node_list[idx])
                    self.scene.add(mesh_list[idx])
                    for node in self.scene.get_nodes(obj=mesh_list[idx]):
                        self.node_list[idx] = node

                #reset the artag meshes
                for artag_node in self.artag_nodes:
                    self.scene.remove_node(artag_node)
                for artag_mesh in artag_meshes:
                    if artag_mesh is not None:
                        self.scene.add(artag_mesh)
                self.artag_nodes = []
                for artag_mesh in artag_meshes:
                    if artag_mesh is not None:
                        for node in self.scene.get_nodes(obj=artag_mesh):
                            self.artag_nodes.append(node)

                #reset the pmat mesh
                if pmat_mesh is not None:
                    self.scene.remove_node(self.pmat_node)
                    self.scene.add(pmat_mesh)
                    for node in self.scene.get_nodes(obj=pmat_mesh):
                        self.pmat_node = node

                #reset the pmat mesh
                if pmat_mesh2 is not None:
                    self.scene.remove_node(self.pmat_node2)
                    self.scene.add(pmat_mesh2)
                    for node in self.scene.get_nodes(obj=pmat_mesh2):
                        self.pmat_node2 = node

                #print self.scene.get_nodes()
                #self.viewer.render_lock.release()
            #time.sleep(100)

        r = pyrender.OffscreenRenderer(880, 880)
        # r.render(self.scene)
        color_render, depth = r.render(self.scene)
        # plt.subplot(1, 2, 1)
        plt.axis('off')

        if 880. - bot_idx > top_idx:
            print('shift im down by', 880. - bot_idx - top_idx)
            downshift = int((880. - bot_idx) / 2 - top_idx / 2 + 0.5)
            color_im[downshift:880] = color_im[0:880 - downshift]

        elif top_idx > (880. - bot_idx):
            print('shift im up by', top_idx - (880. - bot_idx))
            upshift = int(top_idx / 2 - (880. - bot_idx) / 2 + 0.5)
            color_im[0:880 - upshift] = color_im[upshift:880]

        print(tf_corners)
        print(np.shape(color_render), np.shape(color_im))
        color_im = np.concatenate(
            (color_im[:, :, 2:3], color_im[:, :, 1:2], color_im[:, :, 0:1]),
            axis=2)
        color_im = color_im[:, int(tf_corners[0, 0] - 10):int(tf_corners[1, 0] + 10), :]

        im_to_show = np.concatenate((color_render, color_im), axis=1)

        im_to_show = im_to_show[130 - int(extend_top_bottom * 300):750 +
                                int(extend_top_bottom * 300), :, :]

        #plt.imshow(color)
        plt.imshow(im_to_show)
        # plt.subplot(1, 2, 2)
        # plt.axis('off')
        # plt.imshow(depth, cmap=plt.cm.gray_r) >> > plt.show()

        fig.set_size_inches(15., 10.)
        fig.tight_layout()
        #save_name = 'f_hbh_'+'{:04}'.format(self.pic_num)

        save_name = participant + '_' + current_pose_type_ct

        fig.savefig('/media/henry/multimodal_data_2/CVPR2020_study/' +
                    participant + '/estimated_poses/' + save_name + '.png',
                    dpi=300)
        #fig.savefig('/media/henry/multimodal_data_2/CVPR2020_study/TEST.png', dpi=300)

        #plt.savefig('test2png.png', dpi=100)

        self.pic_num += 1
        #plt.show()
        #if self.pic_num == 20:
        #    print "DONE"
        #    time.sleep(1000000)
        #print "got here"

        #print X.shape

        return RESULTS_DICT
Example #18
    def render_only_human_gt(self, m):

        bed1_verts = np.array([
            [-1.3, -1.35, -3.0],
            [-0.9, -0.75, -4.0],
            [1.3, -1.35, -3.0],
            [0.9, -0.75, -4.0],
            [-1.2, 0.05, -4.0],
            [-1.2, 0.0, -4.0],
            [1.2, 0.05, -4.0],
            [1.2, 0.0, -4.0],
            [-1.3, -1.35, -3.0],
            [-1.3, -1.45, -3.0],
            [1.3, -1.35, -3.0],
            [1.3, -1.45, -3.0],
            [-1.2, 0.05, -4.0],
            [-1.0, 1.0, -4.0],
            [1.2, 0.05, -4.0],
            [1.0, 1.0, -4.0],
        ])
        bed1_faces = np.array([
            [0, 1, 2],
            [0, 2, 1],
            [1, 2, 3],
            [1, 3, 2],
            [4, 5, 6],
            [4, 6, 5],
            [5, 6, 7],
            [5, 7, 6],
            [8, 9, 10],
            [8, 10, 9],
            [9, 10, 11],
            [9, 11, 10],
            [12, 13, 14],
            [12, 14, 13],
            [13, 14, 15],
            [13, 15, 14],
        ])
        bed1_facecolors = []
        for i in range(bed1_faces.shape[0]):
            if 4 <= i < 12:
                bed1_facecolors.append([0.8, 0.8, 0.2])
            else:
                bed1_facecolors.append([1.0, 1.0, 0.8])

        smpl_verts = (m.r - m.J_transformed[0, :])

        #smpl_verts = np.concatenate((smpl_verts[:, 1:2] - 1.5, smpl_verts[:, 0:1], -smpl_verts[:, 2:3]), axis = 1)
        smpl_verts = np.concatenate(
            (smpl_verts[:, 0:1] - 1.5, smpl_verts[:, 1:2], smpl_verts[:, 2:3]),
            axis=1)
        #smpl_verts = np.concatenate((smpl_verts[:, 1:2], smpl_verts[:, 0:1], -smpl_verts[:, 2:3]), axis = 1)

        smpl_faces = np.array(m.f)
        #smpl_faces = np.concatenate((smpl_faces[:, 0:1],smpl_faces[:, 2:3],smpl_faces[:, 1:2]), axis = 1)

        smpl_verts2 = np.concatenate(
            (smpl_verts[:, 1:2], smpl_verts[:, 2:3] - 2.0, smpl_verts[:, 0:1]),
            axis=1)
        smpl_verts3 = np.concatenate(
            (smpl_verts[:, 1:2], smpl_verts[:, 2:3] - 2.4, smpl_verts[:, 0:1]),
            axis=1)
        laying_rot_M = np.array([[1., 0., 0.], [0., 0.866025, -0.5],
                                 [0., 0.5, 0.866025]])
        laying_rot_M2 = np.array([[1., 0., 0.], [0., 0.34202, -0.93969],
                                  [0., 0.93969, 0.34202]])
        for i in range(smpl_verts2.shape[0]):
            smpl_verts2[i, :] = np.matmul(laying_rot_M, smpl_verts2[i, :])
            smpl_verts3[i, :] = np.matmul(laying_rot_M2, smpl_verts3[i, :])

            #break

        #smpl_verts2 = np.concatenate((-smpl_verts[:, 2:3] + 1.5, smpl_verts[:, 1:2], smpl_verts[:, 0:1]), axis = 1)
        #smpl_verts3 = np.concatenate((smpl_verts[:, 2:3] - 1.5, smpl_verts[:, 1:2], -smpl_verts[:, 0:1]), axis = 1)
        #print smpl_verts2.shape

        #tm = trimesh.base.Trimesh(vertices=smpl_verts, faces=smpl_faces)
        #mesh = pyrender.Mesh.from_trimesh(tm, material=self.human_mat_for_study, wireframe=False)

        tm2 = trimesh.base.Trimesh(vertices=smpl_verts2, faces=smpl_faces)
        mesh2 = pyrender.Mesh.from_trimesh(tm2,
                                           material=self.human_mat_for_study,
                                           wireframe=False)

        tm3 = trimesh.base.Trimesh(vertices=smpl_verts3, faces=smpl_faces)
        mesh3 = pyrender.Mesh.from_trimesh(tm3,
                                           material=self.human_mat_for_study,
                                           wireframe=False)

        tm_bed1 = trimesh.base.Trimesh(vertices=bed1_verts,
                                       faces=bed1_faces,
                                       face_colors=np.array(bed1_facecolors))
        mesh_bed1 = pyrender.Mesh.from_trimesh(
            tm_bed1,
            material=self.human_bed_for_study,
            wireframe=False,
            smooth=False)

        fig = plt.figure()
        #plt.plot(np.arange(0, 400), np.arange(0, 400)*.5 + 800)

        if self.first_pass == True:
            self.scene.add(mesh_bed1)
            #self.scene.add(mesh)
            self.scene.add(mesh2)
            self.scene.add(mesh3)

            self.first_pass = False

            self.node_list_bed1 = []
            for node in self.scene.get_nodes(obj=mesh_bed1):
                self.node_list_bed1.append(node)

            #self.node_list = []
            #for node in self.scene.get_nodes(obj=mesh):
            #    self.node_list.append(node)

            self.node_list_2 = []
            for node in self.scene.get_nodes(obj=mesh2):
                self.node_list_2.append(node)

            self.node_list_3 = []
            for node in self.scene.get_nodes(obj=mesh3):
                self.node_list_3.append(node)

            camera_pose = np.eye(4)
            # camera_pose[0,0] = -1.0
            # camera_pose[1,1] = -1.0
            camera_pose[0, 3] = 0.0  # -1.0
            camera_pose[1, 3] = 0.0  # -1.0
            camera_pose[2, 3] = 4.0

            # self.viewer = pyrender.Viewer(self.scene, use_raymond_lighting=True,
            #                              lighting_intensity=10.,
            #                              point_size=5, run_in_thread=True, viewport_size=(1000, 1000))
            # camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
            camera = pyrender.OrthographicCamera(xmag=2.0, ymag=2.0)

            self.scene.add(camera, pose=camera_pose)
            light = pyrender.SpotLight(color=np.ones(3),
                                       intensity=150.0,
                                       innerConeAngle=np.pi / 10.0,
                                       outerConeAngle=np.pi / 2.0)
            light_pose = np.copy(camera_pose)
            #light_pose[1, 3] = 2.0
            light_pose[0, 3] = -1.0
            light_pose[2, 3] = 5.0

            light_pose2 = np.copy(camera_pose)
            light_pose2[0, 3] = 0.5
            light_pose2[1, 3] = 0.5
            light_pose2[2, 3] = 5.0
            # light_pose[1, ]

            #self.scene.add(light, pose=light_pose)
            #self.scene.add(light, pose=light_pose2)

        else:
            #self.viewer.render_lock.acquire()

            # reset the bed mesh
            self.scene.remove_node(self.node_list_bed1[0])
            self.scene.add(mesh_bed1)
            for node in self.scene.get_nodes(obj=mesh_bed1):
                self.node_list_bed1[0] = node

            # reset the human mesh
            #self.scene.remove_node(self.node_list[0])
            #self.scene.add(mesh)
            #for node in self.scene.get_nodes(obj=mesh):
            #    self.node_list[0] = node

            self.scene.remove_node(self.node_list_2[0])
            self.scene.add(mesh2)
            for node in self.scene.get_nodes(obj=mesh2):
                self.node_list_2[0] = node

            self.scene.remove_node(self.node_list_3[0])
            self.scene.add(mesh3)
            for node in self.scene.get_nodes(obj=mesh3):
                self.node_list_3[0] = node

            #self.viewer.render_lock.release()

        r = pyrender.OffscreenRenderer(2000, 2000)
        # r.render(self.scene)
        color, depth = r.render(self.scene)
        # plt.subplot(1, 2, 1)
        plt.axis('off')
        plt.imshow(color)
        # plt.subplot(1, 2, 2)
        # plt.axis('off')
        # plt.imshow(depth, cmap=plt.cm.gray_r)
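        # release the offscreen renderer's GL context; a new renderer is created on
        # every call, so freeing it here keeps contexts from piling up
        r.delete()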

        fig.set_size_inches(15., 15.)
        fig.tight_layout()
        save_name = 'f_hbh_' + '{:04}'.format(self.pic_num)
        fig.savefig('/home/henry/Pictures/CVPR2020_study/' + save_name +
                    '.png',
                    dpi=300)

        #plt.savefig('test2png.png', dpi=100)

        self.pic_num += 1
        #plt.show()
        if self.pic_num == 20:
            print "DONE"
            time.sleep(1000000)
        print "got here"
Exemplo n.º 19
0
def fixed_prediction_animation(real_traj,
                               pred_traj,
                               loop_time,
                               urdf_path='robots/right_hand_relative.urdf',
                               real_color=np.array([71, 107, 107, 255]),
                               pred_color_cmap=matplotlib.cm.get_cmap('tab10'),
                               background_color=np.array([1.0, 1.0, 1.0]),
                               hand_offset=0.2,
                               title="Hand Motion Prediction",
                               reverse=True,
                               show=False):
    scene, origin, node_map, real_hand, pred_hands, pred_trajectories, times, fps = setup_animation_scene(
        real_traj=real_traj,
        pred_traj=pred_traj,
        hand_offset=hand_offset,
        loop_time=loop_time,
        urdf_path=urdf_path,
        real_color=real_color,
        pred_color_cmap=pred_color_cmap,
        background_color=background_color,
        reverse=reverse)

    # clear_output()

    # Set up the camera -- z-axis away from the scene, x-axis right, y-axis up
    camera = pyrender.OrthographicCamera(
        xmag=(hand_offset * len(pred_trajectories)) * 1.2, ymag=0.3)
    camera_pose = np.array([
        [1.0, 0.0, 0.0, (hand_offset * len(pred_trajectories)) / 2],
        [0.0, 0.0, -1.0, -0.25],
        [0.0, 1.0, 0.0, 0.10],
        [0.0, 0.0, 0.0, 1.0],
    ])
    scene.add(camera, pose=camera_pose)

    # Set up the light -- a single directional light posed with the camera
    light = pyrender.DirectionalLight(color=np.ones(3), intensity=5)
    scene.add(light, pose=camera_pose)

    # Render the scene
    height = 5
    width = height * 1.5
    dpi = 100
    r = pyrender.OffscreenRenderer(width * dpi, height * dpi)

    rgb_sequence, depth_sequence = [], []

    fig = plt.figure(figsize=(width, height))
    plt.title(title)

    color, _ = r.render(scene)
    rgb_sequence.append(color)

    for i in tqdm(range(len(times)), position=0, leave=True, desc="Rendering"):
        real_cfg = {k: real_traj[k][i] for k in real_traj}
        pred_cfgs = [{k: pred_traj[k][i]
                      for k in pred_traj} for pred_traj in pred_trajectories]
        # i = (i + 1) % len(times)

        fk_real = real_hand.visual_trimesh_fk(cfg=real_cfg)
        fk_preds = [
            pred_hand.visual_trimesh_fk(cfg=pred_cfg)
            for pred_hand, pred_cfg in zip(pred_hands, pred_cfgs)
        ]

        for real_mesh in fk_real:
            real_pose = fk_real[real_mesh]
            node_map[real_mesh].matrix = real_pose
        for fk_pred in fk_preds:
            for pred_mesh in fk_pred:
                pred_pose = fk_pred[pred_mesh]
                node_map[pred_mesh].matrix = pred_pose

        color, _ = r.render(scene)
        rgb_sequence.append(color)

    r.delete()

    artists = []
    for img in tqdm(rgb_sequence,
                    position=0,
                    leave=True,
                    desc="Preparing PLT Animation"):
        img_color = plt.imshow(img, animated=True)
        artists.append([img_color])

    # Create PLT animation
    ani = animation.ArtistAnimation(fig,
                                    artists,
                                    interval=1 / fps * 1000,
                                    blit=True,
                                    repeat_delay=500)
    plt.axis('off')
    if show:
        plt.show()
    else:
        plt.close()
    return ani
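The function returns a matplotlib ArtistAnimation but does not write it to disk; a minimal, self-contained sketch of saving such an animation (the random frames and the 'preview.gif' name are placeholders; PillowWriter avoids the ffmpeg dependency):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

fig = plt.figure()
# ten dummy frames standing in for the rendered RGB images collected above
artists = [[plt.imshow(np.random.rand(8, 8), animated=True)] for _ in range(10)]
ani = animation.ArtistAnimation(fig, artists, interval=100, blit=True)
ani.save('preview.gif', writer=animation.PillowWriter(fps=10))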
Exemplo n.º 20
0
    def render(self, pose_deg=25.0, light_pose_deg=-25.0, gamma=1.0):

        vertices = self.dims["shape"]["vals"]
        vertices /= np.max(np.abs(vertices))

        faces = self._tl - 1

        # these are linear (I think) but the renderer needs them in sRGB
        colours = self.dims["tex"]["vals"]

        srgb_colours = colours / 255.0

        srgb_colours = np.where(
            srgb_colours < 0.0031308,
            srgb_colours * 12.92,
            1.055 * (srgb_colours**(1.0 / 2.4)) - 0.055,
        )

        srgb_colours *= 255.0

        # pass the sRGB-converted colours so the mesh matches what the renderer expects
        mesh = trimesh.Trimesh(vertices=vertices,
                               faces=faces,
                               vertex_colors=srgb_colours)

        mesh.invert()

        self._mesh = mesh

        camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0)

        pose_vec = scipy.spatial.transform.Rotation.from_rotvec(
            [0, np.radians(pose_deg), 0])

        pose_mat = np.eye(4)
        pose_mat[:3, :3] = pose_vec.as_matrix()  # as_dcm() is deprecated in newer SciPy
        pose_mat[2, -1] = -100
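        # with an orthographic camera the mesh's apparent size does not depend on this
        # depth; the translation only places it in front of the camera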

        material = pyrender.material.SpecularGlossinessMaterial(
            diffuseFactor=1.0, glossinessFactor=0.0)

        obj = pyrender.Mesh.from_trimesh(
            mesh=mesh,
            poses=pose_mat[np.newaxis, ...]  # material=material
        )

        self._obj = obj

        light = pyrender.DirectionalLight(intensity=5.0)

        light_pose_vec = scipy.spatial.transform.Rotation.from_rotvec(
            [0, np.radians(light_pose_deg), 0])
        light_pose_mat = np.eye(4)
        light_pose_mat[:3, :3] = light_pose_vec.as_matrix()

        scene = pyrender.Scene(ambient_light=[0.1] * 3, bg_color=[0] * 4)

        scene.add(camera)
        scene.add(obj)
        scene.add(light, pose=light_pose_mat)

        self._renderer = pyrender.OffscreenRenderer(viewport_width=512,
                                                    viewport_height=512)

        self._scene = scene

        flags = (pyrender.constants.RenderFlags.NONE
                 | pyrender.constants.RenderFlags.RGBA
                 | pyrender.constants.RenderFlags.SHADOWS_ALL)

        (img, _) = self._renderer.render(scene, flags=flags)

        img = (img / 255.0)**(1.0 / gamma)
        img = np.round(img * 255.0).astype("uint8")

        return img
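The inline colour conversion above is the standard piecewise linear-to-sRGB transfer function; a small stand-alone version for reference (the helper name linear_to_srgb is illustrative, not from the source):

import numpy as np

def linear_to_srgb(linear):
    """Map linear-light values in [0, 1] to sRGB values in [0, 1]."""
    linear = np.asarray(linear, dtype=float)
    return np.where(
        linear < 0.0031308,
        linear * 12.92,
        1.055 * np.power(linear, 1.0 / 2.4) - 0.055,
    )

# mid-grey in linear light maps to roughly 0.735 in sRGB
print(linear_to_srgb(np.array([0.0, 0.5, 1.0])))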