Example 1
def viz_mean_excitability(sid, rid):
    regpos, w, obsmask, surfaces, contacts = read_structural_data(sid, rid)
    vlines, vmeshes, vcontacts = viz_structure(regpos, w, surfaces, contacts)

    # Load results
    nreg = regpos.shape[0]
    res = io.parse_csv([
        f"run/solo/INC/vep/id{sid:03d}/output/r{rid:02d}_all/chain_{chain}.csv"
        for chain in [1, 2]
    ])
    cinf = res['c']
    # cmean = np.mean(cinf, axis=0)
    # pexc = np.mean(cinf > 2.0, axis=0)
    scalar = np.percentile(cinf, 50, axis=0)

    # Regions
    cmap = 'plasma'
    # vmin = np.min(scalar)
    # vmax = np.max(scalar)
    vmin, vmax = -2, 2

    vpoints = []
    for i in range(nreg):
        if not obsmask[i]:
            vpoints.append(
                vp.Sphere(regpos[i],
                          r=4,
                          c=vp.colorMap(scalar[i], cmap, vmin, vmax)))
        else:
            vpoints.append(
                vp.Cube(regpos[i],
                        side=6,
                        c=vp.colorMap(scalar[i], cmap, vmin, vmax)))

    # Near-invisible point cloud used only to carry the scalar bar
    vbar = vp.Points(regpos, r=0.01).pointColors(scalar,
                                                 cmap=cmap,
                                                 vmin=vmin,
                                                 vmax=vmax)
    vbar.addScalarBar(horizontal=True, pos=(0.8, 0.02))

    def slider(widget, event):
        percentile = widget.GetRepresentation().GetValue()
        scalar = np.percentile(cinf, percentile, axis=0)
        for i in range(nreg):
            vpoints[i].color(vp.colorMap(scalar[i], cmap, vmin, vmax))

    vplotter = vp.Plotter(axes=0)
    vplotter.addSlider2D(slider,
                         0.,
                         100.,
                         value=50.0,
                         pos=3,
                         title="Percentile")
    vplotter.show(vpoints, vlines, vmeshes, vcontacts, vbar)
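The core of this example, per-region glyphs recolored in place from a 2D slider callback, can be isolated. A minimal sketch, assuming the vp alias is an old-style camel-case vedo import (import vedo as vp), matching the API used throughout these examples; the random arrays stand in for the region positions and posterior samples:

# Minimal sketch of the slider-driven recoloring pattern from Example 1.
# The synthetic regpos/cinf arrays are placeholders for the real data.
import numpy as np
import vedo as vp

nreg = 30
regpos = np.random.uniform(-50, 50, size=(nreg, 3))  # fake region centers
cinf = np.random.normal(0, 1, size=(500, nreg))      # fake posterior samples

cmap, vmin, vmax = 'plasma', -2, 2
scalar = np.percentile(cinf, 50, axis=0)
vpoints = [vp.Sphere(regpos[i], r=4,
                     c=vp.colorMap(scalar[i], cmap, vmin, vmax))
           for i in range(nreg)]

def slider(widget, event):
    # Recolor the existing spheres in place; no scene rebuild needed.
    p = widget.GetRepresentation().GetValue()
    s = np.percentile(cinf, p, axis=0)
    for i in range(nreg):
        vpoints[i].color(vp.colorMap(s[i], cmap, vmin, vmax))

vplotter = vp.Plotter(axes=0)
vplotter.addSlider2D(slider, 0., 100., value=50.0, pos=3, title="Percentile")
vplotter.show(vpoints)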
Example 2
def main(_):
  aist_dataset = AISTDataset(anno_dir=FLAGS.anno_dir)

  for env_name, seq_names in aist_dataset.mapping_env2seq.items():
    # Init camera parameters
    cgroup = init_env_cameras()

    # Select a subset of sequences for optimizing the camera parameters
    # (note: random.choices samples with replacement, so repeats are possible).
    seq_names = random.choices(seq_names, k=20)

    # Load 2D keypoints
    keypoints2d_all = []
    for seq_name in seq_names:
      keypoints2d_raw, _, _ = AISTDataset.load_keypoint2d(
          aist_dataset.keypoint2d_dir, seq_name=seq_name)
      # Special cases
      if seq_name == 'gBR_sBM_cAll_d04_mBR0_ch01':
        keypoints2d_raw[4] = np.nan  # not synced view
      if seq_name == 'gJB_sBM_cAll_d07_mJB3_ch05':
        keypoints2d_raw[6] = np.nan  # size 640x480
      keypoints2d_all.append(keypoints2d_raw)
    keypoints2d_all = np.concatenate(keypoints2d_all, axis=1)

    # Filter keypoints to select those best points
    kpt_thre = 0.5
    ignore_idxs = np.where(keypoints2d_all[:, :, :, 2] < kpt_thre)
    keypoints2d_all[ignore_idxs[0], ignore_idxs[1], ignore_idxs[2], :] = np.nan
    keypoints2d_all = keypoints2d_all[..., 0:2]

    # Apply bundle adjustment and dump the camera parameters
    nviews = keypoints2d_all.shape[0]
    cgroup.bundle_adjust_iter(
        keypoints2d_all.reshape(nviews, -1, 2),
        n_iters=20,
        n_samp_iter=500,
        n_samp_full=5000,
        verbose=True)
    os.makedirs(FLAGS.save_dir, exist_ok=True)
    camera_file = os.path.join(FLAGS.save_dir, f'{env_name}.json')
    with open(camera_file, 'w') as f:
      json.dump([camera.get_dict() for camera in cgroup.cameras], f)

    # visualize the world with one frame
    if FLAGS.visualize:
      print("seq_name:", seq_name)
      axes_all = plot_cameras(cgroup)
      keypoints3d = cgroup.triangulate(
          keypoints2d_all[:, 0].reshape(nviews, -1, 2)
      ).reshape(-1, 3)
      vedo.show(
        *axes_all, vedo.Points(keypoints3d, r=12),
        interactive=True, axes=True)
      vedo.clear()
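The confidence filter in the middle of main() is a useful trick on its own: joints below threshold become NaN, so the downstream bundle adjustment can skip them as missing observations. A stand-alone numpy sketch, with the (nviews, nframes, njoints, 3) layout assumed from the indexing above:

# Stand-alone sketch of the keypoint confidence filter used above.
# The (nviews, nframes, njoints, 3) layout with (x, y, score) per joint
# is assumed from the snippet's indexing; the data here is synthetic.
import numpy as np

keypoints2d_all = np.random.rand(9, 100, 17, 3)  # x, y, confidence

kpt_thre = 0.5
ignore_idxs = np.where(keypoints2d_all[:, :, :, 2] < kpt_thre)
keypoints2d_all[ignore_idxs[0], ignore_idxs[1], ignore_idxs[2], :] = np.nan
keypoints2d_all = keypoints2d_all[..., 0:2]  # drop the confidence channel

print(np.isnan(keypoints2d_all[..., 0]).mean())  # fraction filtered out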
Example 3
    def __init__(self, boids=()):
        self.neighbors = 20
        self.cohesion = 0.5
        self.separation = 0.3

        self.boids = list(boids)
        self.actor = None
        self.colors = [vedo.getColor(b.color) for b in boids]

        self.actor = vedo.Points([b.position for b in self.boids],
                                 r=8,
                                 c=self.colors)
Example 4
def knNeighbors(vertices, nNeighbors):
    """
    knNeighbors: reduces noise by grouping each point with its nearest neighbors
    parameters: the vertices and the number of neighbors to take into account
    return: a new point cloud
    """
    vertices = vedo.Points(vertices)
    newPointCloud = []
    for i in range(vertices.N()):
        pt = vertices.points()[i]
        ids = vertices.closestPoint(pt, N=nNeighbors, returnIds=True)
        newPointCloud.append(
            np.mean(vertices.points()[ids], axis=0).tolist())
    newPointCloud = np.array(newPointCloud)
    return newPointCloud
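The loop above queries the closest points one vertex at a time; with scipy available, the same smoothing can be done in a single vectorized query. A sketch, assuming scipy.spatial.cKDTree (not used in the original):

# A vectorized equivalent of knNeighbors, assuming scipy is available.
# Each point is replaced by the centroid of its nNeighbors nearest
# neighbors (the query includes the point itself, as closestPoint does).
import numpy as np
from scipy.spatial import cKDTree

def kn_neighbors_kdtree(vertices, nNeighbors):
    pts = np.asarray(vertices, dtype=float)
    tree = cKDTree(pts)
    _, idx = tree.query(pts, k=nNeighbors)  # (N, nNeighbors) neighbor indices
    return pts[idx].mean(axis=1)            # centroid per point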
Example 5
def keyfunc(key):
    global data_idx  # module-level index; the setup code appears in Example 11
    if key == 'Left' and data_idx > 0:
        data_idx -= 1
    elif key == 'Right' and data_idx < data_length - 1:
        data_idx += 1

    pos, path = getpos(data_idx)
    plt.clear()

    plt.add(vedo.Points(pos, c='b'))
    plt.add(vedo.Text2D(path, pos=(.02, .02), c='k'))

    plt.add(boundary_mesh)
    plt.render()
Example 6
def viz_excitability(sid, rid):
    regpos, w, obsmask, surfaces, contacts = read_structural_data(sid, rid)
    vlines, vmeshes, vcontacts = viz_structure(regpos, w, surfaces, contacts)

    # Load results
    nreg = regpos.shape[0]
    res = io.parse_csv([
        f"run/solo/INC/vep/id{sid:03d}/output/r{rid:02d}_all/chain_{chain}.csv"
        for chain in [1, 2]
    ])
    cinf = res['c']

    ctr = 2.0
    pexc = np.mean(cinf > ctr, axis=0)

    # Regions
    cmap = 'Reds'
    vmin, vmax = 0, 0.15
    # vmin, vmax = -2, 0

    vpoints = []
    for i in range(nreg):
        if not obsmask[i]:
            vpoints.append(
                vp.Sphere(regpos[i],
                          r=4,
                          c=vp.colorMap(pexc[i], cmap, vmin, vmax)))
        else:
            vpoints.append(
                vp.Cube(regpos[i],
                        side=6,
                        c=vp.colorMap(pexc[i], cmap, vmin, vmax)))

    vbar = vp.Points(regpos, r=0.01).pointColors(pexc,
                                                 cmap=cmap,
                                                 vmin=vmin,
                                                 vmax=vmax)
    vbar.addScalarBar(horizontal=True, pos=(0.8, 0.02))

    def cslider(widget, event):
        ctr = widget.GetRepresentation().GetValue()
        pexc = np.mean(cinf > ctr, axis=0)
        for i in range(nreg):
            vpoints[i].color(vp.colorMap(pexc[i], cmap, vmin, vmax))

    vplotter = vp.Plotter(axes=0)
    vplotter.addSlider2D(cslider, -3.0, 3.0, value=2.0, pos=3, title="c")
    vplotter.show(vpoints, vlines, vmeshes, vcontacts, vbar)
Example 7
def viz_seizure(sid, rid):
    regpos, w, obsmask, surfaces, contacts = read_structural_data(sid, rid)
    vlines, vmeshes, vcontacts = viz_structure(regpos, w, surfaces, contacts)

    # Load results
    nreg = regpos.shape[0]
    res = io.parse_csv([
        f"run/solo/INC/vep/id{sid:03d}/output/r{rid:02d}_all/chain_{chain}.csv"
        for chain in [1, 2]
    ])
    tinf = res['t']

    t = 0.0
    psz = np.mean(tinf < t, axis=0)

    # Regions
    cmap = 'bwr'
    vmin, vmax = 0, 1

    vpoints = []
    for i in range(nreg):
        if not obsmask[i]:
            vpoints.append(
                vp.Sphere(regpos[i],
                          r=4,
                          c=vp.colorMap(psz[i], cmap, vmin, vmax)))
        else:
            vpoints.append(
                vp.Cube(regpos[i],
                        side=6,
                        c=vp.colorMap(psz[i], cmap, vmin, vmax)))

    vbar = vp.Points(regpos, r=0.01).pointColors(psz,
                                                 cmap=cmap,
                                                 vmin=vmin,
                                                 vmax=vmax)
    vbar.addScalarBar(horizontal=True, pos=(0.8, 0.02))

    def tslider(widget, event):
        t = widget.GetRepresentation().GetValue()
        psz = np.mean(tinf < t, axis=0)
        for i in range(nreg):
            vpoints[i].color(vp.colorMap(psz[i], cmap, vmin, vmax))

    vplotter = vp.Plotter(axes=0)
    vplotter.addSlider2D(tslider, 0, 90.0, value=0.0, pos=3, title="t")
    vplotter.show(vpoints, vlines, vmeshes, vcontacts, vbar)
Example 8
    def fit(self, keypoints3d, dtype='coco', verbose=True):
        """Run fitting to optimize the SMPL parameters."""
        assert dtype == 'coco', 'only support coco format for now.'
        assert len(keypoints3d.shape) == 3, 'input shape should be [N, njoints, 3]'
        mapping_target = unify_joint_mappings(dataset=dtype)
        keypoints3d = keypoints3d[:, mapping_target, :]
        keypoints3d = torch.from_numpy(keypoints3d).float().to(self.device)
        batch_size, njoints = keypoints3d.shape[0:2]

        # Init learnable smpl model
        smpl = SMPL(model_path=self.smpl_model_path,
                    gender=self.smpl_model_gender,
                    batch_size=batch_size).to(self.device)

        # Start fitting
        for step in range(self.niter):
            optimizer = self.get_optimizer(smpl, step, self.base_lr)

            output = smpl.forward()
            joints = output.joints[:, self.joints_mapping_smpl[:njoints], :]
            loss = self.metric(joints, keypoints3d)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if verbose and step % 10 == 0:
                logging.info(f'step {step:03d}; loss {loss.item():.3f};')

            if FLAGS.visualize:
                vertices = output.vertices[0].detach().cpu().numpy()  # first frame
                mesh = trimesh.Trimesh(vertices, smpl.faces)
                mesh.visual.face_colors = [200, 200, 250, 100]
                pts = vedo.Points(keypoints3d[0].detach().cpu().numpy(),
                                  r=20)  # first frame
                vedo.show(mesh, pts, interactive=False)

        # Return results
        return smpl, loss.item()
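The loop body of fit() is the standard torch recipe: forward, loss, zero_grad, backward, step (with the optimizer rebuilt each step by get_optimizer). A minimal sketch that swaps the SMPL model for a free tensor of joints, so it runs without any model files:

# Minimal sketch of the fitting loop used above, with the SMPL model
# replaced by a free tensor of 3D joints so the example is self-contained.
import torch

target = torch.randn(8, 24, 3)                      # fake target keypoints
joints = torch.zeros(8, 24, 3, requires_grad=True)  # learnable "model output"
optimizer = torch.optim.Adam([joints], lr=0.1)
metric = torch.nn.MSELoss()

for step in range(100):
    loss = metric(joints, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if step % 10 == 0:
        print(f'step {step:03d}; loss {loss.item():.3f};')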
Example 9
def start():
    """
    start: begins the reconstruction procedure
    """
    print('init...')
    # Load the images
    images = loadImages(59)

    # Perform a first transformation to initialize the reconstruction
    skel = esqueletizar(images[0])
    xyz = toXYZ(skel, 0)
    # Start the reconstruction
    xyz = reconstruction(images[5:-3], xyz, 1)
    # Optionally apply the noise filter
    #xyz = knNeighbors(xyz, 5)

    # Visualize the resulting point cloud
    vertex = vedo.Points(xyz, r=2.0)
    scalars = vertex.points()[:, 0]
    # Apply the 'jet' false-color map
    vertex.pointColors(-scalars, cmap="jet")
    vedo.show(vertex, bg='k', axes=9)
Example 10
def main():
    args = get_args()
    if args.aist:
        import vedo

    if not os.path.exists(args.json_dir):
        os.makedirs(args.json_dir)

    if args.aist:
        print("test with AIST++ dataset!")
        music_data, dance_data, dance_names = load_data_aist(
            args.input_dir, interval=None, rotmat=args.rotmat)
    else:
        music_data, dance_data, dance_names = load_data(
            args.input_dir, interval=None)

    device = torch.device('cuda' if args.cuda else 'cpu')

    test_loader = torch.utils.data.DataLoader(
        DanceDataset(music_data, dance_data),
        batch_size=args.batch_size,
        collate_fn=paired_collate_fn
    )

    generator = Generator(args.model, device)
    
    if args.aist and args.rotmat:
        from smplx import SMPL
        smpl = SMPL(model_path="/media/ruilongli/hd1/Data/smpl/", gender='MALE', batch_size=1)

    results = []
    random_id = 0  # np.random.randint(0, 1e4)
    for i, batch in enumerate(tqdm(test_loader, desc='Generating dance poses')):
        # Prepare data
        src_seq, src_pos, tgt_pose = map(lambda x: x.to(device), batch)
        pose_seq = generator.generate(src_seq[:, :1200], src_pos[:, :1200])  # first 20 secs
        results.append(pose_seq)

        if args.aist:
            np_dance = pose_seq[0].data.cpu().numpy()
            if args.rotmat:
                root = np_dance[:, :3]
                rotmat = np_dance[:, 3:].reshape([-1, 3, 3])
                rotmat = get_closest_rotmat(rotmat)
                smpl_poses = rotmat2aa(rotmat).reshape(-1, 24, 3)
                np_dance = smpl.forward(
                    global_orient=torch.from_numpy(smpl_poses[:, 0:1]).float(),
                    body_pose=torch.from_numpy(smpl_poses[:, 1:]).float(),
                    transl=torch.from_numpy(root).float(),
                ).joints.detach().numpy()[:, 0:24, :]
            else:
                root = np_dance[:, :3]
                np_dance = np_dance + np.tile(root, (1, 24))
                np_dance[:, :3] = root
                np_dance = np_dance.reshape(np_dance.shape[0], -1, 3)
            print(np_dance.shape)  # (nframes, 24, 3)
            # save
            save_path = os.path.join(args.json_dir, dance_names[i]+f"_{random_id:04d}")
            np.save(save_path, np_dance)
            # visualize
            for frame in np_dance:
                pts = vedo.Points(frame, r=20)
                vedo.show(pts, interactive=False)
                time.sleep(0.1)
            exit()

    if not args.aist:
        # Visualize generated dance poses
        np_dances = []
        for i in range(len(results)):
            np_dance = results[i][0].data.cpu().numpy()
            root = np_dance[:, 2*8:2*9]
            np_dance = np_dance + np.tile(root, (1, 25))
            np_dance[:, 2*8:2*9] = root
            np_dances.append(np_dance)
        write2json(np_dances, dance_names, args)
        visualize(args)
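In the non-rotmat branch, the flattened pose vector stores each joint relative to the root; adding np.tile(root, (1, 24)) makes the joints absolute, and the root row is then restored. A small numpy sketch of that conversion, with made-up shapes:

# Sketch of the root-relative to absolute joint conversion used above.
# Each np_dance row is [root_xyz, joint_xyz_rel, ...] flattened (24 joints).
import numpy as np

np_dance = np.random.rand(5, 72)                       # (nframes, 24 * 3)
root = np_dance[:, :3]
np_dance = np_dance + np.tile(root, (1, 24))           # shift every joint by root
np_dance[:, :3] = root                                 # undo the shift on the root
np_dance = np_dance.reshape(np_dance.shape[0], -1, 3)  # (nframes, 24, 3)
print(np_dance.shape)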
Example 11

plt = vedo.Plotter(interactive=False)

pos, path = getpos(data_idx)

pts = vedo.Points(pos, c='b')
plt.keyPressFunction = keyfunc

data_info = vedo.Text2D(path, pos=(.02, .02), c='k')

verts = [(-1.5, 0, -1.5), (-1.5, 0, 1.5), (1.5, 0, 1.5), (1.5, 0, -1.5),
         (-1.5, 5, -1.5), (-1.5, 5, 1.5), (1.5, 5, 1.5), (1.5, 5, -1.5)]
# faces = [(3,2,1,0),(0,1,5,4),(4,5,6,7),(2,3,7,6),(1,2,6,5),(4,7,3,0)]
faces = [(3, 2, 1, 0), (0, 1, 5, 4), (4, 7, 3, 0)]

boundary_mesh = vedo.Mesh([verts, faces]).lineColor('black').lineWidth(1)

plt += boundary_mesh
plt += pts
plt += data_info
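Examples 5 and 11 come from the same script: keyfunc mutates the scene, and the module-level code wires it up through plt.keyPressFunction. A minimal self-contained sketch of the pattern, with getpos() and boundary_mesh replaced by synthetic frames; the same old-style vedo API as the snippets is assumed:

# Minimal sketch of the keypress-navigation pattern from Examples 5 and 11.
import numpy as np
import vedo

frames = [np.random.rand(100, 3) for _ in range(10)]  # fake per-frame points
data_idx = 0

def keyfunc(key):
    global data_idx
    if key == 'Left' and data_idx > 0:
        data_idx -= 1
    elif key == 'Right' and data_idx < len(frames) - 1:
        data_idx += 1
    # Rebuild the scene for the new frame and redraw.
    plt.clear()
    plt.add(vedo.Points(frames[data_idx], c='b'))
    plt.add(vedo.Text2D(f"frame {data_idx}", pos=(.02, .02), c='k'))
    plt.render()

plt = vedo.Plotter(interactive=False)
plt.keyPressFunction = keyfunc
plt += vedo.Points(frames[data_idx], c='b')
plt.show(interactive=True)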
Example 12
def show_stl_pts(stl_path, pts_path):
    stl_model = vedo.load(stl_path).c("magenta")
    pts_point = get_gum_line_pts(pts_path)
    point = vedo.Points(pts_point.reshape(-1, 3)).pointSize(10).c("green")
    vedo.show(stl_model, point)
Example 13
m = pymeshlab.Mesh(vertex_matrix=pts)

ms = pymeshlab.MeshSet()
ms.add_mesh(m)

p = pymeshlab.Percentage(2)
ms.surface_reconstruction_ball_pivoting(ballradius=p)
# ms.compute_normals_for_point_sets()

mlab_mesh = ms.current_mesh()
reco_mesh = vedo.Mesh(mlab_mesh).computeNormals().flat().backColor('t')

vedo.show(
    __doc__,
    vedo.Points(pts),
    reco_mesh,
    axes=True,
    bg2='blue9',
    title="vedo + pymeshlab",
)

################################################################################
# Full list of filters, https://pymeshlab.readthedocs.io/en/latest/filter_list.html
#
# MeshLab offers plenty of useful filters, among which:
#
# ambient_occlusion
# compute_curvature_principal_directions
# colorize_by_geodesic_distance_from_a_given_point
# colorize_by_border_distance
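The same recipe can be made self-contained by sampling synthetic points on a unit sphere; the filter names below follow the pymeshlab release used above (newer releases renamed them with a generate_ prefix):

# Self-contained variant of the pymeshlab + vedo recipe above.
# Only the input points are synthetic; the calls match the example.
import numpy as np
import pymeshlab
import vedo

pts = np.random.randn(2000, 3)
pts /= np.linalg.norm(pts, axis=1, keepdims=True)  # points on the unit sphere

ms = pymeshlab.MeshSet()
ms.add_mesh(pymeshlab.Mesh(vertex_matrix=pts))
ms.surface_reconstruction_ball_pivoting(ballradius=pymeshlab.Percentage(5))

reco = vedo.Mesh(ms.current_mesh()).computeNormals().flat()
vedo.show(vedo.Points(pts), reco, axes=True)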
Example 14
def viz_param_manifold(filename, size):
    data = np.load(filename)

    vline = vp.Tube(data['boundary_hns'], r=0.08)
    vline.color('g')

    # HNS manifold
    vmesh_hns = vp.Mesh([data['verts_hns'], data['triangs_hns']])
    k = 3
    # Standard k-dimensional normal density evaluated at the mesh vertices
    prior = (2 * np.pi)**(-k / 2) * np.exp(
        -0.5 * np.sum(vmesh_hns.points()**2, axis=1))
    vmesh_hns.pointColors(prior, cmap='Reds', vmin=0)
    vmesh_hns.addScalarBar(horizontal=True,
                           nlabels=6,
                           c='k',
                           pos=(0.74, 0.01),
                           titleFontSize=44)
    vmesh_hns.scalarbar.SetLabelFormat("%.2g")
    vmesh_hns.scalarbar.SetBarRatio(1.0)

    # Inverted HNS manifold
    vmesh_hnsi = vp.Mesh([data['verts_hnsi'], data['triangs_hnsi']])
    # vmesh_hnsi.color([0.68, 0.68, 0.68])
    vmesh_hnsi.color([0.9, 0.9, 0.9]).alpha(0.0)

    # Invisible points to set the extent
    vpoints = vp.Points([(-5.01, -5.01, -5.01), (5.01, 5.01, 5.01)]).alpha(0.0)

    vplotter = vp.Plotter(offscreen=True,
                          size=size,
                          axes=dict(xyGrid=True,
                                    yzGrid=True,
                                    zxGrid=True,
                                    xTitleSize=0,
                                    yTitleSize=0,
                                    zTitleSize=0,
                                    xHighlightZero=True,
                                    yHighlightZero=True,
                                    zHighlightZero=True,
                                    xHighlightZeroColor='b',
                                    yHighlightZeroColor='b',
                                    zHighlightZeroColor='b',
                                    numberOfDivisions=10,
                                    axesLineWidth=5,
                                    tipSize=0.02,
                                    gridLineWidth=2,
                                    xLabelSize=0.05,
                                    yLabelSize=0.05,
                                    zLabelSize=0.05,
                                    xLabelOffset=0.05,
                                    yLabelOffset=0.05,
                                    zLabelOffset=0.0,
                                    zTitleRotation=225))
    vlabels = [
        vp.Text2D("H", (0.09 * size[0], 0.10 * size[1]), s=3, font='Arial'),
        vp.Text2D("N", (0.87 * size[0], 0.16 * size[1]), s=3, font='Arial'),
        vp.Text2D("S", (0.49 * size[0], 0.90 * size[1]), s=3, font='Arial')
    ]

    k = 2
    vecs = np.array([[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0]],
                     [[k, -k, 0, 0, 0, 0], [0, 0, k, -k, 0, 0],
                      [0, 0, 0, 0, k, -k]]])
    varrows = vp.Arrows(vecs[0].T, vecs[1].T, s=1.2, c='k')

    vp.show([vline, vmesh_hns, vmesh_hnsi, vpoints, varrows] + vlabels,
            camera=dict(pos=(16, 13, 20),
                        focalPoint=(0, 0, 1.5),
                        viewup=(0, 0, 1)))

    img = vp.screenshot(None, scale=1, returnNumpy=True)
    vp.clear()
    vp.closePlotter()

    return img
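The closing lines show the offscreen-render-to-array pattern: an offscreen Plotter, a show() with an explicit camera, then screenshot(returnNumpy=True). A minimal sketch with a placeholder sphere, assuming the vp alias is the same old-style vedo import as in the snippet:

# Minimal sketch of the offscreen-render-to-array pattern used above.
import vedo as vp

vplotter = vp.Plotter(offscreen=True, size=(800, 800))
vp.show(vp.Sphere(), camera=dict(pos=(4, 4, 4),
                                 focalPoint=(0, 0, 0),
                                 viewup=(0, 0, 1)))
img = vp.screenshot(None, scale=1, returnNumpy=True)  # RGB image as ndarray
vp.clear()
vp.closePlotter()
print(img.shape)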
Example 15
def make_video(sid, rid, video_file):
    regpos, w, obsmask, surfaces, contacts = read_structural_data(sid, rid)
    vlines, vmeshes, vcontacts = viz_structure(regpos, w, surfaces, contacts)

    # Load results
    res = io.parse_csv([
        f"run/solo/INC/vep/id{sid:03d}/output/r{rid:02d}_all/chain_{chain}.csv"
        for chain in [1, 2]
    ])
    tinf = res['t']

    t = 0.0
    psz = np.mean(tinf < t, axis=0)
    nreg = regpos.shape[0]

    # Regions
    cmap = 'bwr'
    vpoints = []
    for i in range(nreg):
        if not obsmask[i]:
            vpoints.append(
                vp.Sphere(regpos[i], r=4, c=vp.colorMap(psz[i], cmap, 0, 1)))
        else:
            vpoints.append(
                vp.Cube(regpos[i], side=6, c=vp.colorMap(psz[i], cmap, 0, 1)))

    vbar = vp.Points(regpos, r=0.01).pointColors(psz,
                                                 cmap=cmap,
                                                 vmin=0,
                                                 vmax=1)
    vbar.addScalarBar(horizontal=True, pos=(0.8, 0.02))
    vtext = vp.Text2D(f"t = {t:4.1f} s", pos=0, s=2, c='black')

    center = np.mean(regpos, axis=0)
    dist = 2.5 * (np.max(regpos[:, 1]) - np.min(regpos[:, 1]))

    # Video -------------------------------------------------------
    vplotter = vp.Plotter(axes=0,
                          interactive=0,
                          offscreen=True,
                          size=(1800, 1800))
    nframes = 3000

    vplotter += vpoints
    vplotter += vlines
    vplotter += vmeshes

    video = vp.Video(name=video_file, duration=90)
    ratios = np.array([30, 3, 5, 30, 5, 3, 30, 10])
    frames = (nframes * ratios / np.sum(ratios)).astype(int)

    # Run and pause
    animate(vplotter,
            video,
            frames[0],
            vpoints,
            tinf,
            pos=center + dist * np.r_[0, 0, 1],
            foc=center,
            viewup=(0, 1, 1),
            prange=(0, 45),
            time=lambda p: p)
    animate(vplotter,
            video,
            frames[1],
            vpoints,
            tinf,
            pos=center + dist * np.r_[0, 0, 1],
            foc=center,
            viewup=(0, 1, 1),
            time=45.)

    # Fly around
    pos = lambda angle: center + dist * np.array(
        [0, -np.sin(angle), np.cos(angle)])
    animate(vplotter,
            video,
            frames[2],
            vpoints,
            tinf,
            pos=pos,
            foc=center,
            viewup=(0, 1, 1),
            prange=(0, np.pi / 2),
            time=45.,
            endpoint=False)

    pos = lambda angle: center + dist * np.array(
        [-np.sin(angle), -np.cos(angle), 0])
    animate(vplotter,
            video,
            frames[3],
            vpoints,
            tinf,
            pos=pos,
            foc=center,
            viewup=(0, 0, 1),
            prange=(0, 2 * np.pi),
            time=45.)

    pos = lambda angle: center + dist * np.array(
        [0, -np.sin(angle), np.cos(angle)])
    animate(vplotter,
            video,
            frames[4],
            vpoints,
            tinf,
            pos=pos,
            foc=center,
            viewup=(0, 1, 1),
            prange=(np.pi / 2, 0),
            time=45.,
            startpoint=False)

    # Pause + run + pause
    animate(vplotter,
            video,
            frames[5],
            vpoints,
            tinf,
            pos=center + dist * np.r_[0, 0, 1],
            foc=center,
            viewup=(0, 1, 1),
            time=45.)
    animate(vplotter,
            video,
            frames[6],
            vpoints,
            tinf,
            pos=center + dist * np.r_[0, 0, 1],
            foc=center,
            viewup=(0, 1, 1),
            prange=(45, 90),
            time=lambda p: p)
    animate(vplotter,
            video,
            frames[7],
            vpoints,
            tinf,
            pos=center + dist * np.r_[0, 0, 1],
            foc=center,
            viewup=(0, 1, 1),
            time=90.)

    video.close()
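The animate() helper is not shown in this snippet; presumably it moves the camera or recolors the actors and grabs a frame per step. A minimal sketch of the offscreen video pattern, assuming the Video.addFrame call from the same vedo generation as these examples:

# Minimal sketch of the offscreen video pattern above, orbiting a sphere.
# Video.addFrame() is assumed from the old-style vedo Video API.
import numpy as np
import vedo as vp

vplotter = vp.Plotter(axes=0, interactive=0, offscreen=True, size=(600, 600))
vplotter += vp.Sphere(r=1)

video = vp.Video(name='orbit.mp4', duration=5)
for angle in np.linspace(0, 2 * np.pi, 100, endpoint=False):
    # Re-render from a camera orbiting the origin, then record the frame.
    vplotter.show(camera=dict(pos=(4 * np.cos(angle), 4 * np.sin(angle), 2),
                              focalPoint=(0, 0, 0),
                              viewup=(0, 0, 1)))
    video.addFrame()
video.close()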
Example 16
def viz_observation_manifold(t3, tlim, size):
    tmin = 0
    tmax = 2 * tlim

    # tlim line
    vline1 = vp.Tube([[tmin, tlim, t3], [tmax, tlim, t3]], r=2.0)
    vline1.color('g')

    # t = 0 line
    vline2 = vp.Tube([[tmin, tlim, t3], [tmin, tmax, t3]], r=2.0)
    vline2.color((1, 1, 1))

    # Manifold
    verts = [[tmin, tlim, t3], [tmax, tlim, t3], [tmin, tmax, t3],
             [tmax, tmax, t3]]
    triangs = [[0, 1, 3], [0, 3, 2]]
    vmesh1 = vp.Mesh([verts, triangs])
    vmesh1.color((1, 1, 1))

    # Inverse manifold
    verts = [[tmin, tmin, t3], [tmax, tmin, t3], [tmin, tlim, t3],
             [tmax, tlim, t3]]
    triangs = [[0, 1, 3], [0, 3, 2]]
    vmesh2 = vp.Mesh([verts, triangs])
    vmesh2.color((0.9, 0.9, 0.9)).alpha(0.0)

    # Invisible points to set the extent
    vpoints = vp.Points([(tmin - 0.1, tmin - 0.1, tmin - 0.1),
                         (1.01 * tmax, 1.01 * tmax, 1.01 * tmax)]).alpha(0.0)

    lpos = [(p, str(p)) for p in [0, 50, 100, 150]]

    vplotter = vp.Plotter(offscreen=True,
                          size=size,
                          axes=dict(xyGrid=True,
                                    yzGrid=True,
                                    zxGrid=True,
                                    xTitleSize=0,
                                    yTitleSize=0,
                                    zTitleSize=0,
                                    xPositionsAndLabels=lpos,
                                    yPositionsAndLabels=lpos,
                                    zPositionsAndLabels=lpos[1:],
                                    axesLineWidth=5,
                                    tipSize=0.02,
                                    gridLineWidth=2,
                                    xLabelSize=0.05,
                                    yLabelSize=0.05,
                                    zLabelSize=0.05,
                                    xLabelOffset=0.05,
                                    yLabelOffset=0.05,
                                    zLabelOffset=0.0,
                                    zTitleRotation=225))
    vlabels = [
        vp.Text2D("H", (0.09 * size[0], 0.10 * size[1]), s=3, font='Arial'),
        vp.Text2D("N", (0.87 * size[0], 0.16 * size[1]), s=3, font='Arial'),
        vp.Text2D("S", (0.49 * size[0], 0.90 * size[1]), s=3, font='Arial')
    ]

    vp.show([vline1, vline2, vmesh1, vpoints] + vlabels,
            camera=dict(pos=(378, 324, 450),
                        focalPoint=(tlim, tlim, tlim + 27),
                        viewup=(0, 0, 1)))

    img = vp.screenshot(None, scale=1, returnNumpy=True)
    vp.clear()
    vp.closePlotter()

    return img
Example 17
def main(_):
    # Parsing data info.
    aist_dataset = AISTDataset(FLAGS.anno_dir)
    video_path = os.path.join(FLAGS.video_dir, f'{FLAGS.video_name}.mp4')
    seq_name, view = AISTDataset.get_seq_name(FLAGS.video_name)
    view_idx = AISTDataset.VIEWS.index(view)

    # Parsing keypoints.
    if FLAGS.mode == '2D':  # raw keypoints detection results.
        keypoints2d, _, _ = AISTDataset.load_keypoint2d(
            aist_dataset.keypoint2d_dir, seq_name)
        keypoints2d = keypoints2d[view_idx, :, :, 0:2]

    elif FLAGS.mode == '3D':  # 3D keypoints with temporal optimization.
        keypoints3d = AISTDataset.load_keypoint3d(aist_dataset.keypoint3d_dir,
                                                  seq_name,
                                                  use_optim=True)
        nframes, njoints, _ = keypoints3d.shape
        env_name = aist_dataset.mapping_seq2env[seq_name]
        cgroup = AISTDataset.load_camera_group(aist_dataset.camera_dir,
                                               env_name)
        keypoints2d = cgroup.project(keypoints3d)
        keypoints2d = keypoints2d.reshape(9, nframes, njoints, 2)[view_idx]

    elif FLAGS.mode == 'SMPL':  # SMPL joints
        smpl_poses, smpl_scaling, smpl_trans = AISTDataset.load_motion(
            aist_dataset.motion_dir, seq_name)
        smpl = SMPL(model_path=FLAGS.smpl_dir, gender='MALE', batch_size=1)
        keypoints3d = smpl.forward(
            global_orient=torch.from_numpy(smpl_poses[:, 0:1]).float(),
            body_pose=torch.from_numpy(smpl_poses[:, 1:]).float(),
            transl=torch.from_numpy(smpl_trans).float(),
            scaling=torch.from_numpy(smpl_scaling.reshape(1, 1)).float(),
        ).joints.detach().numpy()

        nframes, njoints, _ = keypoints3d.shape
        env_name = aist_dataset.mapping_seq2env[seq_name]
        cgroup = AISTDataset.load_camera_group(aist_dataset.camera_dir,
                                               env_name)
        keypoints2d = cgroup.project(keypoints3d)
        keypoints2d = keypoints2d.reshape(9, nframes, njoints, 2)[view_idx]

    elif FLAGS.mode == 'SMPLMesh':  # SMPL Mesh
        import trimesh  # install by `pip install trimesh`
        import vedo  # install by `pip install vedo`
        smpl_poses, smpl_scaling, smpl_trans = AISTDataset.load_motion(
            aist_dataset.motion_dir, seq_name)
        smpl = SMPL(model_path=FLAGS.smpl_dir, gender='MALE', batch_size=1)
        vertices = smpl.forward(
            global_orient=torch.from_numpy(smpl_poses[:, 0:1]).float(),
            body_pose=torch.from_numpy(smpl_poses[:, 1:]).float(),
            transl=torch.from_numpy(smpl_trans).float(),
            scaling=torch.from_numpy(smpl_scaling.reshape(1, 1)).float(),
        ).vertices.detach().numpy()[0]  # first frame
        faces = smpl.faces
        mesh = trimesh.Trimesh(vertices, faces)
        mesh.visual.face_colors = [200, 200, 250, 100]

        keypoints3d = AISTDataset.load_keypoint3d(aist_dataset.keypoint3d_dir,
                                                  seq_name,
                                                  use_optim=True)
        pts = vedo.Points(keypoints3d[0], r=20)  # first frame

        vedo.show(mesh, pts, interactive=True)
        exit()

    # Visualize.
    os.makedirs(FLAGS.save_dir, exist_ok=True)
    save_path = os.path.join(FLAGS.save_dir, f'{FLAGS.video_name}.mp4')
    plot_on_video(keypoints2d, video_path, save_path, fps=60)