Example 1
def on_step(_):
    """Create visualization."""
    # show optimized joints in 3D
    # pylint: disable=cell-var-from-loop
    mv.set_dynamic_meshes([_Mesh(v=sv.r, f=[]),
                           Sphere(center=cam.t.r,
                                  radius=.1).to_mesh()]
                          + [Sphere(center=jc, radius=.01).to_mesh(vc[ijc])
                             for ijc, jc in enumerate(Jtr.r)])
    plt.figure(1, figsize=(10, 10))
    plt.subplot(1, 2, 1)
    # show optimized joints in 2D
    tmp_img = img.copy()
    for coord, target_coord in zip(
            _np.around(cam.r[smpl_ids]).astype(int),
            _np.around(j2d[cids]).astype(int)):
        if (0 <= coord[0] < tmp_img.shape[1]
                and 0 <= coord[1] < tmp_img.shape[0]):
            _cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
        if (0 <= target_coord[0] < tmp_img.shape[1]
                and 0 <= target_coord[1] < tmp_img.shape[0]):
            _cv2.circle(tmp_img, tuple(target_coord), 3, [0, 255, 0])
    plt.imshow(tmp_img)
    plt.draw()
    plt.show()
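
This on_step closure is the kind of hook that gets passed to the optimizer as a per-iteration callback; every name it touches (mv, sv, cam, Jtr, vc, img, smpl_ids, j2d, cids) comes from the enclosing fitting function. A minimal, self-contained analogue of the same callback pattern, sketched with scipy.optimize.minimize purely for illustration (an assumption; the example above uses the project's own optimizer):

import numpy as np
from scipy.optimize import minimize

def on_step(xk):
    """Per-iteration hook: inspect or visualize the current estimate."""
    print("current parameters:", xk)

# Toy objective; the callback fires once per iteration, just as the
# visualization hook above is invoked during the body fit.
minimize(lambda x: np.sum((x - 3.0) ** 2), x0=np.zeros(2), callback=on_step)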
Example 2
def render(model,
           resolution,
           cam,
           steps,
           center=(0, 0),
           segmented=False,
           use_light=False,
           path_to_mesh=None):  # pylint: disable=too-many-arguments
    """Render a sequence of views from a fitted body model."""
    assert steps >= 1
    if segmented:
        texture = _os.path.join(_os.path.dirname(__file__), '..', 'models',
                                '3D', 'mask_filled.png')
    else:
        texture = _os.path.join(_os.path.dirname(__file__), '..', 'models',
                                '3D', 'mask_filled_uniform.png')
    if path_to_mesh is None:
        mesh = _copy(_TEMPLATE_MESH)
    else:
        mesh = _copy(_Mesh(path_to_mesh))

    # render ply
    model.betas[:len(cam['betas'])] = cam['betas']
    model.pose[:] = cam['pose']
    model.trans[:] = cam['trans']

    mesh.v = model.r
    w, h = resolution[0], resolution[1]
    dist = _np.abs(cam['t'][2] - _np.mean(mesh.v, axis=0)[2])
    rn = _create_renderer(
        w=w,
        h=h,
        near=1.,
        far=20. + dist,
        rt=_np.array(cam['rt']),
        t=_np.array(cam['t']),
        f=_np.array([cam['f'], cam['f']]),
        # c=_np.array(cam['cam_c']),
        texture=texture)
    light_yrot = _np.radians(120)
    baked_mesh = bake_vertex_colors(mesh)
    base_mesh = _copy(baked_mesh)
    mesh.f = base_mesh.f
    mesh.vc = base_mesh.vc
    renderings = []
    for angle in _np.linspace(0., 2. * (1. - 1. / steps) * _np.pi, steps):
        mesh.v = _rotateY(base_mesh.v, angle)
        imtmp = _simple_renderer(rn=rn,
                                 meshes=[mesh],
                                 yrot=light_yrot,
                                 texture=texture,
                                 use_light=use_light)
        im = _np.zeros((h, w, 3))
        im[:h, :w, :] = imtmp * 255.
        renderings.append(im)
    return renderings
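
A hedged usage sketch: assuming model is the SMPL body model loaded as in Example 4 and fit is the result dict returned by run_single_fit in Example 3, whose keys ('betas', 'pose', 'trans', 't', 'rt', 'f') are exactly what render reads from cam:

# Hypothetical call; `fit` is the second return value of run_single_fit.
views = render(model, resolution=(512, 512), cam=fit, steps=4)
# `views` holds `steps` renderings of the body rotated about the Y axis.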
Example 3
def run_single_fit(img,
                   j2d,
                   conf,
                   inner_penetration=False,
                   silh=None,
                   scale_factor=1,
                   gender='neutral',
                   exp_logistic=False,
                   viz=False,
                   do_degrees=None,
                   is_gt_data=False):
    """Run the fit for one specific image."""
    model = _MODEL_NEUTRAL
    if silh is not None:
        if silh.ndim == 3:
            silh = _cv2.split(silh)[0]
        silh = _np.uint8(silh > 0)

    if do_degrees is None:
        do_degrees = []

    # create the pose prior (GMM over CMU)
    prior = _MaxMixtureCompletePrior(n_gaussians=8).get_gmm_prior()
    # get the mean pose as our initial pose
    init_pose = _np.hstack((_np.zeros(3), prior.weights.dot(prior.means)))

    if scale_factor != 1:
        img = _cv2.resize(
            img, (img.shape[1] * scale_factor, img.shape[0] * scale_factor))
        j2d[:, 0] *= scale_factor
        j2d[:, 1] *= scale_factor

    # get the center of the image (needed to estimate camera parms)
    center = _np.array([img.shape[1] / 2, img.shape[0] / 2])

    # estimate the camera parameters
    (cam, try_both_orient,
     body_orient) = initialize_camera(model,
                                      j2d,
                                      center,
                                      img,
                                      _T_GUESS,
                                      init_pose,
                                      conf,
                                      is_gt_data,
                                      flength=_FLENGTH_GUESS,
                                      viz=viz)

    # fit
    (sv, opt_j2d, t, rt) = optimize_on_joints(  # pylint: disable=unused-variable
        j2d,
        model,
        cam,
        img,
        prior,
        try_both_orient,
        body_orient,
        exp_logistic,
        n_betas=_N_BETAS,
        conf=conf,
        viz=viz,
        inner_penetration=inner_penetration,
        silh=silh)

    # get the optimized mesh
    m = _Mesh(v=sv.r, f=model.f)
    m.vc = [.7, .7, .9]

    dist = _np.abs(cam.t.r[2] - _np.mean(sv.r, axis=0)[2])  # pylint: disable=no-member
    h = img.shape[0]
    w = img.shape[1]
    rn = create_renderer(
        w=w,
        h=h,
        near=1.,
        far=20. + dist,
        rt=cam.rt,  # pylint: disable=no-member
        t=cam.t,  # pylint: disable=no-member
        f=cam.f,  # pylint: disable=no-member
        c=cam.c)  # pylint: disable=no-member
    light_yrot = _np.radians(120)
    images = []
    orig_v = sv.r
    for deg in do_degrees:
        aroundy = _cv2.Rodrigues(_np.array([0, _np.radians(deg), 0]))[0]
        center = orig_v.mean(axis=0)
        new_v = _np.dot((orig_v - center), aroundy)
        m.v = new_v + center
        # Now render.
        im = (simple_renderer(rn=rn, meshes=[m], yrot=light_yrot) *
              255.).astype('uint8')
        images.append(im)
    # collect the optimized parameters
    result = {
        'j2d': opt_j2d,
        't': t,
        'rt': rt,
        'f': _FLENGTH_GUESS,
        'pose': sv.pose.r,
        'betas': sv.betas.r,
        'trans': sv.trans.r
    }
    return sv, result, images
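
A hedged driver sketch for run_single_fit; the file names are placeholders, and j2d/conf are assumed to be the 2D joint detections and per-joint confidences the function expects:

import cv2
import numpy as np

image = cv2.imread('person.jpg')            # placeholder input image
j2d = np.load('joints_2d.npy')              # (n_joints, 2) pixel coordinates
conf = np.load('joint_confidences.npy')     # per-joint detection confidence
sv, result, renders = run_single_fit(image, j2d, conf, do_degrees=[0., 90.])
# `result` bundles the optimized pose, betas, trans and camera parameters;
# `renders` holds one rendering per requested viewing angle.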
Example 4
# Aliased module-level imports, inferred from how they are used below.
# _Mesh (the project's mesh class) is imported elsewhere in the module.
import os as _os
import os.path as _path
import sys
import logging as _logging

import cv2 as _cv2
import numpy as _np

from config import SMPL_FP
sys.path.insert(0, SMPL_FP)
try:
    from smpl.serialization import load_model  # pylint: disable=import-error
except ImportError:
    try:
        from psbody.smpl.serialization import load_model  # pylint: disable=import-error
    except ImportError:
        from smpl_webuser.serialization import load_model

_LOGGER = _logging.getLogger(__name__)
MODEL_NEUTRAL_PATH = _os.path.join(
    _path.dirname(__file__), '..', 'models', '3D',
    'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')
MODEL_NEUTRAL = load_model(MODEL_NEUTRAL_PATH)
_TEMPLATE_MESH = _Mesh(filename=_os.path.join(_os.path.dirname(
    __file__), '..', 'models', '3D', 'template-bodyparts.ply'))


def _rodrigues_from_seq(angles_seq):
    """Create rodrigues representation of angles."""
    rot = _np.eye(3)
    for angle in angles_seq[::-1]:
        rot = rot.dot(_cv2.Rodrigues(angle)[0])
    return _cv2.Rodrigues(rot)[0].flatten()
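
A small illustrative call (not part of the original module): compose two axis-angle rotations into a single Rodrigues vector.

angles = [_np.array([0., _np.pi / 2., 0.]),   # rotation about Y
          _np.array([_np.pi / 4., 0., 0.])]   # rotation about X
rod = _rodrigues_from_seq(angles)             # flattened (3,) axis-angle result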


def _create_renderer(  # pylint: disable=too-many-arguments
        w=640,
        h=480,
        rt=_np.zeros(3),
        t=_np.zeros(3),
Example 5
def render(model,
           resolution,
           cam,
           steps,
           center=(0, 0),
           segmented=False,
           use_light=False,
           path_to_mesh=None,
           color_type="segment"):  # pylint: disable=too-many-arguments
    """Render a sequence of views from a fitted body model."""
    assert steps >= 1
    if segmented:
        texture = _os.path.join(_os.path.dirname(__file__),
                                '..', 'models', '3D', 'mask_filled.png')
    else:
        texture = _os.path.join(_os.path.dirname(__file__),
                                '..', 'models', '3D', 'mask_filled_uniform.png')
    if path_to_mesh is None:
        mesh = _copy(_TEMPLATE_MESH)
    else:
        mesh = _copy(_Mesh(path_to_mesh))

    # render ply
    model.betas[:len(cam['betas'])] = cam['betas']
    model.pose[:] = cam['pose']
    model.trans[:] = cam['trans']

    mesh.v = model.r
    w, h = resolution[0], resolution[1]
    dist = _np.abs(cam['t'][2] - _np.mean(mesh.v, axis=0)[2])
    rn = _create_renderer(w=w,
                          h=h,
                          near=1.,
                          far=20.+dist,
                          rt=_np.array(cam['rt']),
                          t=_np.array(cam['t']),
                          f=_np.array([cam['f'], cam['f']]),
                          # c=_np.array(cam['cam_c']),
                          texture=None,
                          depth=(color_type == "depth"))
    light_yrot = _np.radians(120)
    meshes = []
    if color_type == "segment" or color_type == "depth":
        baked_mesh = bake_vertex_colors(mesh)
        base_mesh = _copy(baked_mesh)
        mesh.f = base_mesh.f
        mesh.vc = base_mesh.vc
        meshes = [_copy(mesh)]
    elif color_type == "weight":
        base_mesh = _copy(mesh)
        npweights = _np.array(model.weights)
        print(npweights)
        for i in range(24):
            # store the skinning weight of joint i in the red channel
            vc = _np.zeros_like(mesh.v)
            vc[:, 0] = npweights[:, i]
            mesh.vc = vc
            meshes.append(_copy(mesh))
    else:
        _LOGGER.warning("color type '%s' is not yet supported", color_type)

    renderings = []
    for mesh in meshes:
        for angle in _np.linspace(0., 2. * (1. - 1. / steps) * _np.pi, steps):
            mesh.v = _rotateY(base_mesh.v, angle)
            imtmp = _simple_renderer(rn=rn,
                                     meshes=[mesh],
                                     yrot=light_yrot,
                                     texture=None,
                                     use_light=use_light,
                                     depth=(color_type == "depth"))
            if color_type == "depth":
                # write the raw depth image to /tmp (hard-coded debug output)
                import scipy.misc as sm
                sm.imsave("/tmp/depth.png", imtmp)
                _np.set_printoptions(threshold=40)
                im = _np.zeros((h, w))
                im[:h, :w] = imtmp
            else:
                im = _np.zeros((h, w, 3))
                im[:h, :w, :] = imtmp * 255.
            renderings.append(im)
    return renderings
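
The color_type switch selects what is rendered: "segment" bakes the body-part vertex colors, "weight" produces one mesh per SMPL joint (24 in total) with that joint's skinning weight in the red channel, and "depth" yields single-channel depth images. A hedged call sketch, with the same assumptions about model and fit as in the sketch after Example 2:

# Hypothetical calls; each returns a list of images.
part_views = render(model, (512, 512), fit, steps=4, color_type="segment")
weight_views = render(model, (512, 512), fit, steps=1, color_type="weight")  # 24 images
depth_views = render(model, (512, 512), fit, steps=1, color_type="depth")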