Example #1
def _simple_renderer(rn, meshes, yrot=0, texture=None, use_light=False):
    """Render `meshes` with the renderer `rn`, optionally textured and lit."""
    mesh = meshes[0]
    if texture is not None:
        # Fall back to planar UVs from the mesh's XY extent if it has
        # no texture coordinates of its own.
        if not hasattr(mesh, 'ft'):
            mesh.ft = _copy(mesh.f)
            vt = _copy(mesh.v[:, :2])
            vt -= _np.min(vt, axis=0).reshape((1, -1))
            vt /= _np.max(vt, axis=0).reshape((1, -1))
            mesh.vt = vt
        mesh.texture_filepath = rn.texture_image

    # Pass the mesh data (and texture coordinates, if any) to the renderer.
    if texture is not None:
        rn.set(v=mesh.v,
               f=mesh.f,
               vc=mesh.vc,
               ft=mesh.ft,
               vt=mesh.vt,
               bgcolor=_np.ones(3))
    else:
        rn.set(v=mesh.v, f=mesh.f, vc=mesh.vc, bgcolor=_np.ones(3))

    for next_mesh in meshes[1:]:
        _stack_with(rn, next_mesh, texture)

    # Construct light.
    if use_light:
        albedo = rn.vc
        rn.vc = _odr_l.LambertianPointLight(f=rn.f,
                                            v=rn.v,
                                            num_verts=len(rn.v),
                                            light_pos=_rotateY(
                                                _np.array([-200, -100, -100]),
                                                yrot),
                                            vc=albedo,
                                            light_color=_np.array([1, 1, 1]))
        # Construct Left Light
        rn.vc += _odr_l.LambertianPointLight(f=rn.f,
                                             v=rn.v,
                                             num_verts=len(rn.v),
                                             light_pos=_rotateY(
                                                 _np.array([800, 10, 300]),
                                                 yrot),
                                             vc=albedo,
                                             light_color=_np.array([1, 1, 1]))

        # Construct Right Light
        rn.vc += _odr_l.LambertianPointLight(
            f=rn.f,
            v=rn.v,
            num_verts=len(rn.v),
            light_pos=_rotateY(_np.array([-500, 500, 1000]), yrot),
            vc=albedo,
            light_color=_np.array([.7, .7, .7]))
    return rn.r
def simple_renderer(rn, meshes, yrot=0):
    """Render `meshes` with the renderer `rn`, optionally with texture."""
    mesh = meshes[0]
    if hasattr(rn, 'texture_image'):
        # Fall back to planar UVs from the mesh's XY extent if it has
        # no texture coordinates of its own.
        if not hasattr(mesh, 'ft'):
            mesh.ft = _copy(mesh.f)
            vt = _copy(mesh.v[:, :2])
            vt -= _np.min(vt, axis=0).reshape((1, -1))
            vt /= _np.max(vt, axis=0).reshape((1, -1))
            mesh.vt = vt
        mesh.texture_filepath = rn.texture_image
        rn.set(v=mesh.v,
               f=mesh.f,
               vc=mesh.vc,
               ft=mesh.ft,
               vt=mesh.vt,
               bgcolor=_np.ones(3))
    else:
        rn.set(v=mesh.v, f=mesh.f, vc=mesh.vc, bgcolor=_np.ones(3))

    for next_mesh in meshes[1:]:
        _stack_with(rn, next_mesh)  # pylint: disable=undefined-variable

    albedo = rn.vc

    # Construct Back Light (on back right corner)
    rn.vc = _odr_l.LambertianPointLight(f=rn.f,
                                        v=rn.v,
                                        num_verts=len(rn.v),
                                        light_pos=_rotateY(
                                            _np.array([-200, -100, -100]),
                                            yrot),
                                        vc=albedo,
                                        light_color=_np.array([1, 1, 1]))

    # Construct Left Light
    rn.vc += _odr_l.LambertianPointLight(f=rn.f,
                                         v=rn.v,
                                         num_verts=len(rn.v),
                                         light_pos=_rotateY(
                                             _np.array([800, 10, 300]), yrot),
                                         vc=albedo,
                                         light_color=_np.array([1, 1, 1]))

    # Construct Right Light
    rn.vc += _odr_l.LambertianPointLight(f=rn.f,
                                         v=rn.v,
                                         num_verts=len(rn.v),
                                         light_pos=_rotateY(
                                             _np.array([-500, 500, 1000]),
                                             yrot),
                                         vc=albedo,
                                         light_color=_np.array([.7, .7, .7]))
    return rn.r
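
A minimal usage sketch for simple_renderer (hypothetical values; _create_renderer and mesh are assumed to come from the same module, as in the later examples):

import numpy as _np

# Illustrative camera parameters only.
rn = _create_renderer(w=640, h=480, near=1., far=25.,
                      rt=_np.zeros(3), t=_np.array([0., 0., 3.]),
                      f=_np.array([500., 500.]),
                      texture=None)
image = simple_renderer(rn, [mesh], yrot=_np.radians(120))
# `image` is a float array in [0, 1]; multiply by 255 for an 8-bit view.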
Example #3
def render(model,
           resolution,
           cam,
           steps,
           center=(0, 0),
           segmented=False,
           use_light=False,
           path_to_mesh=None):  # pylint: disable=too-many-arguments
    """Render a sequence of views from a fitted body model."""
    assert steps >= 1
    if segmented:
        texture = _os.path.join(_os.path.dirname(__file__), '..', 'models',
                                '3D', 'mask_filled.png')
    else:
        texture = _os.path.join(_os.path.dirname(__file__), '..', 'models',
                                '3D', 'mask_filled_uniform.png')
    if path_to_mesh is None:
        mesh = _copy(_TEMPLATE_MESH)
    else:
        mesh = _copy(_Mesh(path_to_mesh))

    # render ply
    model.betas[:len(cam['betas'])] = cam['betas']
    model.pose[:] = cam['pose']
    model.trans[:] = cam['trans']

    mesh.v = model.r
    w, h = resolution[0], resolution[1]
    dist = _np.abs(cam['t'][2] - _np.mean(mesh.v, axis=0)[2])
    rn = _create_renderer(
        w=w,
        h=h,
        near=1.,
        far=20. + dist,
        rt=_np.array(cam['rt']),
        t=_np.array(cam['t']),
        f=_np.array([cam['f'], cam['f']]),
        # c=_np.array(cam['cam_c']),
        texture=texture)
    light_yrot = _np.radians(120)
    baked_mesh = bake_vertex_colors(mesh)
    base_mesh = _copy(baked_mesh)
    mesh.f = base_mesh.f
    mesh.vc = base_mesh.vc
    renderings = []
    for angle in _np.linspace(0., 2. * (1. - 1. / steps) * _np.pi, steps):
        mesh.v = _rotateY(base_mesh.v, angle)
        imtmp = _simple_renderer(rn=rn,
                                 meshes=[mesh],
                                 yrot=light_yrot,
                                 texture=texture,
                                 use_light=use_light)
        im = _np.zeros((h, w, 3))
        im[:h, :w, :] = imtmp * 255.
        renderings.append(im)
    return renderings
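
A hedged call sketch for render (placeholder values; model is assumed to be a loaded SMPL-style body model, and the cam keys mirror those read in the function body):

import numpy as _np

# In practice these parameters come from a fit such as run_single_fit
# in Example #4.
cam = {'betas': _np.zeros(10), 'pose': _np.zeros(72),
       'trans': _np.zeros(3), 'rt': [0., 0., 0.],
       't': [0., 0., 3.], 'f': 500.}
views = render(model, (512, 512), cam, steps=4, use_light=True)
# `views` holds `steps` HxWx3 float arrays, one per rotation angle.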
Example #4
def render(model,
           resolution,
           cam,
           steps,
           center=(0, 0),
           segmented=False,
           use_light=False,
           path_to_mesh=None,
           color_type="segment"):  # pylint: disable=too-many-arguments
    """Render a sequence of views from a fitted body model."""
    assert steps >= 1
    if segmented:
        texture = _os.path.join(_os.path.dirname(__file__),
                                '..', 'models', '3D', 'mask_filled.png')
    else:
        texture = _os.path.join(_os.path.dirname(__file__),
                                '..', 'models', '3D', 'mask_filled_uniform.png')
    if path_to_mesh is None:
        mesh = _copy(_TEMPLATE_MESH)
    else:
        mesh = _copy(_Mesh(path_to_mesh))

    # render ply
    model.betas[:len(cam['betas'])] = cam['betas']
    model.pose[:] = cam['pose']
    model.trans[:] = cam['trans']

    mesh.v = model.r
    w, h = resolution[0], resolution[1]
    dist = _np.abs(cam['t'][2] - _np.mean(mesh.v, axis=0)[2])
    rn = _create_renderer(w=w,
                          h=h,
                          near=1.,
                          far=20.+dist,
                          rt=_np.array(cam['rt']),
                          t=_np.array(cam['t']),
                          f=_np.array([cam['f'], cam['f']]),
                          # c=_np.array(cam['cam_c']),
                          texture=None,
                          depth=(color_type == "depth"))
    light_yrot = _np.radians(120)
    meshes = []
    if color_type == "segment" or color_type == "depth":
        baked_mesh = bake_vertex_colors(mesh)
        base_mesh = _copy(baked_mesh)
        mesh.f = base_mesh.f
        mesh.vc = base_mesh.vc
        meshes = [_copy(mesh)]
    elif color_type == "weight":
        base_mesh = _copy(mesh)
        npweights = _np.array(model.weights)
        # One mesh per joint: encode the skinning weight for joint i in
        # the red channel of the vertex colors.
        for i in range(24):
            vc = _np.zeros_like(mesh.v)
            vc[:, 0] = npweights[:, i]
            mesh.vc = vc
            meshes.append(_copy(mesh))
    else:
        _LOGGER.warning("Color type '%s' is not yet supported.", color_type)

    renderings = []
    for mesh in meshes:
        for angle in _np.linspace(0., 2. * (1. - 1. / steps) * _np.pi, steps):
            mesh.v = _rotateY(base_mesh.v, angle)
            imtmp = _simple_renderer(rn=rn,
                                     meshes=[mesh],
                                     yrot=light_yrot,
                                     texture=None,
                                     use_light=use_light,
                                     depth=(color_type == "depth"))
            if color_type == "depth":
                import scipy.misc as sm
                sm.imsave("/tmp/depth.png", imtmp)
                _np.set_printoptions(threshold=40)
                im = _np.zeros(h*w).reshape(((h, w)))
                im[:h, :w] = imtmp
            else:
                im = _np.zeros(h*w*3).reshape(((h, w, 3)))
                im[:h, :w, :] = imtmp*255.
            renderings.append(im)
    return renderings
def run_single_fit(
        img,  # pylint: disable=too-many-statements, too-many-locals
        j2d,
        scale,
        do_degrees=None):
    """Run the fit for one specific image."""
    global _DEPTH_EST, _SHAPE_EST, _ROT_EST, _POSE_EST  # pylint: disable=global-statement
    assert j2d.shape[0] == 3
    assert j2d.shape[1] == 91
    conf = j2d[2, :].copy().reshape((-1, ))
    j2d = j2d[:2, :].copy()
    j2d_norm = j2d * scale
    # Center the data.
    mean = _np.mean(j2d_norm, axis=1)
    j2d_norm = (j2d_norm.T - mean + 513. / 2.).T
    _LOGGER.debug("Running fit...")
    if do_degrees is None:
        do_degrees = []
    # Prepare the estimators if necessary. Each *_EST entry is a list of
    # [process, shared input array, shared result array, command queue,
    #  lock, done queue].
    if _DEPTH_EST is None:
        _DEPTH_EST = [
            None,
            _pymp.shared.array(j2d.shape, dtype='float32'),
            _pymp.shared.array((3, ), dtype='float32'),
            _pymp.shared.queue(),
            _pymp.shared.lock(),
            _pymp.shared.queue()
        ]
        _DEPTH_EST[0] = _multiprocessing.Process(target=_depth_estimator,
                                                 args=tuple(_DEPTH_EST[1:]))
        _DEPTH_EST[0].start()
        _DEPTH_EST[5].get()
    if _ROT_EST is None:
        _ROT_EST = [
            None,
            _pymp.shared.array(j2d.shape, dtype='float32'),
            _pymp.shared.array((3, ), dtype='float32'),
            _pymp.shared.queue(),
            _pymp.shared.lock(),
            _pymp.shared.queue()
        ]
        _ROT_EST[0] = _multiprocessing.Process(target=_rot_estimator,
                                               args=tuple(_ROT_EST[1:]))
        _ROT_EST[0].start()
        _ROT_EST[5].get()
    if _SHAPE_EST is None:
        _SHAPE_EST = [
            None,
            _pymp.shared.array(j2d.shape, dtype='float32'),
            _pymp.shared.array((10, ), dtype='float32'),
            _pymp.shared.queue(),
            _pymp.shared.lock(),
            _pymp.shared.queue()
        ]
        _SHAPE_EST[0] = _multiprocessing.Process(target=_shape_estimator,
                                                 args=tuple(_SHAPE_EST[1:]))
        _SHAPE_EST[0].start()
        _SHAPE_EST[5].get()
    if _POSE_EST is None:
        _POSE_EST = [
            None,
            _pymp.shared.array(j2d.shape, dtype='float32'),
            _pymp.shared.array((69, ), dtype='float32'),
            _pymp.shared.queue(),
            _pymp.shared.lock(),
            _pymp.shared.queue()
        ]
        _POSE_EST[0] = _multiprocessing.Process(target=_pose_estimator,
                                                args=tuple(_POSE_EST[1:]))
        _POSE_EST[0].start()
        _POSE_EST[5].get()
    # Copy the data to the processes.
    with _POSE_EST[4]:
        _POSE_EST[1][...] = j2d_norm
    with _SHAPE_EST[4]:
        _SHAPE_EST[1][...] = j2d_norm
    with _ROT_EST[4]:
        _ROT_EST[1][...] = j2d_norm
    with _DEPTH_EST[4]:
        _DEPTH_EST[1][...] = j2d_norm
    # Run it.
    before_fit = _time()
    _POSE_EST[3].put('go')
    _ROT_EST[3].put('go')
    _SHAPE_EST[3].put('go')
    _DEPTH_EST[3].put('go')
    _LOGGER.info("Running...")
    _DEPTH_EST[5].get()
    _POSE_EST[5].get()
    _SHAPE_EST[5].get()
    _ROT_EST[5].get()
    _LOGGER.info("Prediction available in %ss.", str(_time() - before_fit))
    # Extract the results.
    pose = _np.zeros((72, ), dtype='float32')
    betas = _np.zeros((10, ), dtype='float32')
    trans = _np.zeros((3, ), dtype='float32')
    with _POSE_EST[4]:
        pose[3:] = _POSE_EST[2]
    with _SHAPE_EST[4]:
        betas[:] = _SHAPE_EST[2]
    with _ROT_EST[4]:
        pose[:3] = _ROT_EST[2]
    with _DEPTH_EST[4]:
        trans[:] = _DEPTH_EST[2]
    trans[2] *= scale
    # Get the projected landmark locations from the model.
    param_dict = {
        't': [0, 0, 0],
        'rt': [0, 0, 0],
        'f': _FLENGTH_GUESS,
        'pose': pose,
        'trans': trans,
        'betas': betas
    }
    # Optimize depth and global rotation.
    opt_globrot, opt_trans, dmin, dmax = _fit_rot_trans(
        _MODEL_NEUTRAL, j2d, [img.shape[1] // 2, img.shape[0] // 2], trans,
        pose, conf, _FLENGTH_GUESS)
    pose[:3] = opt_globrot
    trans[:] = opt_trans
    """
    proj_landmark_positions = get_landmark_positions(param_dict,
                                                     (513, 513),
                                                     _LANDMARK_MAPPING)
    # Get the right offset to match the original.
    offset = _np.mean(j2d, axis=1) - _np.mean(proj_landmark_positions, axis=1)
    """
    # Render the optimized mesh.
    _LOGGER.info("Rendering...")
    mesh = _copy(_TEMPLATE_MESH)
    model = _MODEL_NEUTRAL
    model.betas[:len(betas)] = betas
    # Get the full rendered mesh.
    model.pose[:] = pose
    model.trans[:] = trans
    mesh.v = model.r
    mesh.vc = [.7, .7, .9]
    base_mesh_v = mesh.v.copy()
    images = []
    for deg in do_degrees:
        mesh.v = _rotateY(base_mesh_v.copy(), deg)
        rn = create_renderer(w=img.shape[1],
                             h=img.shape[0],
                             near=dmin - 1.,
                             far=dmax + 1.,
                             rt=[0., 0., 0.],
                             t=[0., 0., 0.],
                             f=[_FLENGTH_GUESS, _FLENGTH_GUESS],
                             c=[img.shape[1] // 2,
                                img.shape[0] // 2])  # + offset[1]])
        light_yrot = _np.radians(120)
        im = (simple_renderer(rn=rn, meshes=[mesh], yrot=light_yrot) *
              255.).astype('uint8')
        images.append(im)
    #param_dict['j2d'] = (proj_landmark_positions.T + offset).T
    _LOGGER.info("Estimation done.")
    return param_dict, images
def _simple_renderer(rn, meshes, yrot=0, texture=None, use_light=False):
    """Render `meshes` with the renderer `rn`, optionally textured and lit."""
    mesh = meshes[0]
    if texture is not None:
        # Fall back to planar UVs from the mesh's XY extent if it has
        # no texture coordinates of its own.
        if not hasattr(mesh, 'ft'):
            mesh.ft = _copy(mesh.f)
            vt = _copy(mesh.v[:, :2])
            vt -= _np.min(vt, axis=0).reshape((1, -1))
            vt /= _np.max(vt, axis=0).reshape((1, -1))
            mesh.vt = vt
        mesh.texture_filepath = rn.texture_image

    # Pass the mesh data (and texture coordinates, if any) to the renderer.
    if texture is not None:
        rn.set(v=mesh.v,
               f=mesh.f,
               vc=mesh.vc,
               ft=mesh.ft,
               vt=mesh.vt,
               bgcolor=_np.ones(3))
    else:
        rn.set(v=mesh.v, f=mesh.f, vc=mesh.vc, bgcolor=_np.ones(3))

    for next_mesh in meshes[1:]:
        _stack_with(rn, next_mesh, texture)

    # Construct light.
    if use_light:
        albedo = rn.vc
        rn.vc = _odr_l.LambertianPointLight(f=rn.f,
                                            v=rn.v,
                                            num_verts=len(rn.v),
                                            light_pos=_rotateY(
                                                _np.array([-200, -100, -100]),
                                                yrot),
                                            vc=albedo,
                                            light_color=_np.array([1, 1, 1]))
        # Construct Left Light
        rn.vc += _odr_l.LambertianPointLight(f=rn.f,
                                             v=rn.v,
                                             num_verts=len(rn.v),
                                             light_pos=_rotateY(
                                                 _np.array([800, 10, 300]),
                                                 yrot),
                                             vc=albedo,
                                             light_color=_np.array([1, 1, 1]))

        # Construct Right Light
        rn.vc += _odr_l.LambertianPointLight(
            f=rn.f,
            v=rn.v,
            num_verts=len(rn.v),
            light_pos=_rotateY(_np.array([-500, 500, 1000]), yrot),
            vc=albedo,
            light_color=_np.array([.7, .7, .7]))
    # Rotate 180 degrees about X (negate Y and Z) to match OpenGL's
    # camera convention.
    flipXRotation = _np.array([[1.0, 0.0, 0.0, 0.0],
                               [0.0, -1.0, 0.0, 0.0],
                               [0.0, 0.0, -1.0, 0.0],
                               [0.0, 0.0, 0.0, 1.0]])
    rn.camera.openglMat = flipXRotation  # this is from setupcamera in utils
    rn.glMode = 'glfw'
    rn.sharedWin = None
    rn.overdraw = True
    rn.nsamples = 8
    rn.msaa = True  # Without anti-aliasing optimization often does not work.
    rn.initGL()
    rn.debug = False
    return rn.r
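
A driver sketch for run_single_fit, matching the shapes its asserts require (all values hypothetical; a real landmark detector would fill j2d):

import numpy as _np

img = _np.zeros((513, 513, 3), dtype='uint8')  # input image
j2d = _np.zeros((3, 91), dtype='float32')      # rows: x, y, confidence
params, views = run_single_fit(img, j2d, scale=1., do_degrees=[0.])
# `params` holds 'pose', 'betas', 'trans', 't', 'rt' and 'f';
# `views` contains one rendering per entry in do_degrees.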