Example #1
def _create_renderer_mesh(  # pylint: disable=too-many-arguments
        w=640,
        h=480,
        rt=np.zeros(3),
        t=np.zeros(3),
        f=None,
        c=None,
        k=None,
        near=1.,
        far=10.,
        texture=None):
    """Create a renderer for the specified parameters."""
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    if texture is not None:
        rn = TexturedRenderer()
    else:
        rn = ColoredRenderer()

    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}

    if texture is not None:
        rn.texture_image = np.asarray(texture, np.float64) / 255.
        rn.texture_image = rn.texture_image[:, :, ::-1]
        # print(rn.texture_image.shape)
    return rn
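A minimal usage sketch of the helper above (verts and faces are hypothetical mesh arrays; assumes opendr is installed). The helper only wires up the camera and frustum, so geometry and per-vertex colors still have to be attached before reading rn.r, as Example #2 does:

import numpy as np

# verts: hypothetical (N, 3) float vertex array; faces: hypothetical (M, 3) int triangle array
rn = _create_renderer_mesh(w=640, h=480)
rn.set(v=verts, f=faces, vc=np.ones_like(verts) * .9, bgcolor=np.zeros(3))
image = rn.r  # h x w x 3 float image with values in [0, 1]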
Example #2
def renderBody(m):
    import numpy as np
    import matplotlib.pyplot as plt
    from opendr.camera import ProjectPoints
    from opendr.renderer import ColoredRenderer
    from opendr.lighting import LambertianPointLight
    # Create OpenDR renderer
    rn = ColoredRenderer()
    # Assign attributes to renderer
    w, h = (640, 480)
    rn.camera = ProjectPoints(v=m,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 2.]),
                              f=np.array([w, w]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=m, f=m.f, bgcolor=np.zeros(3))
    # Construct point light source
    rn.vc = LambertianPointLight(f=m.f,
                                 v=rn.v,
                                 num_verts=len(m),
                                 light_pos=np.array([-1000, -1000, -2000]),
                                 vc=np.ones_like(m) * .9,
                                 light_color=np.array([1., 1., 1.]))
    plt.ion()
    plt.imshow(np.fliplr(rn.r))  # FLIPPED!
    plt.show()
    plt.xticks([])
    plt.yticks([])
Example #3
def initialize_camera(fx, fy, cx, cy):
    """Initialize camera translation and body orientation
    :param model: SMPL model
    :param j2d: 14x2 array of CNN joints
    :param img: h x w x 3 image 
    :param init_pose: 72D vector of pose parameters used for initialization
    :param flength: camera focal length (kept fixed)
    :param pix_thsh: threshold (in pixel), if the distance between shoulder joints in 2D
                     is lower than pix_thsh, the body orientation as ambiguous (so a fit is run on both
                     the estimated one and its flip)
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the estimated camera,
              a boolean deciding if both the optimized body orientation and its flip should be considered,
              3D vector for the body orientation
    """

    rt = ch.zeros(3)
    t = ch.zeros(3)

    # initialize the camera
    cam = ProjectPoints(f=np.array([fx, fy]),
                        rt=rt,
                        t=t,
                        k=np.zeros(5),
                        c=[cx, cy])
    return cam
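A hedged usage sketch: the returned camera projects whatever is assigned to cam.v (the cam.v / cam.r pattern used in Examples #8 and #11); the intrinsics below are made up for illustration.

import numpy as np

cam = initialize_camera(fx=500., fy=500., cx=320., cy=240.)
cam.v = np.random.rand(100, 3) + np.array([0., 0., 2.])  # 3D points in front of the camera
uv = cam.r  # (100, 2) array of projected pixel coordinates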
Example #4
def main(mesh_list, out_list, scale=1.0, move_scale=True):
    # NOTE: voxel_size, trans, flength, w and h are module-level constants
    # defined elsewhere in the original script.
    assert len(mesh_list) == len(out_list)
    for mesh_file, out_file in zip(mesh_list, out_list):
        mesh = load_obj_data_binary(mesh_file)
        if move_scale:  # move to center and scale to unit bounding box
            mesh['v'] = (mesh['v'] - np.array([128, -192, 128]) +
                         0.5) * voxel_size

        if not ('vn' in mesh and mesh['vn'] is not None):
            mesh['vn'] = np.array(VertNormals(f=mesh['f'], v=mesh['v']))

        V = ch.array(mesh['v']) * scale
        V -= trans

        C = np.ones_like(mesh['v'])
        C *= np.array([186, 212, 255], dtype=np.float32) / 255.0
        # C *= np.array([158, 180, 216], dtype=np.float32) / 250.0
        C = np.minimum(C, 1.0)
        A = np.zeros_like(mesh['v'])
        A += LambertianPointLight(f=mesh['f'],
                                  v=V,
                                  vn=-mesh['vn'],
                                  num_verts=len(V),
                                  light_pos=np.array([0, -50, -50]),
                                  light_color=np.array([1.0, 1.0, 1.0]),
                                  vc=C)

        cam_t, cam_r = ch.array((0, 0, 0)), ch.array((3.14, 0, 0))
        U = ProjectPoints(v=V,
                          f=[flength, flength],
                          c=[w / 2., h / 2.],
                          k=ch.zeros(5),
                          t=cam_t,
                          rt=cam_r)
        rn = ColoredRenderer(camera=U,
                             v=V,
                             f=mesh['f'],
                             vc=A,
                             bgcolor=np.array([1.0, 0.0, 0.0]),
                             frustum={
                                 'width': w,
                                 'height': h,
                                 'near': 0.1,
                                 'far': 20
                             })

        img = np.asarray(rn)[:, :, (2, 1, 0)]  # RGB -> BGR for OpenCV
        msk = np.sum(np.abs(img - np.array([[[0, 0, 1.0]]], dtype=np.float32)),
                     axis=-1,
                     keepdims=True)
        msk[msk > 0] = 1
        img = cv.resize(img, (img.shape[1] // 2, img.shape[0] // 2))
        msk = cv.resize(msk, (msk.shape[1] // 2, msk.shape[0] // 2),
                        interpolation=cv.INTER_AREA)
        msk[msk < 1] = 0
        msk = msk[:, :, np.newaxis]
        img = np.concatenate([img, msk], axis=-1)
        cv.imshow('render3', img)
        cv.waitKey(3)
        cv.imwrite(out_file, np.uint8(img * 255))
Example #5
def create_renderer(w=640,
                    h=480,
                    rt=np.zeros(3),
                    t=np.zeros(3),
                    f=None,
                    c=None,
                    k=None,
                    near=.5,
                    far=10.,
                    mesh=None):
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    if mesh is not None and hasattr(mesh, 'texture_image'):
        from opendr.renderer import TexturedRenderer
        rn = TexturedRenderer()
        rn.texture_image = mesh.texture_image
        if rn.texture_image.max() > 1:
            rn.texture_image[:] = rn.texture_image[:].r / 255.
        rn.ft = mesh.ft
        rn.vt = mesh.vt
    else:
        from opendr.renderer import ColoredRenderer
        rn = ColoredRenderer()

    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return rn
Example #6
def main(vt_ft_path, gar_type, mesh_location, texture_location, inpaint_mask,
         side, cam_file, save_tex_location):

    mesh = Mesh(filename=mesh_location)

    img = cv2.imread(texture_location) / 255.
    img = cv2.resize(img, (1000, 1000))

    cam_data = pkl.load(open(cam_file, 'rb'))
    cam_y, cam_z = cam_data[gar_type]['cam_y'], cam_data[gar_type]['cam_z']

    camera = ProjectPoints(v=mesh.v,
                           f=np.array([1000, 1000]),
                           c=np.array([1000, 1000]) / 2.,
                           t=np.array([0, cam_y, cam_z]),
                           rt=np.zeros(3),
                           k=np.zeros(5))

    data = pkl.load(open(vt_ft_path, 'rb'))[gar_type]
    iso = Isomapper(data['vt'], data['ft'], 2000)

    tex = iso.render(img, camera, mesh.f)
    if side == "front":
        tex_save = tex[500:1500, 0:1000]
    else:
        tex_save = tex[500:1500, 1000:2000]

    inpaint_mask = cv2.imread(inpaint_mask, cv2.IMREAD_UNCHANGED)
    if inpaint_mask is not None:
        tex_save = cv2.inpaint(np.uint8(tex_save * 255), inpaint_mask, 3,
                               cv2.INPAINT_TELEA)
    cv2.imwrite(save_tex_location, tex_save)
Example #7
def Render():
    import numpy as np
    from opendr.camera import ProjectPoints
    from opendr.renderer import ColoredRenderer
    from opendr.lighting import LambertianPointLight

    verts = np.load('../../resault/verts.npy')
    faces = np.load('../../resault/faces.npy')

    rn = ColoredRenderer()
    w, h = (640, 480)

    rn.camera = ProjectPoints(v=verts,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 2.]),
                              f=np.array([w, w]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 0.8, 'far': 16., 'width': w, 'height': h}
    rn.set(v=verts, f=faces, bgcolor=np.ones(3))  # opendr colors are floats in [0, 1]

    rn.vc = LambertianPointLight(f=faces,
                                 v=rn.v,
                                 num_verts=len(verts),
                                 light_pos=np.array([-1000, -1000, -2000]),
                                 vc=np.ones_like(verts) * .9,
                                 light_color=np.array([1., 1., 1.]))

    # import cv2
    #
    # cv2.imshow('render_SMPL', rn.r)
    # cv2.waitKey(0)
    import matplotlib.pyplot as plt
    plt.ion()
    plt.axis('off')
    plt.imshow(rn.r)
    input()  # block so the interactive window stays open (raw_input in Python 2)
    plt.show()
Example #8
def main(mesh, sv_file, side, size, fmap_location, bmap_location, cam_file,
         gar_type):
    from opendr.camera import ProjectPoints
    from psbody.mesh import Mesh

    print(fmap_location)
    cam_data = pkl.load(open(cam_file, 'rb'))
    cam_z, cam_y = cam_data[gar_type]['cam_z'], cam_data[gar_type]['cam_y']

    mesh = Mesh(filename=mesh)
    fmap = np.load(fmap_location)
    bmap = np.load(bmap_location)

    cam = ProjectPoints(v=mesh.v,
                        t=np.array([0, cam_y, cam_z]),
                        rt=np.zeros(3),
                        f=[1000, 1000],
                        c=[1000 / 2., 1000 / 2.],
                        k=np.zeros(5))

    points = uv_to_xyz_and_normals(mesh, fmap, bmap)
    cam.v = points
    projection = cam.r.astype(np.int32)
    projection[projection > 999] = 999

    projection = np.fliplr(np.around(projection.squeeze())).astype(np.int32)

    pixels_to_set = np.array(np.where(fmap != -1)).T
    x_to_set = pixels_to_set[:, 0]
    y_to_set = pixels_to_set[:, 1]

    cords_ret = -999 * np.ones((fmap.shape[0], fmap.shape[1], 2))
    cords_ret[x_to_set, y_to_set, :] = projection

    cords_ret = cords_ret.astype('float64')

    cords_ret = 2 * ((cords_ret) / 999) - 1

    cords_ret = np.flip(cords_ret, 2)

    if side == 'front':
        cords_ret = cords_ret[500:1500, 0:1000]
    else:
        cords_ret = cords_ret[500:1500, 1000:2000]

    cords_ret = cv2.resize(cords_ret, (size, size))
    np.save(sv_file, cords_ret)
Example #9
def render_orth(sv,
                w,
                h,
                cam,
                img=None,
                deg=None,
                margin=None,
                color_key='blue',
                use_face=None,
                vc=None):
    # Use large focal length to remove perspective effects.
    # Get projected points using current f.
    orig_trans = sv.trans.r.copy()
    # import ipdb; ipdb.set_trace()
    use_f = 5000.
    # We have f * X/Z = x = f' * X / Z' where f' is the use_f, solve for Z'
    new_Z = use_f / cam.f[0].r * orig_trans[2]
    sv.trans[2] = new_Z
    # Now project this
    # dist = np.mean(sv.r, axis=0)[2]
    min_z = np.maximum(np.min(sv.r, axis=0)[2] - 5, 0.5)
    max_z = np.max(sv.r, axis=0)[2]
    cam_f = ProjectPoints(rt=cam.rt,
                          t=cam.t,
                          f=np.array([use_f, use_f]),
                          c=cam.c,
                          k=cam.k)
    if use_face is None:
        #use_face = sv.model.f
        use_face = sv.f

    if vc is None:
        img_here = render_mesh(Mesh(sv.r, use_face),
                               w,
                               h,
                               cam_f,
                               near=min_z,
                               far=max_z + 5,
                               img=img,
                               deg=deg,
                               margin=margin,
                               color_key=color_key)
    else:
        img_here = render_mesh(Mesh(sv.r, use_face, vc=vc),
                               w,
                               h,
                               cam_f,
                               near=min_z,
                               far=max_z + 5,
                               img=img,
                               deg=deg,
                               margin=margin,
                               color_key=color_key)

    # Model and sv don't have ties but for safety put it back.
    sv.trans[:] = orig_trans

    return img_here
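The focal-length trick above follows from equating the two projections: x = f * X / Z = f' * X / Z', so Z' = (f' / f) * Z. For instance, with f = 1000, Z = 2 and f' = 5000 the body is pushed back to Z' = 10; its on-screen size is unchanged while perspective foreshortening becomes negligible, approximating an orthographic view.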
Example #10
def fit_silhouettes_pyramid_multi_model(objs,
                                        sv,
                                        silhs,
                                        cams,
                                        # weights=1.,
                                        mv=None,
                                        fix_shape=False,
                                        imgs=None,
                                        fix_rot=False,
                                        fix_trans=False,
                                        s2m_weights=1.,
                                        m2s_weights=1.,
                                        max_iter=100,
                                        alpha=None,
                                        vc=None,
                                        symIdx=None,
                                        mv2=None,
                                        FIX_POSE=False):
    '''If alpha is not None, then it replaces sv.betas.'''
    from opendr.camera import ProjectPoints

    silhs = [np.uint8(silh[:, :, 0] > 0) for silh in silhs]

    # Setup silhouet term camera.
    cam_copy = [ProjectPoints(
        rt=cam.rt, t=cam.t, f=cam.f, c=cam.c, k=cam.k, v=cam.v) for cam in cams]

    if imgs[0].shape[1] < 900:
        scales = 1. / (2 * np.array([3, 2, 1, 0.5]))
    else:
        scales = 1. / (2 * np.array([6, 4, 3, 2, 1]))

    res_silh = []

    for sc in scales:

        if 'shape_prior' in objs.keys():
            objs['shape_prior'] = 0.4 * objs['shape_prior'] 

        silh_here = [cv2.resize(silh, (int(silh.shape[1] * sc), int(silh.shape[0] * sc)))
                     for silh in silhs]
        cam_here = [scalecam(cam, sc) for cam in cam_copy]
        for i, cam in enumerate(cam_copy):
            cam_here[i].v = cam.v
        print('Scale %g' % (1 / sc))
        w_s2m = s2m_weights
        w_m2s = m2s_weights
        R, s_objs = fit_silhouettes_multi_model(
            objs, sv, silh_here, cam_here, w_s2m, w_m2s, max_iter, mv,
            fix_shape, cams, imgs, alpha=alpha, fix_trans=fix_trans,
            pyr_scale=sc, vc=vc, symIdx=symIdx, mv2=mv2, FIX_POSE=FIX_POSE)

        # For scales < 1 we optimize f on the kp_camera (cams) and then update cam_copy.
        for i in range(len(cams)):
            cam_copy[i].f[:] = cam_here[i].f.r / sc
        res_silh.append(R)

    # Compute energy
    E = 0
    for term in s_objs.values():
        E = E + np.mean(term.r)

    return res_silh, E
Example #11
def proj_smpl_onto_img(img, smpl, pose, shape, cam_f, cam_t):
    if isinstance(cam_f, float):
        cam_f = np.array([cam_f, cam_f])

    smpl.pose[:] = pose
    smpl.betas[:] = shape
    center = np.array([img.shape[1] / 2, img.shape[0] / 2])
    cam = ProjectPoints(
        f=cam_f, rt=ch.zeros(3), t=cam_t, k=np.zeros(5), c=center)

    cam.v = smpl.r
    for v in cam.r:
        r = int(round(v[1]))
        c = int(round(v[0]))
        if 0 <= r < img.shape[0] and 0 <= c < img.shape[1]:
            img[r, c, :] = np.asarray([255, 255, 255])

    return img
Example #12
    def __call__(self,
                 verts,
                 faces,
                 vert_colors=None,
                 cam=None,
                 img=None,
                 do_alpha=False,
                 far=None,
                 near=None,
                 color_id=0,
                 img_size=None):
        """
        cam is 3D [f, px, py]
        """
                
        if img is not None:
            h, w = img.shape[:2]
        elif img_size is not None:
            h = img_size[0]
            w = img_size[1]
        else:
            h = self.h
            w = self.w

        if cam is None:
            if self.cam is not None:
                cam = self.cam
            else:
                cam = [self.flength, w / 2., h / 2.]

        use_cam = ProjectPoints(
            f=cam[0] * np.ones(2),
            rt=np.zeros(3),
            t=np.zeros(3),
            k=np.zeros(5),
            c=cam[1:3])

        if near is None:
            near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
        if far is None:
            far = np.maximum(np.max(verts[:, 2]) + 25, 25)
    
        imtmp = render_model(verts,
                             faces,
                             w,
                             h,
                             use_cam,
                             vert_colors=vert_colors,
                             do_alpha=do_alpha,
                             img=img,
                             far=far,
                             near=near,
                             color_id=color_id)

        return (imtmp * 255).astype('uint8')
Example #13
def project_vertices(v, w, h, cam_r, cam_t):
    """projects vertices onto image plane"""
    V = ch.array(v)
    U = ProjectPoints(v=V,
                      f=[w, w],
                      c=[w / 2., h / 2.],
                      k=ch.zeros(5),
                      t=cam_t,
                      rt=cam_r)
    return U
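Because V is a ch.array, the returned camera stays differentiable with respect to the vertices, so residuals built from U can be fed straight into ch.minimize, as Example #31 does with its camera term.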
Example #14
def scalecam(cam, imres):
    # Returns a camera which shares camera parameters by reference,
    # and whose scale is resized according to imres.
    from chumpy import array
    if imres == 1:
        return cam
    return ProjectPoints(rt=cam.rt,
                         t=cam.t,
                         f=array(cam.f.r * imres),
                         c=array(cam.c.r * imres),
                         k=cam.k)
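A sketch of the intended use, mirroring Example #10 (it assumes cam is a ProjectPoints instance whose v is already set):

half_cam = scalecam(cam, 0.5)  # camera for a half-resolution image
half_cam.v = cam.v             # share the same 3D points
uv_half = half_cam.r           # projections in downscaled pixel coordinates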
Example #15
def main(pose_file, masks_file, camera_file, out, obj_out, num, icp_count, model_file, first_frame, last_frame,
         nohands, naked, display):

    # load data
    with open(model_file, 'rb') as fp:
        model_data = pkl.load(fp)

    with open(camera_file, 'rb') as fp:
        camera_data = pkl.load(fp)

    pose_data = h5py.File(pose_file, 'r')
    poses = pose_data['pose'][first_frame:last_frame]
    trans = pose_data['trans'][first_frame:last_frame]
    masks = h5py.File(masks_file, 'r')['masks'][first_frame:last_frame]
    num_frames = masks.shape[0]

    indices_consensus = np.ceil(np.arange(num) * num_frames * 1. / num).astype(int)

    # init
    base_smpl = Smpl(model_data)
    base_smpl.betas[:] = np.array(pose_data['betas'], dtype=np.float32)

    camera = ProjectPoints(t=np.zeros(3), rt=np.zeros(3), c=camera_data['camera_c'],
                           f=camera_data['camera_f'], k=camera_data['camera_k'], v=base_smpl)
    camera_t = camera_data['camera_t']
    camera_rt = camera_data['camera_rt']
    frustum = {'near': 0.1, 'far': 1000., 'width': int(camera_data['width']), 'height': int(camera_data['height'])}
    frames = []

    for i in indices_consensus:
        log.info('Set up frame {}...'.format(i))

        mask = np.array(masks[i] * 255, dtype=np.uint8)
        pose_i = np.array(poses[i], dtype=np.float32)
        trans_i = np.array(trans[i], dtype=np.float32)

        frames.append(setup_frame_rays(base_smpl, camera, camera_t, camera_rt, pose_i, trans_i, mask))

    log.info('Set up complete.')
    log.info('Begin consensus fit...')
    fit_consensus(frames, base_smpl, camera, frustum, model_data, nohands, icp_count, naked, display)

    with open(out, 'wb') as fp:
        pkl.dump({
            'v_personal': base_smpl.v_personal.r,
            'betas': base_smpl.betas.r,
        }, fp, protocol=2)

    if obj_out is not None:
        base_smpl.pose[:] = 0
        vt = np.load('assets/basicModel_vt.npy')
        ft = np.load('assets/basicModel_ft.npy')
        mesh.write(obj_out, base_smpl.r, base_smpl.f, vt=vt, ft=ft)

    log.info('Done.')
Example #16
def get_cam_rend(verts, faces, cam_y, cam_z):

    frustum = {'near': 0.1, 'far': 1000., 'width': 1000, 'height': 1000}

    camera = ProjectPoints(v=verts, t=np.array([0, cam_y, cam_z]), rt=np.zeros(3), f=[1000, 1000],
                           c=[1000 / 2., 1000 / 2.], k=np.zeros(5))

    rn_m = ColoredRenderer(camera=camera, v=verts, f=faces, vc=np.ones_like(verts),
                           frustum=frustum, bgcolor=0, num_channels=1)

    return camera, rn_m
Example #17
def scalecam(cam, imres):
    # Returns a camera which shares camera parameters by reference,
    # and whose scale is resized according to imres
    from chumpy import array
    from opendr.camera import ProjectPoints
    return ProjectPoints(
        rt=cam.rt,
        t=cam.t,
        f=cam.f * imres,
        c=array(cam.c.r * imres),
        k=cam.k)
Example #18
def _project_vertices(v, w, h, cam_r, cam_t):
    """projects vertices onto image plane"""
    V = ch.array(v)
    U = ProjectPoints(
        v=V,
        f=[w * 2, w * 2],
        c=[w / 2., h / 2.],  # camera intrinsics
        k=ch.zeros(5),
        t=cam_t,
        rt=cam_r)
    return U
Example #19
    def __call__(self,
                 verts,
                 cam=None,
                 img=None,
                 do_alpha=False,
                 far=None,
                 near=None,
                 color_id=0,
                 img_size=None):
        """
        cam is 3D [f, px, py]
        """
        if img is not None:
            h, w = img.shape[:2]
        elif img_size is not None:
            h = img_size[0]
            w = img_size[1]
        else:
            h = self.h
            w = self.w

        if cam is None:
            cam = [self.flength, w / 2., h / 2.]

        use_cam = ProjectPoints(f=cam[0] * np.ones(2),
                                rt=np.zeros(3),
                                t=np.zeros(3),
                                k=np.zeros(5),
                                c=cam[1:3])

        if near is None:
            near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
        if far is None:
            far = np.maximum(np.max(verts[:, 2]) + 25, 25)

        np_verts = np.array(verts)
        #np_verts = np.stack((np_verts[:,0],np.max(np_verts[:,1])-np_verts[:,1],np_verts[:,2]),axis=1)
        self.trimesh = trimesh.Trimesh(vertices=np.asarray(np_verts),
                                       faces=self.faces)

        imtmp = render_model(verts,
                             self.faces,
                             w,
                             h,
                             use_cam,
                             do_alpha=do_alpha,
                             img=img,
                             far=far,
                             near=near,
                             color_id=color_id)

        return (imtmp * 255).astype('uint8')
Example #20
def test_earth():
    # NOTE: w, h, save_dir and create_callback are module-level names defined
    # elsewhere in the original script.
    m = get_earthmesh(trans=ch.array([0, 0, 0]), rotation=ch.zeros(3))
    # Create V, A, U, f: geometry, brightness, camera, renderer
    V = ch.array(m.v)
    A = SphericalHarmonics(vn=VertNormals(v=V, f=m.f),
                           components=[3., 2., 0., 0., 0., 0., 0., 0., 0.],
                           light_color=ch.ones(3))
    # camera
    U = ProjectPoints(v=V,
                      f=[w, w],
                      c=[w / 2., h / 2.],
                      k=ch.zeros(5),
                      t=ch.zeros(3),
                      rt=ch.zeros(3))
    f = TexturedRenderer(vc=A,
                         camera=U,
                         f=m.f,
                         bgcolor=[0., 0., 0.],
                         texture_image=m.texture_image,
                         vt=m.vt,
                         ft=m.ft,
                         frustum={
                             'width': w,
                             'height': h,
                             'near': 1,
                             'far': 20
                         })

    # Parameterize the vertices
    translation, rotation = ch.array([0, 0, 8]), ch.zeros(3)
    f.v = translation + V.dot(Rodrigues(rotation))

    observed = f.r
    np.random.seed(1)
    # this is reactive
    # in the sense that changes to values will affect function which depend on them.
    translation[:] = translation.r + np.random.rand(3)
    rotation[:] = rotation.r + np.random.rand(3) * .2
    # Create the energy
    E_raw = f - observed
    E_pyr = gaussian_pyramid(E_raw, n_levels=6, normalization='size')

    Image.fromarray((observed * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "reference.png"))
    step = 0
    Image.fromarray((f.r * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "step_{:05d}.png".format(step)))

    print('OPTIMIZING TRANSLATION AND ROTATION')
    free_variables = [translation, rotation]
    ch.minimize({'pyr': E_pyr}, x0=free_variables, callback=create_callback(f))
    ch.minimize({'raw': E_raw}, x0=free_variables, callback=create_callback(f))
Example #21
def setup_camera(w,
                 h,
                 flength=2000,
                 rt=np.zeros(3),
                 t=np.zeros(3),
                 k=np.zeros(5)):
    '''Note: pass w = image.shape[0] (rows) and h = image.shape[1] (cols).'''
    from opendr.camera import ProjectPoints
    center = np.array([h / 2, w / 2])
    f = np.array([flength, flength])
    cam = ProjectPoints(f=f, rt=rt, t=t, k=k, c=center)

    return cam
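Given the docstring's warning, a call sketch with a hypothetical img array; the first argument takes the number of rows:

import numpy as np

img = np.zeros((480, 640, 3))  # hypothetical 480 x 640 x 3 image
cam = setup_camera(w=img.shape[0], h=img.shape[1], flength=2000)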
Example #22
def get_3DSV(mesh):
    from opendr.camera import ProjectPoints
    from opendr.renderer import DepthRenderer
    WIDTH, HEIGHT = 250, 250

    camera = ProjectPoints(v=mesh.vertices,
                           f=np.array([WIDTH, WIDTH]),
                           c=np.array([WIDTH, HEIGHT]) / 2.,
                           t=np.array([0, 0, 2.5]),
                           rt=np.array([np.pi, 0, 0]),
                           k=np.zeros(5))
    frustum = {'near': 1., 'far': 10., 'width': WIDTH, 'height': HEIGHT}
    rn = DepthRenderer(camera=camera,
                       frustum=frustum,
                       f=mesh.faces,
                       overdraw=False)

    points3d = camera.unproject_depth_image(rn.r)
    points3d = points3d[points3d[:, :, 2] > np.min(points3d[:, :, 2]) + 0.01]

    # print('sampled {} points.'.format(points3d.shape[0]))
    return points3d
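Here camera.unproject_depth_image back-projects every pixel of the depth rendering into a 3D point. Because the background renders at a single constant depth, all of its pixels unproject onto one plane at the extreme z value, so the min-z threshold keeps only points that lie on the mesh surface.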
Example #23
def setupCamera(v, cameraParams):

    chDistMat = geometry.Translate(x=0,
                                   y=cameraParams['Zshift'],
                                   z=cameraParams['chCamHeight'])

    chRotElMat = geometry.RotateX(a=-cameraParams['chCamEl'])

    chCamModelWorld = ch.dot(chDistMat, chRotElMat)

    flipZYRotation = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0, 1.0, 0.0],
                               [0.0, -1.0, 0, 0.0], [0.0, 0.0, 0.0, 1.0]])

    chMVMat = ch.dot(chCamModelWorld, flipZYRotation)

    chInvCam = ch.inv(chMVMat)

    modelRotation = chInvCam[0:3, 0:3]

    chRod = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    chTranslation = chInvCam[0:3, 3]

    translation, rotation = (chTranslation, chRod)

    camera = ProjectPoints(v=v,
                           rt=rotation,
                           t=translation,
                           f=1000 * cameraParams['chCamFocalLength'] *
                           cameraParams['a'],
                           c=cameraParams['c'],
                           k=ch.zeros(5))

    flipXRotation = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0., 0.0],
                              [0.0, 0., -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])

    camera.openglMat = flipXRotation  #Needed to match OpenGL flipped axis.

    return camera, modelRotation, chMVMat
Example #24
def mesh2Image(vertices,
               faces,
               batch,
               path,
               name,
               height,
               width,
               vertices_num=6890):
    # Create OpenDR renderer
    rn = ColoredRenderer()

    rt_1 = np.zeros(3)

    rn.camera = ProjectPoints(
        v=vertices,
        rt=rt_1,
        # x, y, z translation of the camera (z >= 0), e.g. [0, 0, 2]
        t=np.array([0, 0, 0]),
        # the focal length f scales the projection; the principal point c shifts it
        # (e.g. f=np.array([w, w]) / 2., c=np.array([w, h]) / 2.)
        f=np.array([1, 1]),
        c=np.array([0, 0]),
        k=np.zeros(5))
    rn.frustum = {'near': 1, 'far': 15, 'width': width, 'height': height}
    rn.set(v=vertices, f=faces, bgcolor=np.zeros(3))

    # Construct point light source
    rn.vc = LambertianPointLight(
        f=faces,
        v=vertices,
        num_verts=len(vertices),
        light_pos=np.array([-1000, -1000, -2000]),  # point light position
        vc=np.ones_like(vertices) * .9,  # albedo per vertex
        light_color=np.array([1., 1., 1.]))  # light intensity per channel

    # make the image binary (black and white)
    rn.change_col(np.ones((vertices_num, 3)))
    # mask = rn.r.copy()  # takes lots of time

    mask = rn.r * 255
    import cv2
    if batch == 1:
        cv2.imwrite('%s/%s.png' % (path, name), mask)
    else:
        # the original snippet used an undefined `i` here; `batch` is the apparent intent
        cv2.imwrite('%s/%s_%d.png' % (path, name, batch), mask)
Example #25
def get_3DSV(mesh):
    from opendr.camera import ProjectPoints
    from opendr.renderer import DepthRenderer
    from scipy.spatial.transform import Rotation as R
    WIDTH, HEIGHT = 250, 250

    rt = R.from_euler('xyz', [np.pi, 0, 0]).as_rotvec()
    rt_mat = R.from_euler('xyz', [np.pi, 0, 0]).as_matrix()
    camera = ProjectPoints(v=mesh.vertices, f=np.array([WIDTH, WIDTH]), c=np.array([WIDTH, HEIGHT]) / 2.,
                           t=np.array([0, 0, 3.0]), rt=rt, k=np.zeros(5))
    frustum = {'near': 1., 'far': 10., 'width': WIDTH, 'height': HEIGHT}
    rn = DepthRenderer(camera=camera, frustum=frustum, f=mesh.faces, overdraw=False)

    # import cv2
    depth_image = rn.depth_image.copy()
    mask = depth_image < depth_image.max() - 0.01
    depth_image[~mask] = 0
    depth_image[mask] = 255 - (depth_image[mask] - depth_image[mask].min()) / (depth_image[mask].max() - depth_image[mask].min()) * 255

    points3d = camera.unproject_depth_image(rn.r)
    mask = points3d[:, :, 2] > np.min(points3d[:, :, 2]) + 0.01

    points3d = points3d[mask]

    return points3d, depth_image
Example #26
def initialize_camera(fx, fy, cx, cy):
    """
    @param fx, fy, cx, cy: pinhole camera model parameters
    @return: a differentiable camera instance
    """

    rt = ch.zeros(3)

    t = ch.zeros(3)

    cam = ProjectPoints(f=np.array([fx, fy]),
                        rt=rt,
                        t=t,
                        k=np.zeros(5),
                        c=[cx, cy])
    return cam
Example #27
def render_bound(mesh, require_id=False, img_size=(448, 448), f=1000):
    rn = DepthRenderer()
    rn.camera = ProjectPoints(rt=np.zeros(3),
                              t=np.zeros(3),
                              f=np.array([f, f]),
                              c=np.array([img_size[1], img_size[0]]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': .5, 'far': 10.,
                  'width': img_size[1], 'height': img_size[0]}
    rn.v = mesh.points()
    rn.f = mesh.face_vertex_indices()
    rn.bgcolor = np.zeros(3)
    if require_id is False:
        return rn.boundarybool_image
    else:
        return rn.boundaryid_image
Example #28
    def __init__(self, m):
        self.m = m
        self.m.betas[:] = np.random.rand(m.betas.size) * .3
        # m.pose[:] = np.random.rand(m.pose.size) * .2
        self.m.pose[:3] = [0., 0., 0.]
        self.m.pose[3:] = np.zeros(45)
        # m.pose[3:] = [-0.42671473, -0.85829819, -0.50662164, +1.97374622, -0.84298473, -1.29958491]
        self.m.pose[0] = np.pi

        # compute inverse components to map from fullpose spec to coefficients
        hands_components = np.asarray(m.hands_components)
        self.hands_components_inv = np.linalg.inv(hands_components)

        # rendering components
        # Assign attributes to renderer
        w, h = (640, 480)

        # Create OpenDR renderer
        self.rn = ColoredRenderer()
        self.rn.camera = ProjectPoints(v=m,
                                       rt=np.zeros(3),
                                       t=np.array([-0.03, -0.04, 0.20]),
                                       f=np.array([w, w]) / 2.,
                                       c=np.array([w, h]) / 2.,
                                       k=np.zeros(5))
        self.rn.frustum = {'near': 0.01, 'far': 2., 'width': w, 'height': h}
        self.rn.set(v=m, f=m.f, bgcolor=np.zeros(3))

        # Construct point light source
        self.rn.vc = LambertianPointLight(f=m.f,
                                          v=self.rn.v,
                                          num_verts=len(m),
                                          light_pos=np.array(
                                              [-1000, -1000, -2000]),
                                          vc=np.ones_like(m) * .9,
                                          light_color=np.array([1., 1., 1.]))
        self.rn.vc += LambertianPointLight(f=m.f,
                                           v=self.rn.v,
                                           num_verts=len(m),
                                           light_pos=np.array(
                                               [+2000, +2000, +2000]),
                                           vc=np.ones_like(m) * .9,
                                           light_color=np.array([1., 1., 1.]))
        self.mvs = MeshViewers(window_width=2000,
                               window_height=800,
                               shape=[1, 3])
Example #29
def render_depth_v(verts, faces,
                   require_visi=False, img_size=(448, 448), f=1000):
    rn = DepthRenderer()
    rn.camera = ProjectPoints(rt=np.zeros(3),
                              t=np.zeros(3),
                              f=np.array([f, f]),
                              c=np.array([img_size[1], img_size[0]]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': .5, 'far': 10.,
                  'width': img_size[1], 'height': img_size[0]}
    rn.v = verts
    rn.f = faces
    rn.bgcolor = np.zeros(3)
    if require_visi is True:
        return rn.r, rn.visibility_image
    else:
        return rn.r
Example #30
def _create_renderer(w=640,
                     h=480,
                     rt=np.zeros(3),
                     t=np.zeros(3),
                     f=None,
                     c=None,
                     k=None,
                     near=.5,
                     far=10.):
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    rn = DepthRenderer()

    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return rn
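A depth-map sketch using the helper (verts and faces are hypothetical; it follows the rn.v / rn.f pattern of Examples #27 and #29):

rn = _create_renderer(w=640, h=480)
rn.v = verts  # hypothetical (N, 3) vertex array
rn.f = faces  # hypothetical (M, 3) triangle index array
depth = rn.r  # h x w array of per-pixel depth values within [near, far]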
Example #31
def initialize_camera(model,
                      j2d,
                      img,
                      init_pose,
                      flength=5000.,
                      pix_thsh=25.,
                      viz=False):
    """Initialize camera translation and body orientation
    :param model: SMPL model
    :param j2d: 14x2 array of CNN joints
    :param img: h x w x 3 image 
    :param init_pose: 72D vector of pose parameters used for initialization
    :param flength: camera focal length (kept fixed)
    :param pix_thsh: threshold (in pixels): if the 2D distance between the shoulder joints
                     is lower than pix_thsh, the body orientation is considered ambiguous
                     (so a fit is run on both the estimated orientation and its flip)
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the estimated camera,
              a boolean deciding if both the optimized body orientation and its flip should be considered,
              3D vector for the body orientation
    """
    # optimize camera translation and body orientation based on torso joints
    # LSP torso ids:
    # 2=right hip, 3=left hip, 8=right shoulder, 9=left shoulder
    torso_cids = [2, 3, 8, 9]
    # corresponding SMPL torso ids
    torso_smpl_ids = [2, 1, 17, 16]

    center = np.array([img.shape[1] / 2, img.shape[0] / 2])

    # initialize camera rotation
    rt = ch.zeros(3)
    # initialize camera translation
    _LOGGER.info('initializing translation via similar triangles')
    init_t = guess_init(model, flength, j2d, init_pose)
    t = ch.array(init_t)

    # check how close the shoulder joints are
    try_both_orient = np.linalg.norm(j2d[8] - j2d[9]) < pix_thsh

    opt_pose = ch.array(init_pose)
    (_, A_global) = global_rigid_transformation(
        opt_pose, model.J, model.kintree_table, xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global])

    # initialize the camera
    cam = ProjectPoints(
        f=np.array([flength, flength]), rt=rt, t=t, k=np.zeros(5), c=center)

    # we are going to project the SMPL joints
    cam.v = Jtr

    if viz:
        viz_img = img.copy()

        # draw the target (CNN) joints
        for coord in np.around(j2d).astype(int):
            if (coord[0] < img.shape[1] and coord[0] >= 0 and
                    coord[1] < img.shape[0] and coord[1] >= 0):
                cv2.circle(viz_img, tuple(coord), 3, [0, 255, 0])

        import matplotlib.pyplot as plt
        plt.ion()

        # draw optimized joints at each iteration
        def on_step(_):
            """Draw a visualization."""
            plt.figure(1, figsize=(5, 5))
            plt.subplot(1, 1, 1)
            viz_img = img.copy()
            for coord in np.around(cam.r[torso_smpl_ids]).astype(int):
                if (coord[0] < viz_img.shape[1] and coord[0] >= 0 and
                        coord[1] < viz_img.shape[0] and coord[1] >= 0):
                    cv2.circle(viz_img, tuple(coord), 3, [0, 0, 255])
            plt.imshow(viz_img[:, :, ::-1])
            plt.draw()
            plt.show()
            plt.pause(1e-3)
    else:
        on_step = None
    # optimize for camera translation and body orientation
    free_variables = [cam.t, opt_pose[:3]]
    ch.minimize(
        # data term defined over torso joints...
        {'cam': j2d[torso_cids] - cam[torso_smpl_ids],
         # ...plus a regularizer for the camera translation
         'cam_t': 1e2 * (cam.t[2] - init_t[2])},
        x0=free_variables,
        method='dogleg',
        callback=on_step,
        options={'maxiter': 100,
                 'e_3': .0001,
                 # disp set to 1 enables verbose output from the optimizer
                 'disp': 0})
    if viz:
        plt.ioff()
    return (cam, try_both_orient, opt_pose[:3].r)
Example #32
    # SMPL parameters + camera
    print('opening %s' % results_path)
    with open(results_path, 'rb') as f:
        res = pickle.load(f)
    poses = res['poses']
    betas = res['betas']

    # Camera rotation is always identity;
    # the rotation of the body is encoded by the first 3 elements of poses.
    cam_ts = res['cam_ts']
    focal_length = res['focal_length']
    principal_pt = res['principal_pt']

    # Setup camera:
    cam = ProjectPoints(
        f=focal_length, rt=np.zeros(3), k=np.zeros(5), c=principal_pt)

    h = 480
    w = 640

    # Corresponding ids of detected and SMPL joints (except head)
    lsp_ids = range(0, 12)
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    plt.ion()

    for i, (joints, pose, beta,
            cam_t) in enumerate(zip(est.T, poses, betas, cam_ts)):

        joints = joints[lsp_ids, :]