Example #1
 def __init__(self, t, rod, rad, length):
     self.t = t  # translation of the axis
     self.rod = rod  # rotation of the axis in Rodrigues form
     self.rad = rad  # radius of the capsule
     self.length = length # length of the axis
     axis0 = ch.vstack([0, ch.abs(self.length), 0])
     self.axis = ch.vstack((t.T, (t + Rodrigues(rod).dot(axis0)).T))
     v0 = ch.hstack([v[:26].T*rad, (v[26:].T*rad)+ axis0])
     self.v = ((t + Rodrigues(rod).dot(v0)).T)
     self.set_sphere_centers()
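In both Capsule constructors in this listing, `v` is a module-level template vertex array and `set_sphere_centers()` a method defined elsewhere; neither is shown in the excerpt. The pattern to note is that Rodrigues(rod) turns an axis-angle 3-vector into a differentiable 3x3 rotation matrix, which is then applied to the capsule axis and vertices. A quick sanity check of that conversion, assuming SciPy is installed (this check is not part of the original example):

import numpy as np
from opendr.geometry import Rodrigues
from scipy.spatial.transform import Rotation

rod = np.array([0.1, -0.3, 0.2])                 # axis-angle (Rodrigues) vector
R_opendr = Rodrigues(rt=rod).r                   # 3x3 rotation matrix as a numpy array
R_scipy = Rotation.from_rotvec(rod).as_matrix()  # SciPy's reference conversion
assert np.allclose(R_opendr, R_scipy, atol=1e-6)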
Example #2
def lambertian_spotlight(v, vn, pos, dir, spot_exponent, camcoord=False, camera_t=None, camera_rt=None):
    """
    :param v: vertices
    :param vn: vertex normals
    :param pos: light position
    :param dir: light direction
    :param spot_exponent: spot exponent (a la opengl)
    :param camcoord: if True, then pos and dir are wrt the camera
    :param camera_t: 3-vector indicating translation of camera
    :param camera_rt: 3-vector indicating direction of camera
    :return: Vx1 array of brightness
    """

    if camcoord: # Transform pos and dir from camera to world coordinate system
        assert(camera_t is not None and camera_rt is not None)
        from opendr.geometry import Rodrigues
        rot = Rodrigues(rt=camera_rt)
        pos = rot.T.dot(pos-camera_t)
        dir = rot.T.dot(dir)

    dir = dir / ch.sqrt(ch.sum(dir**2.))
    v_minus_light = v - pos.reshape((1,3))
    v_distances = ch.sqrt(ch.sum(v_minus_light**2, axis=1))
    v_minus_light_normed = v_minus_light / v_distances.reshape((-1,1))
    cosangle = v_minus_light_normed.dot(dir.reshape((3,1)))
    light_dot_normal = ch.sum(vn*v_minus_light_normed, axis=1)
    light_dot_normal.label = 'light_dot_normal'
    cosangle.label = 'cosangle'
    result = light_dot_normal.ravel() * cosangle.ravel()**spot_exponent
    result = result / v_distances ** 2.
    result = maximum(result, 0.0)

    return result
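A hypothetical call, assuming `ch` is chumpy, `maximum` is chumpy's elementwise maximum, and `verts`/`normals` are N x 3 arrays of vertices and unit vertex normals (these names are illustrative, not from the original):

brightness = lambertian_spotlight(
    v=ch.array(verts), vn=ch.array(normals),
    pos=ch.array([0., 0., 3.]),    # spotlight position in world coordinates
    dir=ch.array([0., 0., -1.]),   # spotlight direction (normalized inside)
    spot_exponent=10.)
# `brightness` holds per-vertex values and stays differentiable wrt v, vn, pos and dir.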
Example #3
def RigidTransformSlow(**kwargs):
    # Returns a Ch object with dterms 'v', 'rt', and 't'

    result = Ch(lambda v, rt, t : v.dot(Rodrigues(rt=rt)) + t)
    if len(kwargs) > 0:
        result.set(**kwargs)
    return result
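A minimal usage sketch, assuming the lambda-based Ch construction above behaves as its comment states; with rt = 0 the Rodrigues matrix is the identity, so the result is simply v + t:

import numpy as np
pts = ch.array(np.random.rand(10, 3))
xform = RigidTransformSlow(v=pts, rt=ch.zeros(3), t=ch.array([0., 0., 1.]))
print(xform.r)    # the transformed points, shifted by one unit along z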
Example #4
    def r_and_derivatives(self):
        tmp = self.v.dot(Rodrigues(self.rt)) + self.t

        return ch.hstack((
            col(2. / (self.right - self.left) * tmp[:, 0] - (self.right + self.left) / (self.right - self.left) + 1.) * self.width / 2.,
            col(2. / (self.bottom - self.top) * tmp[:, 1] - (self.bottom + self.top) / (self.bottom - self.top) + 1.) * self.height / 2.,
        ))
Example #5
 def __init__(self, t, rod, rad, length):
     assert (hasattr(t, 'dterms'))
     # the translation should be a chumpy object (differentiable wrt shape)
     self.t = t  # translation of the axis
     self.rod = rod  # rotation of the axis in Rodrigues form
     # the radius should be a chumpy object (differentiable wrt shape)
     assert (hasattr(rad, 'dterms'))
     self.rad = rad  # radius of the capsule
     # the length should be a chumpy object (differentiable wrt shape)
     assert (hasattr(length, 'dterms'))
     self.length = length  # length of the axis
     axis0 = ch.vstack([0, ch.abs(self.length), 0])
     self.axis = ch.vstack((t.T, (t + Rodrigues(rod).dot(axis0)).T))
     v0 = ch.hstack([v[:26].T * rad, (v[26:].T * rad) + axis0])
     self.v = ((t + Rodrigues(rod).dot(v0)).T)
     self.set_sphere_centers()
Example #6
    def unproject_points(self, uvd, camera_space=False):
        tmp = np.hstack((
            col(2. * uvd[:, 0] / self.width - 1 + (self.right + self.left) / (self.right - self.left)).r * (self.right - self.left).r / 2.,
            col(2. * uvd[:, 1] / self.height - 1 + (self.bottom + self.top) / (self.bottom - self.top)).r * (self.bottom - self.top).r / 2.,
            np.ones((uvd.shape[0], 1))
        ))

        if camera_space:
            return tmp
        tmp -= self.t.r  # translate

        return tmp.dot(Rodrigues(self.rt).r.T)  # rotate
Example #7
def test_earth():
    m = get_earthmesh(trans=ch.array([0, 0, 0]), rotation=ch.zeros(3))
    # Create V, A, U, f: geometry, brightness, camera, renderer
    V = ch.array(m.v)
    A = SphericalHarmonics(vn=VertNormals(v=V, f=m.f),
                           components=[3., 2., 0., 0., 0., 0., 0., 0., 0.],
                           light_color=ch.ones(3))
    # camera
    U = ProjectPoints(v=V,
                      f=[w, w],
                      c=[w / 2., h / 2.],
                      k=ch.zeros(5),
                      t=ch.zeros(3),
                      rt=ch.zeros(3))
    f = TexturedRenderer(vc=A,
                         camera=U,
                         f=m.f,
                         bgcolor=[0., 0., 0.],
                         texture_image=m.texture_image,
                         vt=m.vt,
                         ft=m.ft,
                         frustum={
                             'width': w,
                             'height': h,
                             'near': 1,
                             'far': 20
                         })

    # Parameterize the vertices
    translation, rotation = ch.array([0, 0, 8]), ch.zeros(3)
    f.v = translation + V.dot(Rodrigues(rotation))

    observed = f.r
    np.random.seed(1)
    # this is reactive: changing these values propagates to the expressions that depend on them
    translation[:] = translation.r + np.random.rand(3)
    rotation[:] = rotation.r + np.random.rand(3) * .2
    # Create the energy
    E_raw = f - observed
    E_pyr = gaussian_pyramid(E_raw, n_levels=6, normalization='size')

    Image.fromarray((observed * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "reference.png"))
    step = 0
    Image.fromarray((f.r * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "step_{:05d}.png".format(step)))

    print('OPTIMIZING TRANSLATION AND ROTATION')
    free_variables = [translation, rotation]
    ch.minimize({'pyr': E_pyr}, x0=free_variables, callback=create_callback(f))
    ch.minimize({'raw': E_raw}, x0=free_variables, callback=create_callback(f))
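`create_callback` is referenced in this example and the next ones but never defined in the excerpts. A hypothetical minimal version, assuming chumpy's minimize invokes the callback with a single argument each iteration and that `Image`, `os`, `np` and `save_dir` are available as above:

def create_callback(renderer, step=0):
    state = {'step': step}
    def on_step(_):
        # save the current rendering after each optimizer iteration
        state['step'] += 1
        Image.fromarray((renderer.r * 255).astype(np.uint8)).save(
            os.path.join(save_dir, "step_{:05d}.png".format(state['step'])))
    return on_step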
Example #8
def get_capsules(model, wrt_betas=None, length_regs=None, rad_regs=None):
    from opendr.geometry import Rodrigues
    if length_regs is not None:
        n_shape_dofs = length_regs.shape[0] - 1
    else:
        n_shape_dofs = model.betas.r.size
    segm = np.argmax(model.weights_prior, axis=1)
    J_off = ch.zeros((len(joint2name), 3))
    rots = rots0.copy()
    mujoco_t_mid = [0, 3, 6, 9]
    if wrt_betas is not None:
        # if we want to differentiate wrt betas (shape), we must have the
        # regressors...
        assert (length_regs is not None and rad_regs is not None)
        # ... and betas must be a chumpy object
        assert (hasattr(wrt_betas, 'dterms'))
        pad = ch.concatenate(
            (wrt_betas, ch.zeros(n_shape_dofs - len(wrt_betas)), ch.ones(1)))
        lengths = pad.dot(length_regs)
        rads = pad.dot(rad_regs)
    else:
        lengths = ch.ones(len(joint2name))
        rads = ch.ones(len(joint2name))
    betas = wrt_betas if wrt_betas is not None else model.betas
    n_betas = len(betas)
    # the joint regressors are the original, pre-optimized ones
    # (middle of the part frontier)
    myJ_regressor = model.J_regressor_prior
    myJ0 = ch.vstack((ch.ch.MatVecMult(
        myJ_regressor, model.v_template[:, 0] +
        model.shapedirs[:, :, :n_betas].dot(betas)[:, 0]),
                      ch.ch.MatVecMult(
                          myJ_regressor, model.v_template[:, 1] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 1]),
                      ch.ch.MatVecMult(
                          myJ_regressor, model.v_template[:, 2] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 2]))).T
    # with small adjustments for hips, spine and feet
    myJ = ch.vstack([
        ch.concatenate([
            myJ0[0, 0], (.6 * myJ0[0, 1] + .2 * myJ0[1, 1] + .2 * myJ0[2, 1]),
            myJ0[9, 2]
        ]),
        ch.vstack([myJ0[i] for i in range(1, 7)]),
        ch.concatenate(
            [myJ0[7, 0], (1.1 * myJ0[7, 1] - .1 * myJ0[4, 1]), myJ0[7, 2]]),
        ch.concatenate(
            [myJ0[8, 0], (1.1 * myJ0[8, 1] - .1 * myJ0[5, 1]), myJ0[8, 2]]),
        ch.concatenate(
            [myJ0[9, 0], myJ0[9, 1], (.2 * myJ0[9, 2] + .8 * myJ0[12, 2])]),
        ch.vstack([myJ0[i] for i in range(10, 24)])
    ])
    capsules = []
    # create one capsule per mujoco joint
    for ijoint, segms in enumerate(mujoco2segm):
        if wrt_betas is None:
            vidxs = np.asarray([segm == k for k in segms]).any(axis=0)
            verts = model.v_template[vidxs].r
            dims = (verts.max(axis=0) - verts.min(axis=0))
            rads[ijoint] = .5 * ((dims[(np.argmax(dims) + 1) % 3] + dims[
                (np.argmax(dims) + 2) % 3]) / 4.)
            lengths[ijoint] = max(dims) - 2. * rads[ijoint].r
        # the core joints are different, since the capsule is not in the joint
        # but in the middle
        if ijoint in mujoco_t_mid:
            len_offset = ch.vstack(
                [ch.zeros(1),
                 ch.abs(lengths[ijoint]) / 2.,
                 ch.zeros(1)]).reshape(3, 1)
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1) -
                Rodrigues(rots[ijoint]).dot(len_offset), rots[ijoint],
                rads[ijoint], lengths[ijoint])
        else:
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        caps.id = ijoint
        capsules.append(caps)
    return capsules
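A usage sketch, assuming `model` is a loaded SMPL-style model exposing betas, weights_prior, J_regressor_prior, v_template and shapedirs, and that `length_regs`/`rad_regs` regressors are available when differentiating with respect to shape (these names are illustrative):

capsules = get_capsules(model)    # fixed shape: radii and lengths come from the template
capsules = get_capsules(model, wrt_betas=model.betas,
                        length_regs=length_regs, rad_regs=rad_regs)    # differentiable wrt betas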
Example #9
def test_teapot():
    # load teapot and sphere
    reference = read_and_process_mesh(
        "example_data/pointclouds/teapot_mesh.obj",
        trans=ch.array([0, 0, 0]),
        rotation=ch.array([np.pi, 0, 0]))
    target = read_and_process_mesh(
        "example_data/pointclouds/sphere_normal_2K_mesh.obj",
        trans=ch.array([0, 0, 0]),
        rotation=ch.array([0, 0, 0]))

    # reference
    V_ref = ch.array(reference.v)
    vc_ref = ch.array(reference.vc)
    A_ref = LambertianPointLight(v=V_ref, f=reference.f, num_verts=len(
        V_ref), light_pos=ch.array([-1000, -1000, -1000]), vc=vc_ref,
        light_color=ch.array([0.9, 0, 0])) +\
        LambertianPointLight(v=V_ref, f=reference.f, num_verts=len(
            V_ref), light_pos=ch.array([1000, -1000, -1000]), vc=vc_ref,
        light_color=ch.array([0.0, 0.9, 0])) +\
        LambertianPointLight(v=V_ref, f=reference.f, num_verts=len(
            V_ref), light_pos=ch.array([-1000, 1000, -1000]), vc=vc_ref,
        light_color=ch.array([0.0, 0.0, 0.9]))
    U_ref = ProjectPoints(v=V_ref,
                          f=[w, w],
                          c=[w / 2., h / 2.],
                          k=ch.zeros(5),
                          t=ch.zeros(3),
                          rt=ch.zeros(3))
    f_ref = ColoredRenderer(vc=A_ref,
                            camera=U_ref,
                            f=reference.f,
                            bgcolor=[1.0, 1.0, 1.0],
                            frustum={
                                'width': w,
                                'height': h,
                                'near': 1,
                                'far': 20
                            })

    # target
    V_tgt = ch.array(target.v)
    vc_tgt = ch.array(target.vc)
    A_tgt = LambertianPointLight(v=V_tgt, f=target.f, num_verts=len(
        V_tgt), light_pos=ch.array([-1000, -1000, -1000]), vc=vc_tgt,
        light_color=ch.array([0.9, 0, 0])) +\
        LambertianPointLight(v=V_tgt, f=target.f, num_verts=len(
            V_tgt), light_pos=ch.array([1000, -1000, -1000]), vc=vc_tgt,
        light_color=ch.array([0.0, 0.9, 0])) +\
        LambertianPointLight(v=V_tgt, f=target.f, num_verts=len(
            V_tgt), light_pos=ch.array([-1000, 1000, -1000]), vc=vc_tgt,
        light_color=ch.array([0.0, 0.0, 0.9]))
    U_tgt = ProjectPoints(v=V_tgt,
                          f=[w, w],
                          c=[w / 2., h / 2.],
                          k=ch.zeros(5),
                          t=ch.zeros(3),
                          rt=ch.zeros(3))
    f_tgt = ColoredRenderer(vc=A_tgt,
                            camera=U_tgt,
                            f=target.f,
                            bgcolor=[1.0, 1.0, 1.0],
                            frustum={
                                'width': w,
                                'height': h,
                                'near': 1,
                                'far': 20
                            })
    # offset = ch.zeros(V_tgt.shape)
    translation, rotation = ch.array([0, 0, 6]), ch.zeros(3)
    f_tgt.v = translation + V_tgt.dot(Rodrigues(rotation))
    f_ref.v = translation + V_ref.dot(Rodrigues(rotation))

    op_mesh_target = om.read_trimesh(
        "example_data/pointclouds/sphere_normal_2K_mesh.obj")

    n_rotations = 144

    # camera positions
    for index in range(n_rotations):
        rotation[:] = np.random.rand(3) * np.pi * 2
        np.save(os.path.join(save_dir, "rot_v{:03d}".format(index)), rotation)
        img_ref = f_ref.r
        Image.fromarray((img_ref * 255).astype(np.uint8)).save(
            os.path.join(save_dir, "reference_v{:03d}.png".format(index)))
        img_tgt = f_tgt.r
        Image.fromarray((img_tgt * 255).astype(np.uint8)).save(
            os.path.join(save_dir, "target_v{:03d}.png".format(index)))

        E_raw = f_tgt - img_ref
        # E_pyr = gaussian_pyramid(E_raw, n_levels=6, normalization='size')
        free_variables = [V_tgt]
        # dogleg
        # Newton-CG
        # SLSQP
        # BFGS
        # trust-ncg
        method = "trust-ncg"
        maxiter = 30
        ch.minimize({'pyr': E_raw},
                    x0=free_variables,
                    method=method,
                    options=dict(maxiter=30),
                    callback=create_callback(f_tgt, step=index * maxiter))
        ch.minimize({'pyr': E_raw},
                    x0=free_variables,
                    method=method,
                    options=dict(maxiter=30),
                    callback=create_callback(f_tgt, step=index * maxiter))
        # is not the same?
        target.v = f_tgt.v.r.copy()
        # save mesh
        # mesh = pymesh.form_mesh(f_tgt.v.r, f_tgt.f)
        # pymesh.save_mesh(os.path.join(
        #     save_dir, "target_v{:03d}.obj".format(index)), mesh)
        # copy the optimized vertices into the OpenMesh point buffer in place
        np.copyto(op_mesh_target.points(), f_tgt.v.r)
        om.write_mesh(
            os.path.join(save_dir, "target_v{:03d}.obj".format(index)),
            op_mesh_target)
Example #10
mesh = read_and_process_mesh("example_data/pointclouds/teapot_mesh.obj",
                             trans=ch.array([0, 0, 0]),
                             rotation=ch.array([np.pi, 0, 0]))
V_ref = ch.array(mesh.v)
# reference
A_ref = LambertianPointLight(v=V_ref, f=mesh.f, num_verts=len(V_ref), light_pos=ch.array([-1000, -1000, -1000]), vc=mesh.vc,
                             light_color=ch.array([0.9, 0, 0])) +\
    LambertianPointLight(v=V_ref, f=mesh.f, num_verts=len(V_ref), light_pos=ch.array([1000, -1000, -1000]), vc=mesh.vc,
                         light_color=ch.array([0.0, 0.9, 0])) +\
    LambertianPointLight(v=V_ref, f=mesh.f, num_verts=len(V_ref), light_pos=ch.array(
        [-1000, 1000, -1000]), vc=mesh.vc, light_color=ch.array([0.0, 0.0, 0.9]))
U_ref = ProjectPoints(v=V_ref,
                      f=[w, w],
                      c=[w / 2., h / 2.],
                      k=ch.zeros(5),
                      t=ch.zeros(3),
                      rt=ch.zeros(3))
f_ref = ColoredRenderer(vc=A_ref,
                        camera=U_ref,
                        f=mesh.f,
                        bgcolor=[1.0, 1.0, 1.0],
                        frustum={
                            'width': w,
                            'height': h,
                            'near': 1,
                            'far': 20
                        })
f_ref.v = translation + V_ref.dot(Rodrigues(rot))
Image.fromarray((f_ref.r * 255).astype(np.uint8)).save(
    os.path.join(save_dir, "opendr_ref.png"))
def world_to_cam(Pw, camera):
    from opendr.geometry import Rodrigues
    R = Rodrigues(camera.rt)
    P = Pw.dot(R.T) + camera.t
    #P = (Pw - camera.t).dot(R)
    return P
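The commented-out line inside world_to_cam is the inverse mapping. Written out as its own helper (a sketch derived from the function above, not part of the original snippet):

def cam_to_world(P, camera):
    from opendr.geometry import Rodrigues
    R = Rodrigues(camera.rt)
    # undo the translation, then rotate back: Pw = R^T (P - t), written in row form
    return (P - camera.t).dot(R)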