Example #1
    def test_maximum(self):
        from chumpy.utils import row, col
        from chumpy import maximum

        # Make sure that when we compare the max of two *identical* numbers,
        # we get the right derivatives wrt both
        the_max = maximum(ch.Ch(1), ch.Ch(1))
        self.assertTrue(the_max.r.ravel()[0] == 1.)
        self.assertTrue(the_max.dr_wrt(the_max.a)[0, 0] == 1.)
        self.assertTrue(the_max.dr_wrt(the_max.b)[0, 0] == 1.)

        # Now test given that all numbers are different, by allocating from
        # a pool of randomly permuted numbers.
        # We test combinations of scalars and 2d arrays.
        rnd = np.asarray(np.random.permutation(np.arange(20)), np.float64)
        c1 = ch.Ch(rnd[:6].reshape((2, 3)))
        c2 = ch.Ch(rnd[6:12].reshape((2, 3)))
        s1 = ch.Ch(rnd[12])
        s2 = ch.Ch(rnd[13])

        eps = .1
        for first in [c1, s1]:
            for second in [c2, s2]:
                the_max = maximum(first, second)

                for which_to_change in [first, second]:

                    max_r0 = the_max.r.copy()
                    max_r_diff = np.max(
                        np.abs(max_r0 - np.maximum(first.r, second.r)))
                    self.assertTrue(max_r_diff == 0)
                    max_dr = the_max.dr_wrt(which_to_change).copy()
                    which_to_change.x = which_to_change.x + eps
                    max_r1 = the_max.r.copy()

                    emp_diff = (the_max.r - max_r0).ravel()
                    pred_diff = max_dr.dot(col(
                        eps * np.ones(max_dr.shape[1]))).ravel()

                    #print('comparing the following numbers/vectors:')
                    #print(first.r)
                    #print(second.r)
                    #print('empirical vs predicted difference:')
                    #print(emp_diff)
                    #print(pred_diff)
                    #print('-----')

                    max_dr_diff = np.max(np.abs(emp_diff - pred_diff))
                    #print('max dr diff: %.2e' % (max_dr_diff,))
                    self.assertTrue(max_dr_diff < 1e-14)
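
The behavior exercised above, in a minimal sketch (assuming chumpy is installed): ch.maximum evaluates elementwise maxima through .r and exposes Jacobians through dr_wrt; as the test asserts, a tie yields a derivative of 1 with respect to both arguments.

import numpy as np
import chumpy as ch

a = ch.Ch([1., 5., 2.])
b = ch.Ch([3., 4., 2.])
m = ch.maximum(a, b)
print(m.r)           # [3. 5. 2.]
# Jacobian wrt a: 1 where a "wins" (including the tie at index 2), else 0.
# Depending on the op, dr_wrt may return a dense or a scipy-sparse matrix.
print(m.dr_wrt(a))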
Example #2
    def on_changed(self, which):
        if not hasattr(self, '_lpl'):
            # Builds max((light_dot_normal * light_color) * vc, 0); the inner
            # terms are filled in below via _lpl.a.a.a, _lpl.a.a.b and _lpl.a.b.
            self.add_dterm('_lpl', maximum(multiply(a=multiply()), 0.0))
        if not hasattr(self, 'ldn'):
            self.ldn = LightDotNormal(self.v.r.size // 3)  # // keeps the vertex count an int under Python 3
        if not hasattr(self, 'vn'):
            logger.info(
                'LambertianPointLight using auto-normals. This will be slow for derivative-free computations.'
            )
            self.vn = VertNormals(f=self.f, v=self.v)
            self.vn.needs_autoupdate = True
        if 'v' in which and hasattr(
                self.vn, 'needs_autoupdate') and self.vn.needs_autoupdate:
            self.vn.v = self.v

        ldn_args = {
            k: getattr(self, k)
            for k in which if k in ('light_pos', 'v', 'vn')
        }
        if len(ldn_args) > 0:
            self.ldn.set(**ldn_args)
            self._lpl.a.a.a = self.ldn.reshape((-1, 1))

        if 'num_verts' in which or 'light_color' in which:
            # nc = self.num_channels
            # IS = np.arange(self.num_verts*nc)
            # JS = np.repeat(np.arange(self.num_verts), 3)
            # data = (row(self.light_color)*np.ones((self.num_verts, 3))).ravel()
            # mtx = sp.csc_matrix((data, (IS,JS)), shape=(self.num_verts*3, self.num_verts))
            self._lpl.a.a.b = self.light_color.reshape((1, self.num_channels))

        if 'vc' in which:
            self._lpl.a.b = self.vc.reshape((-1, self.num_channels))
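
A sketch of how the nested _lpl term is wired, with hypothetical stand-in values: maximum(multiply(a=multiply()), 0.0) builds max((x * y) * z, 0), whose leaves are assigned later through the dterm attributes, as noted in the comment above.

import numpy as np
import chumpy as ch
from chumpy import maximum, multiply

ldn = ch.Ch(np.random.rand(4, 1))       # stand-in light-dot-normal, one row per vertex
light_color = ch.Ch(np.ones((1, 3)))    # RGB light color
vc = ch.Ch(np.random.rand(4, 3))        # per-vertex albedo
lpl = maximum(multiply(a=multiply(a=ldn, b=light_color), b=vc), 0.0)
print(lpl.r.shape)                      # (4, 3), clamped at zero for back-facing vertices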
Example #3
def lambertian_spotlight(v, vn, pos, dir, spot_exponent, camcoord=False, camera_t=None, camera_rt=None):
    """
    :param v: vertices
    :param vn: vertex normals
    :param light_pos: light position
    :param light_dir: light direction
    :param spot_exponent: spot exponent (a la opengl)
    :param camcoord: if True, then pos and dir are wrt the camera
    :param camera_t: 3-vector indicating translation of camera
    :param camera_rt: 3-vector indicating direction of camera
    :return: Vx1 array of brightness
    """

    if camcoord: # Transform pos and dir from camera to world coordinate system
        assert(camera_t is not None and camera_rt is not None)
        from opendr.geometry import Rodrigues
        rot = Rodrigues(rt=camera_rt)
        pos = rot.T.dot(pos-camera_t)
        dir = rot.T.dot(dir)

    dir = dir / ch.sqrt(ch.sum(dir**2.))
    v_minus_light = v - pos.reshape((1,3))
    v_distances = ch.sqrt(ch.sum(v_minus_light**2, axis=1))
    v_minus_light_normed = v_minus_light / v_distances.reshape((-1,1))
    cosangle = v_minus_light_normed.dot(dir.reshape((3,1)))
    light_dot_normal = ch.sum(vn*v_minus_light_normed, axis=1)
    light_dot_normal.label = 'light_dot_normal'
    cosangle.label = 'cosangle'
    result = light_dot_normal.ravel() * cosangle.ravel()**spot_exponent
    result = result / v_distances ** 2.
    result = maximum(result, 0.0)

    return result
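
A hypothetical call, assuming the function above is in scope and v/vn are (V, 3) chumpy arrays; the result is raveled to length V.

import numpy as np
import chumpy as ch

v = ch.Ch(np.random.rand(100, 3))
vn = ch.Ch(np.random.rand(100, 3))      # should be unit normals on a real mesh
brightness = lambertian_spotlight(v, vn,
                                  pos=ch.Ch([0., 0., 2.]),    # light position
                                  dir=ch.Ch([0., 0., -1.]),   # spot direction
                                  spot_exponent=10.)
print(brightness.r.shape)               # (100,) per-vertex brightness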
Example #4
    def getHandJointConstraints(self, theta, isValidTheta=False):
        '''
        get constraints on the joint angles when input is theta vector itself (first 3 elems are NOT global rot)
        :param theta: Nx45 tensor if isValidTheta is False and Nx25 if isValidTheta is True
        :param isValidTheta:
        :return:
        '''

        if not isValidTheta:
            assert (theta.shape)[-1] == 45
            validTheta = theta[self.validThetaIDs[3:] - 3]
        else:
            assert (theta.shape)[-1] == len(self.validThetaIDs[3:])
            validTheta = theta

        # One-sided hinge penalties: each term is nonzero only where
        # validTheta leaves the [minThetaVals, maxThetaVals] range.
        phyConstMax = (ch.maximum(
            self.minThetaVals[self.validThetaIDs[3:]] - validTheta, 0))
        phyConstMin = (ch.maximum(
            validTheta - self.maxThetaVals[self.validThetaIDs[3:]], 0))

        return phyConstMin, phyConstMax
Example #5
    def getHandJointConstraintsCh(self, theta, isValidTheta=False):
        '''
        chumpy implementation of getHandJointConstraints
        :param theta: Nx45 tensor if isValidTheta is False and Nx25 if isValidTheta is True
        :param isValidTheta:
        :return:
        '''
        import chumpy as ch

        if not isValidTheta:
            assert (theta.shape)[-1] == 45
            validTheta = theta[self.validThetaIDs[3:] - 3]
        else:
            assert (theta.shape)[-1] == len(self.validThetaIDs[3:])
            validTheta = theta

        phyConstMax = (ch.maximum(
            self.minThetaVals[self.validThetaIDs[3:]] - validTheta, 0))
        phyConstMin = (ch.maximum(
            validTheta - self.maxThetaVals[self.validThetaIDs[3:]], 0))

        return phyConstMin, phyConstMax
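
Both of the preceding examples implement the same pattern: a pair of one-sided hinge penalties that are zero inside the joint limits and grow linearly outside them. A minimal sketch with hypothetical limits:

import numpy as np
import chumpy as ch

theta = ch.Ch([-0.5, 0.2, 1.7])
minVals = np.array([0., 0., 0.])
maxVals = np.array([1.5, 1.5, 1.5])
below = ch.maximum(minVals - theta, 0)   # nonzero only where theta < min
above = ch.maximum(theta - maxVals, 0)   # nonzero only where theta > max
print(below.r, above.r)                  # [0.5 0. 0.] [0. 0. 0.2]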
Example #6
def computeGlobalAndDirectionalLighting(vn, vc, chLightAzimuth,
                                        chLightElevation, chLightIntensity,
                                        chGlobalConstant):

    # Construct point light source
    rangeMeshes = range(len(vn))
    vc_list = []
    chRotAzMat = geometry.RotateZ(a=chLightAzimuth)[0:3, 0:3]
    chRotElMat = geometry.RotateX(a=chLightElevation)[0:3, 0:3]
    chLightVector = -ch.dot(chRotAzMat,
                            ch.dot(chRotElMat, np.array([0, 0, -1])))
    for mesh in rangeMeshes:
        l1 = ch.maximum(ch.dot(vn[mesh], chLightVector).reshape((-1, 1)), 0.)
        vcmesh = vc[mesh] * (chLightIntensity * l1 + chGlobalConstant)
        vc_list.append(vcmesh)
    return vc_list
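
The per-mesh shading rule above reduces to a clamped Lambertian term plus an ambient constant; a sketch with hypothetical inputs:

import numpy as np
import chumpy as ch

vn = ch.Ch(np.random.rand(10, 3))          # stand-in vertex normals
light_vector = ch.Ch([0., 0., 1.])         # unit light direction
intensity, ambient = 2.0, 0.1
l1 = ch.maximum(ch.dot(vn, light_vector).reshape((-1, 1)), 0.)  # back faces clamp to 0
vc_shaded = ch.Ch(np.ones((10, 3))) * (intensity * l1 + ambient)
print(vc_shaded.r.shape)                   # (10, 3)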
Example #7
def set_pose_objs(sv,
                  cam,
                  landmarks,
                  key_vids,
                  animal=None,
                  shape_data_name=None,
                  nbetas=0,
                  kp_weights=None,
                  fix_rot=False,
                  SOLVE_FLATER=True,
                  FIX_CAM=False,
                  ONLY_KEYP=False,
                  OPT_SHAPE=True,
                  DO_FREE_SHAPE=False):

    nCameras = len(cam)
    nClips = nCameras

    params = set_params(SOLVE_FLATER, nCameras)

    pose_prior = [set_pose_prior(len(sv[0].pose.r)) for _ in range(nClips)]
    pose_prior_tail = [
        set_pose_prior_tail(len(sv[0].pose.r)) for _ in range(nClips)
    ]
    if OPT_SHAPE:
        shape_prior = [
            set_shape_prior(DO_FREE_SHAPE, animal, shape_data_name)
            for _ in range(nClips)
        ]

    # indices with no prior
    noprior_ind = ~pose_prior[0].use_ind
    noprior_ind[:3] = False

    limit_prior = [set_limit_prior(len(sv[0].pose.r)) for _ in range(nClips)]

    init_rot = [sv[ic].pose[:3].r.copy() for ic in range(nClips)]
    init_trans = [sv[ic].trans.r.copy() for ic in range(nClips)]
    init_pose = [sv[ic].pose.r.copy() for ic in range(nClips)]

    # Setup keypoint projection error with multi verts:
    j2d = [None] * nCameras
    assignments = [None] * nCameras
    num_points = [None] * nCameras
    use_ids = [None] * nCameras
    visible_vids = [None] * nCameras
    all_vids = [None] * nCameras

    for i in range(nCameras):
        visible = landmarks[i][:, 2].astype(bool)

        use_ids[i] = [
            id for id in np.arange(landmarks[i].shape[0]) if visible[id]
        ]
        visible_vids[i] = np.hstack([key_vids[i][id] for id in use_ids[i]])

        group = np.hstack([
            index * np.ones(len(key_vids[i][row_id]))
            for index, row_id in enumerate(use_ids[i])
        ])
        assignments[i] = np.vstack(
            [group == j for j in np.arange(group[-1] + 1)])
        num_points[i] = len(use_ids[i])

        all_vids[i] = visible_vids[i]
        cam[i].v = sv[i][all_vids[i], :]
        j2d[i] = landmarks[i][use_ids[i], :2]

        if kp_weights is None:
            kp_weights = np.ones((landmarks[i].shape[0], 1))

    def kp_proj_error(i, w, sigma):
        return w * kp_weights[use_ids[i]] * GMOf(
            ch.vstack([
                cam[i][choice] if np.sum(choice) == 1 else cam[i][choice].mean(
                    axis=0) for choice in assignments[i]
            ]) - j2d[i], sigma) / np.sqrt(num_points[i])

    objs = {}
    for i in range(nCameras):
        objs['kp_proj_' + str(i)] = kp_proj_error(i, params['k_kp_term'],
                                                  params['k_robust_sig'])
        if not ONLY_KEYP:
            objs['trans_init_' + str(i)] = params['k_trans_term'] * (
                sv[i].trans - init_trans[i])
            if fix_rot:
                objs['fix_rot_' + str(i)] = params['k_rot_term'] * (
                    sv[i].pose[:3] - init_rot[i])
        if OPT_SHAPE:
            if i > 0:
                objs['betas_var_' + str(i)] = params['betas_var'] * ch.abs(
                    sv[i - 1].betas - sv[i].betas)
            objs['shape_prior_' + str(i)] = shape_prior[i](
                sv[i].betas) / np.sqrt(nbetas)

    if not FIX_CAM:
        for i in range(nCameras):
            objs['feq_' + str(i)] = 1e3 * (cam[i].f[0] - cam[i].f[1])
            objs['fpos_' + str(i)] = 1e3 * ch.maximum(0, 500 - cam[i].f[0])
        if not SOLVE_FLATER:
            for i in range(nCameras):
                objs['freg_' + str(i)] = 9 * 1e2 * (cam[i].f[0] - 3000) / 1000.
                objs['cam_t_pos_' + str(i)] = 1e3 * ch.maximum(
                    0, 0.01 - sv[i].trans[2])
                # trans_init_ may be absent when ONLY_KEYP, so pop instead of del
                objs.pop('trans_init_' + str(i), None)

    num_pose_prior = len(pose_prior[0](sv[0].pose))
    num_limit_prior = len(limit_prior[0](sv[0].pose))

    if not ONLY_KEYP:
        for i in range(nClips):
            if np.sum(noprior_ind) > 0:
                # keep unpriored joints near their initial pose
                objs['rest_poseprior_' + str(i)] = params['k_rest_pose_term'] * (
                    sv[i].pose[noprior_ind] -
                    init_pose[i][noprior_ind]) / np.sqrt(
                        len(sv[i].pose[noprior_ind]))
            objs['pose_limit_' + str(i)] = params['k_limit_term'] * limit_prior[i](
                sv[i].pose) / np.sqrt(num_limit_prior)
            objs['pose_prior_' + str(i)] = pose_prior[i](
                sv[i].pose) / np.sqrt(num_pose_prior)
            objs['pose_prior_tail_' + str(i)] = 2.0 * pose_prior_tail[i](
                sv[i].pose) / np.sqrt(num_pose_prior)

    return objs, params, j2d
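
The camera terms above use ch.maximum as soft one-sided barriers; e.g. 'fpos' is zero once the focal length clears the 500-pixel margin and grows linearly below it. A minimal sketch:

import chumpy as ch

f = ch.Ch(400.)
fpos = 1e3 * ch.maximum(0, 500 - f)
print(fpos.r)      # ~1e5: the barrier is active while f < 500
f.x = 600.
print(fpos.r)      # 0: the barrier vanishes once f > 500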
Example #8
def estimate_global_pose(landmarks,
                         key_vids,
                         model,
                         cam,
                         img,
                         fix_t=False,
                         viz=False,
                         SOLVE_FLATER=True):
    '''
    Estimates the global rotation and translation.
    only diff in estimate_global_pose from single_frame_ferrari is that all animals have the same kp order.
    '''
    # Redefining part names..
    part_names = [
        'leftEye', 'rightEye', 'chin', 'frontLeftFoot', 'frontRightFoot',
        'backLeftFoot', 'backRightFoot', 'tailStart', 'frontLeftKnee',
        'frontRightKnee', 'backLeftKnee', 'backRightKnee', 'leftShoulder',
        'rightShoulder', 'frontLeftAnkle', 'frontRightAnkle', 'backLeftAnkle',
        'backRightAnkle', 'neck', 'TailTip'
    ]

    # Use shoulder-to-"knee" (elbow) distance, and tail-to-"knee" if available.
    use_names = [
        'neck', 'leftShoulder', 'rightShoulder', 'backLeftKnee',
        'backRightKnee', 'tailStart', 'frontLeftKnee', 'frontRightKnee'
    ]
    use_ids = [part_names.index(name) for name in use_names]
    # These might not be visible
    visible = landmarks[:, 2].astype(bool)
    use_ids = [id for id in use_ids if visible[id]]
    if len(use_ids) < 3:
        print('Frontal?..')
        use_names += [
            'frontLeftAnkle', 'frontRightAnkle', 'backLeftAnkle',
            'backRightAnkle'
        ]
        model.pose[1] = np.pi / 2

    init_t = estimate_translation(landmarks, key_vids, cam.f[0].r, model)

    use_ids = [part_names.index(name) for name in use_names]
    use_ids = [id for id in use_ids if visible[id]]

    # Setup projection error:
    all_vids = np.hstack([key_vids[id] for id in use_ids])
    cam.v = model[all_vids]

    keypoints = landmarks[use_ids, :2].astype(float)

    # Duplicate keypoints for the # of vertices for that kp.
    num_verts_per_kp = [len(key_vids[row_id]) for row_id in use_ids]
    j2d = np.vstack([
        np.tile(kp, (num_rep, 1))
        for kp, num_rep in zip(keypoints, num_verts_per_kp)
    ])

    assert (cam.r.shape == j2d.shape)

    # SLOW but correct method!!
    # remember which ones belong together
    group = np.hstack([
        index * np.ones(len(key_vids[row_id]))
        for index, row_id in enumerate(use_ids)
    ])
    assignments = np.vstack([group == i for i in np.arange(group[-1] + 1)])
    num_points = len(use_ids)
    proj_error = (ch.vstack([
        cam[choice] if np.sum(choice) == 1 else cam[choice].mean(axis=0)
        for choice in assignments
    ]) - keypoints) / np.sqrt(num_points)

    # Fast but not matching average:
    # Normalization weight
    j2d_norm_weights = np.sqrt(
        1. / len(use_ids) *
        np.vstack([1. / num * np.ones((num, 1)) for num in num_verts_per_kp]))
    proj_error_fast = j2d_norm_weights * (cam - j2d)

    if fix_t:
        obj = {'cam': proj_error_fast}
    else:
        obj = {
            'cam': proj_error_fast,
            'cam_t': 1e1 * (model.trans[2] - init_t[2])
        }

    # Only estimate body orientation
    if fix_t:
        free_variables = [model.pose[:3]]
    else:
        free_variables = [model.trans, model.pose[:3]]

    if not SOLVE_FLATER:
        obj['feq'] = 1e3 * (cam.f[0] - cam.f[1])
        # So it's under control
        obj['freg'] = 1e1 * (cam.f[0] - 3000) / 1000.
        # Without this, cam.f can go negative (ask for a margin of 500)
        obj['fpos'] = ch.maximum(0, 500 - cam.f[0])
        # cam t also has to be positive!
        obj['cam_t_pos'] = ch.maximum(0, 0.01 - model.trans[2])
        del obj['cam_t']
        free_variables.append(cam.f)

    if viz:
        import matplotlib.pyplot as plt
        plt.ion()

        def on_step(_):
            plt.figure(1, figsize=(5, 5))
            plt.cla()
            plt.imshow(img[:, :, ::-1])
            img_here = render_mesh(Mesh(model.r, model.f), img.shape[1],
                                   img.shape[0], cam)
            plt.imshow(img_here)
            plt.scatter(j2d[:, 0], j2d[:, 1], c='w')
            plt.scatter(cam.r[:, 0], cam.r[:, 1])
            plt.draw()
            plt.pause(1e-3)
            if 'feq' in obj:
                print('flength %.1f %.1f, z %.f' %
                      (cam.f[0], cam.f[1], model.trans[2]))
    else:
        on_step = None

    from time import time
    t0 = time()
    init_angles = [[0, 0, 0]]  #, [1.5,0,0], [1.5,-1.,0]]
    scores = np.zeros(len(init_angles))
    for i, angle in enumerate(init_angles):
        # Init translation
        model.trans[:] = init_t
        model.pose[:3] = angle
        ch.minimize(obj,
                    x0=free_variables,
                    method='dogleg',
                    callback=on_step,
                    options={
                        'maxiter': 100,
                        'e_3': .0001
                    })
        scores[i] = np.sum(obj['cam'].r**2.)
    j = np.argmin(scores)
    model.trans[:] = init_t
    model.pose[:3] = init_angles[j]
    ch.minimize(obj,
                x0=free_variables,
                method='dogleg',
                callback=on_step,
                options={
                    'maxiter': 100,
                    'e_3': .0001
                })

    print('Took %g' % (time() - t0))

    if viz:
        dist = np.mean(model.r, axis=0)[2]
        img_here = render_mesh(Mesh(model.r, model.f), img.shape[1],
                               img.shape[0], cam)
        plt.imshow(img[:, :, ::-1])
        plt.imshow(img_here)

    return model.pose[:3].r, model.trans.r
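
How the "SLOW but correct" grouping works, sketched in plain numpy with hypothetical sizes: each row of assignments selects the projected vertices belonging to one keypoint, and rows with several True entries are averaged before the residual is taken.

import numpy as np

key_vids = [np.array([0, 1]), np.array([2]), np.array([3, 4, 5])]
use_ids = [0, 2]                                     # keypoints 0 and 2 are visible
group = np.hstack([i * np.ones(len(key_vids[r]))
                   for i, r in enumerate(use_ids)])  # [0. 0. 1. 1. 1.]
assignments = np.vstack([group == j for j in np.arange(group[-1] + 1)])
print(assignments.astype(int))                       # [[1 1 0 0 0]
                                                     #  [0 0 1 1 1]]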
Example #9
    def __call__(self, x):
        # Method of a joint-limit prior object carrying min_values, max_values
        # and prefix attributes; prefix skips the leading global-rotation entries.
        zeros = np.zeros_like(x[self.prefix:])
        # One-sided penalties: nonzero only where x leaves [min_values, max_values].
        res = ch.maximum(
            x[self.prefix:] - self.max_values, zeros) + ch.maximum(
                self.min_values - x[self.prefix:], zeros)
        return res
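
What this penalty evaluates to, with hypothetical attribute values (prefix = 3 skips the global rotation):

import numpy as np
import chumpy as ch

x = ch.Ch([0., 0., 0., -1.0, 0.5, 2.0])   # 3 global-rotation entries + 3 joints
prefix = 3
min_values = np.array([-0.5, 0., 0.])
max_values = np.array([0.5, 1., 1.])
zeros = np.zeros_like(x[prefix:].r)
res = ch.maximum(x[prefix:] - max_values, zeros) + ch.maximum(
    min_values - x[prefix:], zeros)
print(res.r)    # [0.5 0.  1. ]: only out-of-range joints contribute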
Example #10
def compute_cost(params,
                 p0,
                 u_fwd,
                 v_fwd,
                 A_reference,
                 mu_reference,
                 mu_refine,
                 x,
                 y,
                 sigma=1.0,
                 optimize_H=True,
                 optimize_q=False,
                 optimize_B=True):
    """ Compute structure matching cost using chumpy
    
    Parameters:
    x[0] - x[7]: Coordinates of corner points in second frame
    x[8],x[9]: Q
    x[10]: B
    
    p1 : Coordinates of corner points in first frame
    
    We optimize over all parameters
    """

    params_ch = ch.array(params)

    # Extract corner points
    p1 = params_ch[:8].reshape((4, 2))
    q = params_ch[8:10]
    B = params_ch[10]

    # Freeze the terms we do not want to optimize by turning them into constants
    if not optimize_B:
        B = np.array(B)
    if not optimize_H:
        p1 = np.array(p1)
    if not optimize_q:
        q = np.array(q)

    # Compute unity vectors towards q
    vx = q[0] - x
    vy = q[1] - y
    nrm = ch.maximum(1.0, ch.sqrt((vx**2 + vy**2)))
    vx /= nrm
    vy /= nrm

    if not optimize_q:
        vx = np.copy(vx)
        vy = np.copy(vy)

    # Compute differentiable homography (mapping I1 to I0)
    H_inv = chumpy_get_H(p1, p0)
    if not optimize_H:
        H_inv = np.copy(H_inv)

    # Remove differentiable homography from backward flow
    x_new = x + u_fwd
    y_new = y + v_fwd

    D = H_inv[2, 0] * x_new + H_inv[2, 1] * y_new + H_inv[2, 2]
    u_parallax = (H_inv[0, 0] * x_new + H_inv[0, 1] * y_new +
                  H_inv[0, 2]) / D - x
    v_parallax = (H_inv[1, 0] * x_new + H_inv[1, 1] * y_new +
                  H_inv[1, 2]) / D - y
    r = u_parallax * vx + v_parallax * vy

    # Compute A estimates
    A_refined = r / (B * (r / mu_refine - nrm / mu_refine))

    # The data we want to match
    z_match = A_reference * (mu_refine / mu_reference)
    z = A_refined

    #
    # Use Geman-McClure error
    #

    err_per_px = (z - z_match)**2
    err = (sigma * err_per_px / (sigma**2 + err_per_px)).sum()
    # Lorentzian
    # err = (sigma * ch.log(1+0.5 * err_per_px/(sigma**2))).sum()
    derr = err.dr_wrt(params_ch)

    return err, np.copy(derr).flatten()
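
The Geman-McClure penalty used above, isolated on plain numpy residuals: it behaves quadratically near zero and saturates near sigma for large residuals, which is what makes it robust to outliers.

import numpy as np

def geman_mcclure(residual, sigma=1.0):
    e2 = residual**2
    return sigma * e2 / (sigma**2 + e2)

print(geman_mcclure(np.array([0.1, 1.0, 100.0])))
# [~0.0099  0.5  ~0.9999]: large residuals are capped near sigma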