Example #1
import cv2
import numpy as np
import chumpy as ch

def lrotmin(p):
    # Pose-feature map: flattened (R - I) of every joint rotation except the global (root) one.
    if isinstance(p, np.ndarray):
        p = p.ravel()[3:]  # drop the 3 global-rotation parameters
        return np.concatenate([(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() for pp in p.reshape((-1, 3))]).ravel()
    # chumpy input: Rodrigues is the differentiable chumpy op defined alongside this function (SMPL's posemapper).
    if p.ndim != 2 or p.shape[1] != 3:
        p = p.reshape((-1, 3))
    p = p[1:]  # skip the global rotation
    return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() for pp in p]).ravel()
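As a quick sanity check on what this returns: for a SMPL-style pose of 72 axis-angle parameters (24 joints * 3), lrotmin drops the global rotation and yields a 207-element feature, i.e. (24 - 1) * 9 flattened rotation residuals. A minimal sketch of calling the numpy branch above; the 72-parameter pose and an installed OpenCV are assumptions, not part of the example:

import numpy as np

pose = np.zeros(72)   # hypothetical SMPL-style pose: 24 joints * 3 axis-angle params, zeros = rest pose
feat = lrotmin(pose)  # numpy branch, uses cv2.Rodrigues internally
print(feat.shape)     # (207,) = 23 non-root joints * 9 matrix entries, all zeros at rest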
Example #2
    def test_sum_mean_std_var(self):
        for fn in [ch.sum, ch.mean, ch.var, ch.std]:

            # Create fake input and differences in input space
            data1 = ch.ones((3, 4, 7, 2))
            data2 = ch.array(data1.r + .1 *
                             np.random.rand(data1.size).reshape(data1.shape))
            diff = data2.r - data1.r

            # Compute outputs
            result1 = fn(data1, axis=2)
            result2 = fn(data2, axis=2)

            # Empirical and predicted derivatives
            gt = result2.r - result1.r
            pred = result1.dr_wrt(data1).dot(diff.ravel()).reshape(gt.shape)

            #print(np.max(np.abs(gt - pred)))

            if fn in [ch.std, ch.var]:
                self.assertTrue(1e-2 > np.max(np.abs(gt - pred)))
            else:
                self.assertTrue(1e-14 > np.max(np.abs(gt - pred)))
                # test caching
                dr0 = result1.dr_wrt(data1)
                data1[:] = np.random.randn(data1.size).reshape(data1.shape)
                self.assertTrue(
                    result1.dr_wrt(data1) is
                    dr0)  # changing values shouldn't force recompute
                result1.axis = 1
                self.assertTrue(result1.dr_wrt(data1) is not dr0)

        self.assertEqual(
            ch.mean(ch.eye(3), axis=1).ndim,
            np.mean(np.eye(3), axis=1).ndim)
        self.assertEqual(
            ch.mean(ch.eye(3), axis=0).ndim,
            np.mean(np.eye(3), axis=0).ndim)
        self.assertEqual(
            ch.sum(ch.eye(3), axis=1).ndim,
            np.sum(np.eye(3), axis=1).ndim)
        self.assertEqual(
            ch.sum(ch.eye(3), axis=0).ndim,
            np.sum(np.eye(3), axis=0).ndim)
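The pattern in this test, comparing the change actually observed in the output against the change predicted by dr_wrt, also works as a standalone finite-difference check. A minimal sketch, assuming only numpy and chumpy; the array sizes are arbitrary:

import numpy as np
import chumpy as ch

x1 = ch.array(np.random.rand(4, 5))
dx = 1e-1 * np.random.rand(x1.size).reshape(x1.shape)  # small perturbation in input space
y1 = ch.sum(x1, axis=1)
y2 = ch.sum(ch.array(x1.r + dx), axis=1)

empirical = y2.r - y1.r                                             # observed change in the output
predicted = y1.dr_wrt(x1).dot(dx.ravel()).reshape(empirical.shape)  # change predicted by the Jacobian
print(np.max(np.abs(empirical - predicted)))                        # ~0 for a linear op like sum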
Example #3
# (excerpt begins inside a loop over the fields `s` of the model dict `dd`)
        dd[s] = ch.array(dd[s])  # promote raw arrays into the chumpy autodiff graph
    else:
        print(type(dd[s]))

# Shape blendshapes: offset the template mesh by the identity coefficients ('betas').
dd['v_shaped'] = dd['shapedirs'].dot(dd['betas']) + dd['v_template']
v_shaped = dd['v_shaped']

# Regress the joint locations from the shaped vertices, one coordinate at a time
# (MatVecMult is chumpy's matrix-vector product, from chumpy.ch).
J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:, 0])
J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:, 1])
J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:, 2])
dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T

# Pose feature, as in lrotmin above: `p` must be defined before it is tested,
# and the feature is computed unconditionally once the pose is in (-1, 3) form.
p = dd['pose']
if p.ndim != 2 or p.shape[1] != 3:
    p = p.reshape((-1, 3))
p = p[1:]  # skip the global rotation
c = ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() for pp in p]).ravel()

dd['v_posed'] = v_shaped + dd['posedirs'].dot(c)  # add the pose-dependent corrective offsets

args = {
    'pose': dd['pose'],
    'v': dd['v_posed'],
    'J': dd['J'],
    'weights': dd['weights'],
    'kintree_table': dd['kintree_table'],
    'xp': ch,
    'want_Jtr': True,
    'bs_style': dd['bs_style'],
}

pose = args['pose']
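For orientation, the shapes flowing through this excerpt can be reproduced with plain numpy. A hedged sketch using the usual SMPL body-model dimensions (6890 vertices, 10 shape coefficients, 24 joints, a 207-element pose feature); these sizes are assumptions, not read from the snippet:

import numpy as np

shapedirs = np.zeros((6890, 3, 10))                    # shape blendshape basis (assumed SMPL sizes)
betas = np.zeros(10)
v_shaped = shapedirs.dot(betas) + np.zeros((6890, 3))  # template + shape offsets -> (6890, 3)

J_regressor = np.zeros((24, 6890))
J = np.vstack([J_regressor.dot(v_shaped[:, k]) for k in range(3)]).T  # joint locations -> (24, 3)

posedirs = np.zeros((6890, 3, 207))
pose_feature = np.zeros(207)                           # what lrotmin(pose) from Example #1 returns
v_posed = v_shaped + posedirs.dot(pose_feature)        # pose correctives -> (6890, 3)
print(v_posed.shape, J.shape)                          # (6890, 3) (24, 3)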
Example #4
import numpy as np
import chumpy as ch
from os.path import join

from smpl_webuser.serialization import load_model
from fitting.landmarks import load_embedding, landmark_error_3d
from fitting.util import load_binary_pickle, write_simple_obj, safe_mkdir, mat_save
import scipy.io as sio
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
matplotlib.use("qt4agg")
import matplotlib.pyplot
import myplot.vtkplot as vp
import quaternion

x1, x2, x3, x4 = ch.eye(10), ch.array(1), ch.array(5), ch.array(10)
print "model.trans:"
print x1
y = x1 * (x2 - x3) + x4
print x1, x2, x3, x4
print y
print y.dr_wrt(x2)


def kk():
    hua = 1
    sddd = 2


kk()