Example #1
def rotation_matrix(angle, along, units="rad", around=None):
        from lib import transformations as tf
        if units == "rad":
            pass
        elif units == "deg":
            angle = angle / 360.0 * np.pi * 2.0
        else:
            raise UsageError()

        if around is None:
            # center = 0
            # t = np.eye(4)
            pass
        else:
            check_vector4(around)
            center = around
            # print "center=",center
            t = np.eye(4)
            t[:, 3] = center
            assert t[3, 3] == 1.
            tinv = np.eye(4)
            tinv[:, 3] = -center
            tinv[3, 3] = 1
            # print "t", t
            # print "tinv", tinv

        check_vector4(along)
        rm = tf.rotation_matrix(angle, along[0:3])

        if around is None:
            return rm
        else:
            # translate the center to the origin, rotate, then translate back: T(c) R T(-c)
            return np.dot(t, np.dot(rm, tinv))
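For reference, rotating about an arbitrary center conjugates an origin rotation with translations, T(center) * R * T(-center). A minimal, self-contained numpy sketch of the same idea (the helper names below are invented for illustration and are not part of the library above):

# Sketch only: a plain-numpy rotation about a point, independent of lib.transformations.
import numpy as np

def axis_angle_rotation(angle, axis):
    """4x4 homogeneous rotation about a unit axis through the origin (Rodrigues formula)."""
    x, y, z = np.asarray(axis, dtype=float) / np.linalg.norm(axis)
    c, s = np.cos(angle), np.sin(angle)
    C = 1.0 - c
    r = np.eye(4)
    r[:3, :3] = [[c + x*x*C,   x*y*C - z*s, x*z*C + y*s],
                 [y*x*C + z*s, c + y*y*C,   y*z*C - x*s],
                 [z*x*C - y*s, z*y*C + x*s, c + z*z*C]]
    return r

def rotation_about_point(angle, axis, center):
    """T(center) . R . T(-center): move the center to the origin, rotate, move back."""
    t, tinv = np.eye(4), np.eye(4)
    t[:3, 3] = center
    tinv[:3, 3] = -np.asarray(center, dtype=float)
    return np.dot(t, np.dot(axis_angle_rotation(angle, axis), tinv))

# A point one unit to the right of the center ends up one unit above it after a 90-degree turn.
m = rotation_about_point(np.pi / 2, [0, 0, 1], [1.0, 2.0, 0.0])
print(np.dot(m, np.array([2.0, 2.0, 0.0, 1.0])))   # approximately [1, 3, 0, 1]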
Example #2
 def implicitGradient(self, p):  # -> Vector3D :
     check_vector4(p)
     tp = np.dot(self.invmatrix, p)
     g = self.base_object.implicitGradient(tp)
     g[3] = 0  # important: zero the homogeneous component before applying the transposed inverse
     v4 = np.dot(np.transpose(self.invmatrix), g)
     v4[3] = 1
     return v4
 def hessianMatrix(self, p):
     check_vector4(p)
     ha = self.a.hessianMatrix(p)
     hb = self.b.hessianMatrix(p)
     h = ha * self.afactor  +  hb * self.bfactor
     #todo
     check_matrix3(h)
     return h
 def hessianMatrix(self, p):
     check_vector4(p)
     h = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], ndmin=2)
     check_matrix3(h)
     #todo : array of matrix3s
     n = p.shape[0]
     ha = np.tile(h[np.newaxis, :, :], (n, 1, 1))
     return h
Example #5
 def hessianMatrix(self, p):
     #warning: not tested
     check_vector4(p)
     tp = np.dot(self.invmatrix, p)
     h1 = self.base_object.hessianMatrix(tp)
     # chain rule for f(p) = g(M^-1 p):  H_f(p) = (M^-1)^T H_g(M^-1 p) M^-1;
     # only the 3x3 linear block of invmatrix enters the spatial Hessian
     invm3 = self.invmatrix[0:3, 0:3]
     h = np.dot(np.dot(invm3.T, h1), invm3)
     return h
Example #6
 def implicitFunction(self, p):
     check_vector4(p)
     va = self.a.implicitFunction(p)
     vb = - self.b.implicitFunction(p)
     if va > vb:
         v = vb
     else:
         v = va
     return v
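With the inside-positive convention used elsewhere in these examples (e.g. the sphere in Example #22, f > 0 inside), min(va, -vb) is the CSG difference a \ b: a point must be inside a and outside b. A small standalone illustration with two spheres (the sphere() helper is made up for this sketch):

# Standalone sketch of the same difference rule with two spheres (inside-positive convention).
import numpy as np

def sphere(center, radius):
    c = np.asarray(center, dtype=float)
    return lambda p: radius**2 - np.dot(p - c, p - c)

f_a = sphere((0.0, 0.0, 0.0), 1.0)   # the main body
f_b = sphere((0.8, 0.0, 0.0), 0.5)   # the part carved out

def difference(p):
    return min(f_a(p), -f_b(p))      # inside a AND outside b

print(difference(np.array([-0.5, 0.0, 0.0])) > 0)   # True: kept
print(difference(np.array([0.8, 0.0, 0.0])) > 0)    # False: removed by b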
Example #7
 def hessianMatrix(self, p):
     check_vector4(p)
     va = self.a.implicitFunction(p)
     vb = self.b.implicitFunction(p)
     if va > vb:
         h = self.b.hessianMatrix(p)
     else:
         h = self.a.hessianMatrix(p)
     check_matrix3(h)
     return h
Example #8
 def implicitGradient(self, p):
     check_vector4(p)
     va = self.a.implicitFunction(p)
     vb = self.b.implicitFunction(p)
     if va > vb:
         grad = self.b.implicitGradient(p)
     else:
         grad = self.a.implicitGradient(p)
     check_vector4(grad)
     return grad
    def implicitFunction(self, p):
        check_vector4(p)
        va = self.a.implicitFunction(p)
        vb = self.b.implicitFunction(p)
        #v = va * self.afactor  +  vb * self.bfactor + 1

        fa = self.afactor
        fb = self.bfactor
        v = -(1 - (fa*np.exp(va)   +  fb*np.exp(vb) ) / (fa+fb))

        return v
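The gradient of this exponential blend follows from the chain rule: grad v = (fa*exp(va)*grad(va) + fb*exp(vb)*grad(vb)) / (fa + fb), which is the formula used in the corresponding implicitGradient further below. A quick numerical spot-check with two toy fields (everything here is invented for the check and independent of the classes above):

# Spot-check of the blend gradient formula against central differences; sketch only.
import numpy as np

fa, fb = 2.0, 3.0
c = np.array([1.0, 0.0, 0.0])
va = lambda p: 1.0 - np.dot(p, p)                  # toy field a
vb = lambda p: 0.5 - np.dot(p - c, p - c)          # toy field b
grad_va = lambda p: -2.0 * p
grad_vb = lambda p: -2.0 * (p - c)

blend = lambda p: -(1.0 - (fa * np.exp(va(p)) + fb * np.exp(vb(p))) / (fa + fb))
blend_grad = lambda p: (fa * np.exp(va(p)) * grad_va(p) + fb * np.exp(vb(p)) * grad_vb(p)) / (fa + fb)

p = np.array([0.3, -0.2, 0.1])
eps = 1e-6
numeric = np.array([(blend(p + eps * e) - blend(p - eps * e)) / (2 * eps) for e in np.eye(3)])
print(np.allclose(numeric, blend_grad(p), atol=1e-5))   # True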
Example #10
    def ellipsoid_point_and_gradient(self, m, x, correctGrad, center=None):
        """ Checks if the point x is on the surface, and if the gradient is correct."""
        e = nonvec.Ellipsoid(m)
        v = e.implicitFunction(x)
        g = e.implicitGradient(x)
        check_vector4(g)
        correctScalar = 0
        assert np.abs(v - correctScalar) < TOLERANCE, ("Implicit Function's scalar value incorrect: %2.20f" % (v,))
        #assert np.allclose( g, correctGrad , atol=TOLERANCE ) , "Incorrect gradient"
        msg = "nonvec.Ellipsoid(m): "+str(e)

        (are_parallel, are_directed) = vectors_parallel_and_direction(g[0:3], correctGrad[0:3])
        self.assertTrue(are_parallel, "Incorrect gradient: not parallel " + msg)
        self.assertTrue(are_directed, "parallel but opposite directions " + msg)
Example #11
    def implicitFunction(self, p):
        check_vector4(p)
        chosen_i = None
        v = np.inf
        for i in range(len(self.p0)):
            p0 = self.p0[i]
            n0 = self.n0[i]
            vi = np.dot(p - p0, n0)
            if vi < v:
                v = vi
                chosen_i = i

        assert chosen_i is not None

        return v
    def implicitGradient(self, p):
        check_vector4(p)
        va = self.a.implicitFunction(p)
        vb = self.b.implicitFunction(p)
        ca = va #np.tile( va[:,np.newaxis], (1,4) )
        cb = vb #np.tile( vb[:,np.newaxis], (1,4) )

        grada = self.a.implicitGradient(p)
        gradb = self.b.implicitGradient(p)
        #not tested
        fa = self.afactor
        fb = self.bfactor
        grad = + (fa*np.exp(ca)*grada   +  fb*np.exp(cb)*gradb ) / (fa+fb)
        grad[3] = 1
        check_vector4(grad)
        return grad
        def side(x, y, z):
            p0 = (make_vector4(x, y, z) + 0.0)
            p0 = p0 / 2.0 * size
            n0 = -make_vector4(x, y, z)
            self.p0 += [p0]
            self.n0 += [n0]
            self.p0[-1][3] = 1
            self.n0[-1][3] = 1
            #print(self.p0[-1])
            check_vector4(self.p0[-1])
            check_vector4(self.n0[-1])

            def norm2(v):
                return v[0] * v[0] + v[1] * v[1] + v[2] * v[2]

            assert norm2(self.n0[-1][0:3]) - 1 == 0.0
def func_test_bisection_p(iobj, sp1, sp2, prop, max_iter_count):

    global count_converged
    global count_not_converged

    from basic_types import check_vector4
    check_vector4(sp1)
    check_vector4(sp2)
    p1 = sp1.reshape((1, 4))
    p2 = sp2.reshape((1, 4))
    f1 = iobj.implicitFunction(p1)
    f2 = iobj.implicitFunction(p2)
    print "f1,f2: ", f1, f2

    #p = bisection_prop_2(iobj, p1, p2, f1, f2, 20)
    if prop:
        converged, p, p2, jj = bisection_prop_2(iobj, p1, p2, f1, f2,
                                                max_iter_count)
        assert p is not None
    else:
        p, jj = bisection_3_standard(iobj, p1, p2, f1, f2, max_iter_count)
        converged = p is not None

    if not converged:
        count_not_converged += 1
        if prop:
            if VERBOSE:
                print "*** bisection_prop_2 did not converge. Hence the result is an interval"  # this happens
                print(p, p2, jj)
        if not prop:
            if VERBOSE:
                print "*** bisection_standard did not converge. Hence the result is None"
                print p, jj
    else:
        print(count_converged)
        count_converged += 1
        if VERBOSE:
            print "converged: p", p
            print "iteration: ", jj
        from basic_types import make_vector4_vectorized
        p4 = make_vector4_vectorized(p[0, 0], p[0, 1], p[0, 2])
        f = iobj.implicitFunction(p4)
        if VERBOSE:
            print("f(p)=", f)
            print("Total distance travelled: ", np.linalg.norm(sp1 - p),
                  np.linalg.norm(sp2 - p))
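For context, the bisection being exercised here narrows an interval along the segment between two points whose implicit-function values have opposite signs. A generic standalone sketch of that idea (not the repository's bisection_prop_2 or bisection_3_standard):

# Generic bisection along a segment straddling the surface f = 0; sketch only.
import numpy as np

def bisect_on_segment(f, p1, p2, f1, f2, max_iter=20, tol=1e-8):
    assert f1 * f2 < 0, "endpoints must lie on opposite sides of the surface"
    for _ in range(max_iter):
        pm = 0.5 * (p1 + p2)
        fm = f(pm)
        if abs(fm) < tol:
            return pm
        if fm * f1 < 0:
            p2, f2 = pm, fm        # the root lies in [p1, pm]
        else:
            p1, f1 = pm, fm        # the root lies in [pm, p2]
    return 0.5 * (p1 + p2)

f = lambda p: 1.0 - np.dot(p, p)                    # unit sphere, positive inside
a, b = np.zeros(3), np.array([2.0, 0.0, 0.0])
print(bisect_on_segment(f, a, b, f(a), f(b)))       # approximately [1, 0, 0]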
Example #15
    def rotate(self, angle, along, units="rad"):
        if units == "rad":
            pass
        elif units == "deg":
            angle = angle / 360.0 * np.pi * 2.0
        else:
            raise UsageError()

        check_vector4(along)
        rm = tf.rotation_matrix(angle, along[0:3])
        self.matrix = np.dot(rm, self.matrix)
        self.invmatrix = make_inverse(self.matrix)

        #print(angle /(3.1415926536*2) * 360 )
        #print(rm)
        #print(self.matrix)
        return self
Example #16
    def implicitGradient(self, p):
        check_vector4(p)
        chosen_i = None
        v = np.inf
        for i in range(len(self.p0)):
            p0 = self.p0[i]
            n0 = self.n0[i]
            vi = np.dot(p - p0, n0)
            if vi < v:
                v = vi
                chosen_i = i
                grad = n0.copy()  # copy so that setting grad[3] below does not modify self.n0[i]

        assert chosen_i is not None

        #not tested
        grad[3] = 1
        check_vector4(grad)
        return grad
Example #17
    def test_ellipsoid_random_points(self):
        """Testing hundreds of random points on a sphere of size RADIUS=3"""
        for i in range(0, 100):

            RADIUS = 3
            POW = 4  # higher POW will get points more toward parallel to axes

            rcenter = make_random_vector(1000, 1.0)[0:3]
            r0 = make_random_vector(RADIUS, POW)[0:3]
            r = r0 + rcenter
            assert r.shape[0] == 3
            x = make_vector4(r[0], r[1], r[2])

            m = np.eye(4) * RADIUS
            m[0:3, 3] = rcenter[0:3]
            m[3, 3] = 1

            expected_grad = make_vector4(-r0[0], -r0[1], -r0[2])
            check_vector4(expected_grad)

            self.ellipsoid_point_and_gradient(m, x,  expected_grad)
Example #18
def optimize_vertex(vert, iobj, radius_of_max_change):
    v = make_vector4(vert[0], vert[1], vert[2])  # inefficient
    check_vector4(v)

    #f = iobj.implicitFunction(v)
    #g = iobj.implicitGradient(v)
    #v += g * np.random.rand() * radius_of_max_change * 2
    #v[3] = 1

    iterations = 2  #14
    for i in range(iterations):

        f = iobj.implicitFunction(v)
        g = iobj.implicitGradient(v)

        tau = 0.1  # * 3
        a = 1
        z_force = -tau * a * f * g

        v += z_force
        v[3] = 1

    return v[0:3]
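The update above moves the vertex by -tau * f(v) * grad f(v), so points drift towards the f = 0 surface from either side. A toy standalone run of the same scheme on a unit sphere (no library classes involved):

# Toy version of the same relaxation on f(p) = 1 - |p|^2 (gradient -2p); sketch only.
import numpy as np

f = lambda p: 1.0 - np.dot(p, p)
grad = lambda p: -2.0 * p

v = np.array([1.7, 0.4, -0.3])            # start away from the surface
tau = 0.1
for _ in range(20):
    v = v + (-tau) * f(v) * grad(v)       # same form as z_force above, with a = 1
print(np.linalg.norm(v))                  # converges towards 1.0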
Example #19
    def __init__(self, A, w, u, radius_u, radius_v, c_len):
        """ The cross section of a SimpleCylinder is always a circle.
        """
        (self.A, self.w, self.u, self.radius_u, self.radius_v, self.c_len) = \
            (A, w, u, radius_u, radius_v, c_len)

        check_vector4(w)
        check_vector4(u)
        check_vector4(A)
        assert w[3] == 1
        assert u[3] == 1
        assert A[3] == 1
        w = w[:3]
        u = u[:3]
        A = A[:3]

        v = np.cross(u[:3], w[:3])
        assert w.shape == (3,)
        assert u.shape == (3,)
        assert v.shape == (3,)
        assert A.shape == (3,)

        self.u = u[:, np.newaxis]
        self.v = v[:, np.newaxis]
        self.w = w[:, np.newaxis]
        self.A = A[:, np.newaxis]
        assert self.u.shape == (3, 1)
        assert self.v.shape == (3, 1)
        assert self.w.shape == (3, 1)
        assert self.A.shape == (3, 1)

        assert np.isscalar(radius_u)
        assert np.isscalar(radius_v)

        norm_tol = 0.00000001  # tolerance for the unit-norm checks below
        assert np.abs(np.linalg.norm(w) - 1.0) < norm_tol
        assert np.abs(np.linalg.norm(u) - 1.0) < norm_tol
        self.UVW = np.concatenate((self.u, self.v, self.w), axis=1)
        self.UVW_inv = np.linalg.inv(self.UVW)
        assert self.c_len > 0
        assert self.integrity_invariant()
Example #20
 def hessianMatrix(self, p):
     check_vector4(p)
     h = np.array([[-2, 0, 0], [0, -2, 0], [0, 0, -2]], ndmin=2)
     check_matrix3(h)
     return h
Example #21
 def implicitGradient(self, p):
     check_vector4(p)
     grad = -2 * p
     grad[3] = 1
     check_vector4(grad)
     return grad
Example #22
 def implicitFunction(self, p):
     check_vector4(p)
     return 1.0 - (np.dot(p[:3], p[:3]))
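Examples #20 to #22 describe the same primitive, a unit sphere with the inside-positive convention: f(p) = 1 - p.p, gradient -2p, and constant Hessian -2*I (f is quadratic). A quick standalone check of the gradient against central differences:

# Consistency check for Examples #20-#22; standalone, not library code.
import numpy as np

f = lambda p: 1.0 - np.dot(p, p)
p = np.array([0.2, -0.5, 0.7])
eps = 1e-6
num_grad = np.array([(f(p + eps * e) - f(p - eps * e)) / (2 * eps) for e in np.eye(3)])
print(np.allclose(num_grad, -2.0 * p, atol=1e-6))   # True: matches Example #21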
Example #23
 def implicitFunction(self, p):
     check_vector4(p)
     tp = np.dot(self.invmatrix, p)
     v = self.base_object.implicitFunction(tp)
     return v
Example #24
def numerical_gradient(iobj,
                       pos0,
                       delta_t=0.01 / 10.0 / 10.0,
                       order=5,
                       is_vectorized="unspecified"):
    #0.1 is not enough for delta_t
    assert is_vectorized != "unspecified"
    if is_vectorized:
        check_vector4(pos0)  # FIXME: the vectorized path should use check_vector4_vectorized(pos0)
        assert issubclass(type(iobj), vectorized.ImplicitFunctionVectorized)
    else:
        check_vector4(pos0)
        assert issubclass(type(iobj), nonvec.ImplicitFunctionPointwise)

    m = order  # sample points: -m,...,-1,0,1,2,...,+m

    _VERBOSE = False
    from lib import finite_diff_weights

    sample_points = range(-m, m + 1)
    n = m * 2 + 1

    x0 = 0
    findiff_weights = finite_diff_weights.weights(k=1,
                                                  x0=x0,
                                                  xs=np.array(sample_points) *
                                                  delta_t)
    del x0

    pos0_4 = repeat_vect4(1, pos0)
    pos = np.tile(pos0_4, (3 * n, 1))
    assert not issubclass(pos.dtype.type, np.integer)

    if pos.shape[0] in [1, 1718772]:  # debugging trap
        set_trace()

    dx = repeat_vect4(1, make_vector4(1, 0, 0))
    dy = repeat_vect4(1, make_vector4(0, 1, 0))
    dz = repeat_vect4(1, make_vector4(0, 0, 1))
    dxyz = [dx, dy, dz]

    ci = 0
    for d in range(3):
        for i in sample_points:
            dd = dxyz[d]

            if pos.shape[0] == 1 and ci == 1:
                set_trace()

            pos[ci, :] = pos[ci, :] + (dd * delta_t * float(i))
            #w[ci] = findef(i,n)
            ci += 1

    pos[:, 3] = 1

    if is_vectorized:
        v = iobj.implicitFunction(pos)  # v.shape: (3*n,)
    else:
        v = np.zeros((pos.shape[0], ))
        for i in range(pos.shape[0]):
            v1 = iobj.implicitFunction(pos[i, :])
            v[i] = v1
    v3 = np.reshape(v, (3, n), order='C')  # v3.shape: (3, n)
    #print( np.diff(v, axis=0) / delta_t )
    #print( np.diff(v3, axis=1) / delta_t )

    # Lipschitz constant (the variable names below keep the original spelling "Lipchitz")
    Lipchitz_B = 50
    Lipchitz_beta = 1  # order; keep it 1
    # Hölder continuity:  |f(h) - f(0)| <= B * |h|**beta
    b_h_beta = Lipchitz_B * (np.abs(delta_t)**Lipchitz_beta)
    #print("v3=",v3)
    #print("diff=",np.diff(v3, axis=1) )
    #print(b_h_beta)

    d0 = np.abs(np.diff(v3, axis=1))
    #lipschitz_condition = d <= b_h_beta
    nonsmooth_ness = d0 / (np.abs(delta_t)**Lipchitz_beta)
    #nonsmooth_ness2 = np.mean(nonsmooth_ness, axis=1)

    d = np.abs(np.diff(v3, n=1, axis=1)) / np.abs(delta_t)
    d = d - np.tile(np.mean(d, axis=1, keepdims=True), (1, d.shape[1]))
    d = np.abs(d) / np.abs(delta_t)
    d = d - np.tile(np.mean(d, axis=1, keepdims=True), (1, d.shape[1]))
    #d = np.abs(d)
    #d = np.abs( np.diff(d, n=1, axis=1) ) / np.abs(delta_t)
    #nonsmooth_ness = d / (np.abs(delta_t)**Lipchitz_beta)
    #if(np.max(np.ravel(d))) > 50:
    #    print("warning")
    #    print(nonsmooth_ness)

    #print(d)
    if (np.max(np.ravel(nonsmooth_ness))) > 100 * 10:
        print "warning: nonsmooth ",
        #print(nonsmooth_ness)  # lots of zeros and one big value
    """ Calculating the numerical derivative using finite difference (convolution with weights) """
    # convolution of the sampled values with the finite-difference weights
    grad_cnv = np.dot(v3, findiff_weights)

    #grad_cnv = np.reshape(grad_cnv, (1,3))[:,np.newaxis]
    #grad_cnv = np.concatenate( ( grad_cnv[np.newaxis,:], np.reshape(np.array([1]),(1,1)) ), axis=1)

    def v3_to_v14(v):
        """ Converts shape from (3,) into a (1,4) vector4 """
        assert v.ndim == 1
        return np.concatenate(
            (v[np.newaxis, :], np.reshape(np.array([1]), (1, 1))), axis=1)

    grad_cnv = v3_to_v14(grad_cnv)
    #print("weights: ",findiff_weights)

    #Detecting sharp edges (non-smooth points, i.e. corners and edges and ridges)
    if np.max(np.abs(grad_cnv)) > 100:
        pass
        #print("*******  max(grad) > 100")
        #print(np.abs(grad_cnv))
    #else:
    #    print(np.abs(grad_cnv))

    if _VERBOSE:
        #np.set_printoptions( precision=9 )
        np.set_printoptions(
            formatter={'all': lambda x: '' + ("%2.19f" % (x, ))})
    """ Calculating the numerical derivative using 'mean of diff' """
    grad_mean = np.mean(-np.diff(v3, axis=1) / delta_t, axis=1)
    if _VERBOSE:
        print("grad_mean: ", grad_mean)
        print("grad_convolusion: ", grad_cnv)

    if _VERBOSE:
        g = iobj.implicitGradient(pos0_4)
        print("grad_analytical: ", g)

        #print( grad_cnv.shape )
        #print( g.shape )
        #print( grad_mean.shape )

        print("Errors:")
        print("conv error: ", g - grad_cnv)
        #Amazing precision: [[0.0000000000001995071 -0.0000000038590481921 0.0000000000000008882  0.0000000000000000000]]

        print("mean error: ", g - v3_to_v14(grad_mean))
        #Terrible error: [-0.262   2.12266  0 ]

        #v3 * findiff_weights

        print("to be continued")

    assert not np.any(np.isnan(grad_cnv.ravel()))
    return grad_cnv
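Stripped of the vectorisation, debugging traps, and smoothness diagnostics, the core of numerical_gradient is a per-axis finite difference. A minimal standalone sketch using second-order central differences instead of the higher-order weights above:

# Bare-bones numerical gradient by central differences; sketch only.
import numpy as np

def numerical_gradient_simple(f, p, delta=1e-4):
    g = np.zeros(3)
    for d in range(3):
        step = np.zeros(3)
        step[d] = delta
        g[d] = (f(p + step) - f(p - step)) / (2.0 * delta)
    return g

f = lambda p: 1.0 - np.dot(p, p)                    # the sphere from Example #22
p = np.array([0.3, 0.1, -0.2])
print(numerical_gradient_simple(f, p))              # approximately [-0.6, -0.2, 0.4]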