Example #1
def almost_equal3(a, b, TOLERANCE):
    # Reject NaN/Inf in either argument (the original checked each on only
    # one side) and integer dtypes, then compare by L1 distance.
    assert not np.any(np.isnan(a.ravel())) and not np.any(np.isnan(b.ravel()))
    assert not np.any(np.isinf(a.ravel())) and not np.any(np.isinf(b.ravel()))
    assert not issubclass(a.dtype.type, np.integer)
    check_vector3(a)
    check_vector3(b)
    return np.sum(np.abs(a - b)) < TOLERANCE
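A minimal usage sketch; check_vector3 is stubbed here as a plain shape check (hypothetical stand-in; the real validator lives in the source codebase):

import numpy as np

def check_vector3(v):
    # Hypothetical stub for the project's validator.
    assert v.shape == (1, 3)

a = np.array([[1.0, 2.0, 3.0]])
b = np.array([[1.0, 2.0, 3.0 + 1e-9]])
print(almost_equal3(a, b, TOLERANCE=1e-6))  # True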
Example #2
    def implicitGradient(self, p):
        check_vector3(p)
        # Lift the query points to homogeneous coordinates (append a 1 column).
        p = np.concatenate((p, np.ones((p.shape[0], 1))), axis=1)
        # Map each row-stacked point into the base object's local frame.
        tp = np.dot(self.invmatrix, p.T).T
        g = self.base_object.implicitGradient(tp)
        # Gradients transform with the inverse-transpose: for each row g_i
        # this computes invmatrix^T @ g_i.
        v3 = np.dot(g, self.invmatrix)
        return v3
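Why the inverse-transpose shows up: for a field composed with a linear map, the chain rule gives

\nabla_p \, f\!\left(M^{-1} p\right) = \left(M^{-1}\right)^{\top} \left(\nabla f\right)\big|_{M^{-1} p}

so gradients of the transformed object are the base gradients multiplied by the transpose of invmatrix, which is what the code above computes.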
Example #3
def numerical_gradient(iobj, pos0, delta_t=0.01 / 10.0 / 10.0, order=5):

    check_vector3(pos0)

    assert issubclass(type(iobj), vector3.ImplicitFunction)

    m = order  # sample points: -m, ..., -1, 0, 1, 2, ..., +m

    sample_points = range(-m, m + 1)
    n = m * 2 + 1

    x0 = 0
    findiff_weights = weights(k=1, x0=x0, xs=np.array(sample_points) * delta_t)

    pos = repeat_vect3(1, pos0)
    pos3 = np.tile(pos, (3 * n, 1))

    assert not issubclass(pos.dtype.type, np.integer)

    dx = repeat_vect3(1, make_vector3(1, 0, 0))
    dy = repeat_vect3(1, make_vector3(0, 1, 0))
    dz = repeat_vect3(1, make_vector3(0, 0, 1))
    dxyz = [dx, dy, dz]

    # Build 3*n probe points: n stencil samples along each axis around pos0.
    ci = 0
    for d in range(3):
        for i in sample_points:
            dd = dxyz[d]
            pos3[ci, :] = pos3[ci, :] + (dd * delta_t * float(i))
            ci += 1

    v = iobj.implicitFunction(pos3)

    # One row per axis, n stencil samples per row.
    v3 = np.reshape(v, (3, n), order='C')

    Lipschitz_beta = 1  # order. Keep it 1.

    # First differences along each stencil; large values flag non-smoothness.
    d0 = np.abs(np.diff(v3, axis=1))

    nonsmooth_ness = d0 / (np.abs(delta_t)**Lipschitz_beta)

    # Centered variation of the differences (diagnostic only; 'd' is unused).
    d = np.abs(np.diff(v3, n=1, axis=1)) / np.abs(delta_t)
    d = d - np.tile(np.mean(d, axis=1, keepdims=True), (1, d.shape[1]))
    d = np.abs(d) / np.abs(delta_t)
    d = d - np.tile(np.mean(d, axis=1, keepdims=True), (1, d.shape[1]))

    if np.max(np.ravel(nonsmooth_ness)) > 100 * 10:
        print("warning: nonsmooth ", end="")

    # Calculate the numerical derivative by finite differences: convolve the
    # sampled values with the precomputed stencil weights.
    grad_cnv = np.dot(v3, findiff_weights)

    # Detect sharp edges (non-smooth points, i.e. corners, edges and ridges).
    if np.max(np.abs(grad_cnv)) > 100:
        pass

    return grad_cnv.reshape(1, 3)
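The stencil step in isolation: the hardcoded coefficients below are the standard five-point central-difference weights for the first derivative, which the weights helper would produce for a five-point uniform stencil:

import numpy as np

delta_t = 1e-3
# 5-point central-difference weights for d/dx, xs = [-2, -1, 0, 1, 2] * delta_t.
w = np.array([1.0, -8.0, 0.0, 8.0, -1.0]) / (12.0 * delta_t)

xs = 0.5 + np.arange(-2, 3) * delta_t
v = np.sin(xs)                      # sample a known function on the stencil
print(np.dot(v, w), np.cos(0.5))    # FD estimate vs exact derivative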
Example #4
    def rotate(self, angle, along, units="rad"):
        if units == "rad":
            pass
        elif units == "deg":
            angle = angle / 360.0 * np.pi * 2.0  # degrees -> radians
        else:
            raise ValueError("units must be 'rad' or 'deg'")

        check_vector3(along)
        rm = tf.rotation_matrix(angle, along)
        # Compose on the left so the new rotation is applied last.
        self.matrix = np.dot(rm, self.matrix)
        self.invmatrix = make_inverse(self.matrix)

        return self
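The deg branch is the standard degree-to-radian conversion; a one-line sanity check against NumPy's own helper:

import numpy as np
angle = 90.0
assert np.isclose(angle / 360.0 * np.pi * 2.0, np.deg2rad(angle))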
Example #5
        def side(x, y, z):
            # One face of the box: a point on the face and its inward normal.
            p0 = make_vector3(x, y, z)
            p0 = p0 / 2.0 * size
            n0 = -make_vector3(x, y, z)
            self.p0 += [p0]
            self.n0 += [n0]
            check_vector3(self.p0[-1])
            check_vector3(self.n0[-1])

            def norm2(v):
                return v[0] * v[0] + v[1] * v[1] + v[2] * v[2]

            # Exact equality is fine here: the normals are axis-aligned unit
            # vectors, so norm2 is exactly 1.0 in floating point.
            assert norm2(self.n0[-1]) - 1 == 0.0
Example #6
    def implicitFunction(self, p):
        check_vector3(p)
        chosen_i = None
        v = +np.inf
        # Intersection of half-spaces: the field value is the minimum signed
        # distance to the face planes (positive inside, negative outside).
        for i in range(len(self.p0)):
            p0 = self.p0[i]
            n0 = self.n0[i]
            vi = np.dot(p - p0, n0)
            if vi < v:
                v = vi
                chosen_i = i

        assert chosen_i is not None

        return v
Example #7
    def implicitGradient(self, p):
        check_vector3(p)
        chosen_i = None
        grad = None
        v = +np.inf
        # The gradient is the inward normal of the face that attains the
        # minimum in implicitFunction.
        for i in range(len(self.p0)):
            p0 = self.p0[i]
            n0 = self.n0[i]
            vi = np.dot(p - p0, n0)
            if vi < v:
                v = vi
                chosen_i = i
                grad = n0

        assert chosen_i is not None
        check_vector3(grad)
        return grad
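Examples #5 through #7 together implement a box as the intersection of six half-spaces: the field value is the smallest signed distance to any face plane, and the gradient is that face's inward normal. A standalone sketch of the same construction (names here are illustrative, not from the library):

import numpy as np

def cube_implicit(p, size=2.0):
    # Min over six half-space distances: positive inside, negative outside.
    best = np.inf
    for axis in range(3):
        for s in (+1.0, -1.0):
            n0 = np.zeros(3); n0[axis] = -s      # inward-facing normal
            p0 = -n0 * (size / 2.0)              # point on that face
            best = min(best, np.dot(p - p0, n0))
    return best

print(cube_implicit(np.array([0.0, 0.0, 0.0])))  # 1.0 (deep inside)
print(cube_implicit(np.array([2.0, 0.0, 0.0])))  # -1.0 (outside)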
Example #8
def numerical_gradient(iobj, pos0, delta_t=0.01/10.0/10.0, order=5):

    check_vector3(pos0)
    assert issubclass(type(iobj), vector3.ImplicitFunction)
    m = order  # sample points: -m, ..., -1, 0, 1, ..., +m

    _VERBOSE = False
    import sys
    import finite_diff_weights

    sample_points = range(-m, m+1)
    n = m*2+1

    x0 = 0
    findiff_weights = finite_diff_weights.weights(k=1, x0=x0, xs=np.array(sample_points) * delta_t)

    pos0_3 = repeat_vect3(1, pos0)
    pos = np.tile(pos0_3, (3*n, 1))
    assert not issubclass(pos.dtype.type, np.integer)

    dx = repeat_vect3(1, make_vector3(1, 0, 0))
    dy = repeat_vect3(1, make_vector3(0, 1, 0))
    dz = repeat_vect3(1, make_vector3(0, 0, 1))
    dxyz = [dx, dy, dz]

    # Build 3*n probe points: n stencil samples along each axis around pos0.
    ci = 0
    for d in range(3):
        for i in sample_points:
            dd = dxyz[d]
            pos[ci, :] = pos[ci, :] + (dd * delta_t * float(i))
            ci += 1

    v = iobj.implicitFunction(pos)
    v3 = np.reshape(v, (3, n), order='C')  # one row per axis

    Lipschitz_beta = 1

    # First differences along each stencil; large values flag non-smoothness.
    d0 = np.abs(np.diff(v3, axis=1))
    nonsmooth_ness = d0 / (np.abs(delta_t)**Lipschitz_beta)

    # Centered variation of the differences (diagnostic only; 'd' is unused).
    d = np.abs(np.diff(v3, n=1, axis=1)) / np.abs(delta_t)
    d = d - np.tile(np.mean(d, axis=1, keepdims=True), (1, d.shape[1]))
    d = np.abs(d) / np.abs(delta_t)
    d = d - np.tile(np.mean(d, axis=1, keepdims=True), (1, d.shape[1]))

    if np.max(np.ravel(nonsmooth_ness)) > 100*10:
        print("warning: nonsmooth ", end="")

    # Calculate the numerical derivative by finite differences (convolution
    # of the sampled values with the stencil weights).
    grad_cnv = np.dot(v3, findiff_weights)

    # Detect sharp edges (non-smooth points, i.e. corners, edges and ridges).
    if np.max(np.abs(grad_cnv)) > 100:
        pass

    if _VERBOSE:
        np.set_printoptions(formatter={'all': lambda x: "%2.19f" % (x,)})

    # Numerical derivative via the 'mean of diff', kept for comparison.
    grad_mean = np.mean(-np.diff(v3, axis=1) / delta_t, axis=1)
    if _VERBOSE:
        sys.stderr.write("grad_mean: %s\n" % (grad_mean,))
        sys.stderr.write("grad_convolution: %s\n" % (grad_cnv,))

        # Compare against the analytical gradient.
        g = iobj.implicitGradient(pos0_3)
        sys.stderr.write("grad_analytical: %s\n" % (g,))

        sys.stderr.write("Errors:\n")
        sys.stderr.write("conv error: %s\n" % (g - grad_cnv,))

        sys.stderr.write("to be continued\n")

    return grad_cnv
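Sign convention aside, the 'mean of diff' estimator telescopes: averaging the n-1 forward differences is just the endpoint slope, a cruder estimate than the weighted stencil. A self-contained check:

import numpy as np

h = 1e-3
xs = 0.5 + np.arange(-2, 3) * h
v = np.sin(xs)

grad_mean = np.mean(np.diff(v) / h)         # telescopes to (v[-1] - v[0]) / (4*h)
print(grad_mean, (v[-1] - v[0]) / (4 * h))  # identical
print(np.cos(0.5))                          # exact derivative, for reference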
Example #9
    def implicitGradient(self, p):
        check_vector3(p)
        grad = -2 * p  # gradient of f(p) = 1 - <p, p>
        check_vector3(grad)
        return grad
Example #10
    def implicitFunction(self, p):
        check_vector3(p)
        # Unit sphere: positive inside, zero on the surface, negative outside.
        return 1.0 - (np.dot(p, p))
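A quick cross-check tying Examples #9 and #10 together: the analytic gradient of f(p) = 1 - <p, p> should agree with a central-difference estimate (plain NumPy, no library helpers needed):

import numpy as np

p = np.array([0.3, -0.2, 0.5])
f = lambda q: 1.0 - np.dot(q, q)

h = 1e-6
num = np.array([(f(p + h*e) - f(p - h*e)) / (2*h) for e in np.eye(3)])
print(num, -2 * p)  # both approximately [-0.6, 0.4, -1.0]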
Example #11
    def implicitFunction(self, p):
        check_vector3(p)
        # Lift to homogeneous coordinates so the 4x4 inverse transform applies.
        p = np.concatenate((p, np.ones((p.shape[0], 1))), axis=1)
        # Map each row-stacked point into the base object's local frame.
        tp = np.dot(self.invmatrix, p.T).T
        v = self.base_object.implicitFunction(tp)
        return v
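The homogeneous-coordinate step in isolation: appending a 1 to each point lets a single 4x4 matrix encode rotation and translation together (T below is a hypothetical translation, standing in for the object's invmatrix):

import numpy as np

p = np.array([[1.0, 2.0, 3.0]])             # one point, shape (1, 3)
T = np.eye(4); T[:3, 3] = [10.0, 0.0, 0.0]  # translate x by +10

ph = np.concatenate((p, np.ones((p.shape[0], 1))), axis=1)  # shape (1, 4)
print(np.dot(T, ph.T).T[:, :3])             # [[11.  2.  3.]]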