def implicitFunction(self, xa, return_grad=False):
    check_vector3_vectorized(xa)
    assert self.w.shape == (3, 1)
    assert self.A.shape == (3, 1)
    x = xa
    count = x.shape[0]
    assert x.shape == (count, 3)
    aa = np.tile(np.transpose(self.A), (count, 1))
    assert aa.shape == (count, 3)
    # Parameter t along the axis, and the foot point p of each query point.
    t_ = np.dot(x - aa, self.w)  # Nx1
    assert t_.shape == (count, 1)
    t = t_[:, 0]
    t_arr_1xN = t.reshape((1, count))
    p = np.transpose(aa) + np.dot(self.w, t_arr_1xN)  # 3x1 * 1xcount
    assert p.shape == (3, count)
    assert self.radius_u == self.radius_v
    r = np.linalg.norm(x - np.transpose(p), ord=2, axis=1)
    assert r.shape == (count,)
    # Signed distances to the bottom cap, the top cap and the lateral surface.
    t0 = t
    t1 = self.c_len - t
    r_ = self.radius_u - r
    m3 = np.concatenate(
        (t0[:, np.newaxis], t1[:, np.newaxis], r_[:, np.newaxis]), axis=1)
    fval = np.min(m3, axis=1)
    if not return_grad:
        return fval
    else:
        # The gradient is that of whichever of the three terms attains the minimum.
        c_t0 = np.logical_and(t0 <= t1, t0 <= r_)
        c_t1 = np.logical_and(t1 <= t0, t1 <= r_)
        c_r = np.logical_and(r_ <= t0, r_ <= t1)
        assert c_t0.ndim == 1
        assert c_t1.ndim == 1
        c_t0 = np.tile(c_t0[:, np.newaxis], (1, 3))
        c_t1 = np.tile(c_t1[:, np.newaxis], (1, 3))
        c_r = np.tile(c_r[:, np.newaxis], (1, 3))
        grad_t0 = np.tile(self.w[np.newaxis, :, 0], (count, 1))
        grad_t1 = -grad_t0
        grad_r = np.transpose(p) - x
        g3 = c_t0 * grad_t0 + c_t1 * grad_t1 + c_r * grad_r
        check_vector3_vectorized(g3)
        return fval, g3
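# A minimal self-contained sketch (not part of the original class) of the same
# construction: a finite cylinder as the minimum of the two cap planes and the
# lateral surface. The names (axis_point, axis_dir, length, radius) are
# illustrative only.
def _cylinder_field_sketch(x, axis_point, axis_dir, length, radius):
    import numpy as np
    w = axis_dir / np.linalg.norm(axis_dir)       # unit axis direction
    t = np.dot(x - axis_point[np.newaxis, :], w)  # parameter along the axis
    foot = axis_point[np.newaxis, :] + t[:, np.newaxis] * w[np.newaxis, :]
    r = np.linalg.norm(x - foot, axis=1)          # radial distance to the axis
    # Inside-positive field: min of bottom cap, top cap and lateral surface.
    return np.minimum(np.minimum(t, length - t), radius - r)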
def implicitGradient(self, p):
    check_vector3_vectorized(p)
    sides = len(self.p0)
    n = p.shape[0]
    na = np.zeros((sides, 3))
    temp = np.zeros((n, sides))
    for i in range(sides):
        p0 = self.p0[i]
        n0 = self.n0[i]
        sub = p - np.tile(p0[np.newaxis, :], (n, 1))
        temp[:, i] = np.dot(sub, n0)
        na[i, :] = n0
    # The gradient of the min of the plane fields is the closest plane's normal.
    ia = np.argmin(temp, axis=1)
    assert ia.shape == (n,)
    g = na[ia, :]
    check_vector3_vectorized(g)
    return g
def implicitFunction(self, p):
    check_vector3_vectorized(p)
    N = p.shape[0]
    # Rotate each point about the z-axis by an angle proportional to its z coordinate.
    theta = p[:, 2] * self.lamda
    assert theta.shape == (N,)
    ca = np.cos(theta)
    sa = np.sin(theta)
    p2 = np.concatenate((
        ca[:, np.newaxis] * p[:, 0, np.newaxis] - sa[:, np.newaxis] * p[:, 1, np.newaxis],
        sa[:, np.newaxis] * p[:, 0, np.newaxis] + ca[:, np.newaxis] * p[:, 1, np.newaxis],
        p[:, 2, np.newaxis],
    ), axis=1)
    v = self.base_object.implicitFunction(p2)
    check_scalar_vectorized(v)
    return v
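# An equivalent, more compact formulation of the twist mapping above (a sketch,
# not the original code): rotate each point about the z-axis by theta = lamda * z
# before evaluating the base object.
def _twist_points_sketch(p, lamda):
    import numpy as np
    theta = p[:, 2] * lamda
    ca, sa = np.cos(theta), np.sin(theta)
    return np.stack((ca * p[:, 0] - sa * p[:, 1],
                     sa * p[:, 0] + ca * p[:, 1],
                     p[:, 2]), axis=1)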
def implicitGradient(self, x):
    check_vector3_vectorized(x)
    count = x.shape[0]
    g = np.zeros((count, 3))
    # Fall back to a per-point numerical gradient.
    for i in range(count):
        g[i, :] = numerical_gradient(self, x[i, 0:3])
    return g
def implicitFunction(self, p):
    check_vector3_vectorized(p)
    va = self.a.implicitFunction(p)
    vb = self.b.implicitFunction(p)
    # Element-wise selection of the smaller of the two field values.
    c = 1.0 - np.greater(va, vb)
    v = va * c + vb * (1 - c)
    check_scalar_vectorized(v)
    return v
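# A sketch (not the original code) of the same crisp selection written with
# np.where, which avoids the boolean-arithmetic round trip:
def _crisp_select_sketch(va, vb):
    import numpy as np
    return np.where(va <= vb, va, vb)  # equals va * c + vb * (1 - c) above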
def implicitFunction(self, p):
    check_vector3_vectorized(p)
    # Map the points into the base object's local frame using homogeneous coordinates.
    p = np.concatenate((p, np.ones((p.shape[0], 1))), axis=1)
    tp = np.dot(self.invmatrix, np.transpose(p))
    tp = np.transpose(tp)
    tp = tp[:, :3]
    v = self.base_object.implicitFunction(tp)
    check_scalar_vectorized(v)
    return v
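# A self-contained sketch of the homogeneous-coordinate point transform used
# above (illustrative only): append 1 to each point, multiply by the inverse
# matrix, and drop the homogeneous component.
def _transform_points_sketch(points, invmatrix):
    import numpy as np
    n = points.shape[0]
    ph = np.concatenate((points, np.ones((n, 1))), axis=1)  # (N, 4)
    tp = np.dot(invmatrix, ph.T).T                          # (N, 4)
    return tp[:, :3]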
def implicitFunction(self, pa):
    check_vector3_vectorized(pa)
    pa = np.concatenate((pa, np.ones((pa.shape[0], 1))), axis=1)
    # Inefficient. todo: multiply from the right instead, to avoid the transposes.
    tp = np.dot(self.invmatrix, np.transpose(pa))
    tp = np.transpose(tp)
    tp = tp[:, :3]
    v = self.sphere.implicitFunction(tp)
    check_scalar_vectorized(v)
    return v
def implicitFunction(self, p):
    check_vector3_vectorized(p)
    va = self.a.implicitFunction(p)
    vb = self.b.implicitFunction(p)
    fa = self.afactor
    fb = self.bfactor
    # Weighted exponential blend of the two fields.
    v = -(1 - (fa * np.exp(va) + fb * np.exp(vb)) / (fa + fb))
    check_scalar_vectorized(v)
    return v
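# A quick sanity sketch of the exponential blend above (not the original code):
# when both operands agree, va = vb = v, the blend reduces to -(1 - exp(v)),
# which approximates v itself near the surface (v ~ 0).
def _blend_sanity_sketch():
    import numpy as np
    v = np.array([0.0, 0.01, -0.01])
    fa = fb = 1.0
    blended = -(1 - (fa * np.exp(v) + fb * np.exp(v)) / (fa + fb))
    assert np.allclose(blended, np.expm1(v))  # -(1 - e^v) == e^v - 1
    return blended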
def compute_centroid_gradients(centroids, iobj, normalise=True):
    assert centroids is not None
    centroids = centroids[:, :3]
    check_vector3_vectorized(centroids)
    centroid_gradients = iobj.implicitGradient(centroids)
    assert not np.any(np.isnan(centroid_gradients))
    assert not np.any(np.isinf(centroid_gradients))
    if normalise:
        centroid_normals = normalize_vector3_vectorized(centroid_gradients)
        return centroid_normals
    else:
        return centroid_gradients
def implicitFunction(self, p):
    check_vector3_vectorized(p)
    sides = len(self.p0)
    n = p.shape[0]
    temp = np.zeros((n, sides))
    for i in range(sides):
        p0 = self.p0[i]
        n0 = self.n0[i]
        sub = p - np.tile(p0[np.newaxis, :], (n, 1))
        temp[:, i] = np.dot(sub, n0)
    # The value is the smallest signed distance over all bounding planes.
    va = np.amin(temp, axis=1)
    return va
def implicitGradient(self, p):
    check_vector3_vectorized(p)
    va = self.a.implicitFunction(p)
    vb = self.b.implicitFunction(p)
    # Select, per point, the gradient of the operand with the larger field value.
    c = np.greater(va, vb)
    c = np.tile(np.expand_dims(c, axis=1), (1, 3))
    assert c.shape[1:] == (3,)
    grada = self.a.implicitGradient(p)
    gradb = self.b.implicitGradient(p)
    grad = grada * c + gradb * (1 - c)
    check_vector3_vectorized(grad)
    return grad
def implicitGradient(self, p):
    check_vector3_vectorized(p)
    va = self.a.implicitFunction(p)
    vb = self.b.implicitFunction(p)
    ca = np.tile(va[:, np.newaxis], (1, 3))
    cb = np.tile(vb[:, np.newaxis], (1, 3))
    grada = self.a.implicitGradient(p)
    gradb = self.b.implicitGradient(p)
    # not tested
    fa = self.afactor
    fb = self.bfactor
    grad = (fa * np.exp(ca) * grada + fb * np.exp(cb) * gradb) / (fa + fb)
    check_vector3_vectorized(grad)
    return grad
def implicitFunction(self, xa):
    check_vector3_vectorized(xa)
    assert self.w.shape == (3, 1)
    assert self.A.shape == (3, 1)
    x = xa
    count = x.shape[0]
    assert x.shape == (count, 3)
    aa = np.tile(np.transpose(self.A), (count, 1))
    assert aa.shape == (count, 3)
    # Parameter t along the screw axis, and the foot point p of each query point.
    t = np.dot(x - aa, self.w.ravel())
    assert t.shape == (count,)
    t1 = t.reshape((1, count))
    assert t1.shape == (1, count)
    p = np.transpose(aa) + np.dot(self.w, t1)  # 3x1 * 1xcount
    assert p.shape == (3, count)
    # Cylindrical coordinates (theta, r) around the axis.
    ab = np.dot(self.UVW_inv, np.transpose(x) - p)
    assert ab.shape == (3, count)
    theta = np.arctan2(ab[1, :], ab[0, :])
    r = np.linalg.norm(x - np.transpose(p), ord=2, axis=1)
    assert r.shape == (count,)
    # 1.0 inside the axial extent [0, slen], 0.0 outside.
    inside_ness = t / self.slen
    inside_ness = 1 - 2 * np.abs(inside_ness - 0.5)
    inside_ness = (inside_ness > 0) * 1.0
    pi2 = np.pi * 2

    def phi(x):
        # Triangle wave with period 1 and range [-1, +1].
        return np.abs(2 * (x - np.floor(x)) - 1.0) * 2.0 - 1.0

    return (-r + self.r0
            + self.delta * phi(t / self.twist_rate - theta / pi2 + self.phi0)) * inside_ness
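# A small check of the triangle wave phi used above (a sketch, not original
# code): phi has period 1 and runs from +1 down to -1 and back over each period.
def _phi_sketch():
    import numpy as np
    phi = lambda x: np.abs(2 * (x - np.floor(x)) - 1.0) * 2.0 - 1.0
    x = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
    assert np.allclose(phi(x), [1.0, 0.0, -1.0, 0.0, 1.0])
    assert np.allclose(phi(x + 3), phi(x))  # period 1
    return phi(x)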
def visualise_gradients(mlab, pos, iobj, arrow_size):
    lm = arrow_size  # arrow length scale
    pos3 = pos
    pnormals = -iobj.implicitGradient(pos3)
    pnormals = normalize_vector3_vectorized(pnormals)
    check_vector3_vectorized(pos3)
    xyz = pos3
    uvw = pnormals[:, 0:3] / 2.
    xx, yy, zz = xyz[:, 0], xyz[:, 1], xyz[:, 2]
    uu, vv, ww = uvw[:, 0], uvw[:, 1], uvw[:, 2]
    mlab.quiver3d(xx, yy, zz, uu, vv, ww, color=(0, 0, 0),
                  scale_factor=np.abs(lm), line_width=0.5)
def ellipsoid_point_and_gradient_vectorized(self, m, xa, correctGrad, center=None):
    """Checks that the points xa are on the surface and that the gradients are correct."""
    e = vector3.Ellipsoid(m)
    msg = "Ellipsoid(m): " + str(e)
    va = e.implicitFunction(xa)
    ga = e.implicitGradient(xa)
    check_vector3_vectorized(ga)
    N = xa.shape[0]
    check_scalar_vectorized(va, N)
    assert ga.ndim == 2
    assert ga.shape == (N, 3)
    assert correctGrad.shape == (N, 3)
    correctScalar = 0
    less_a = np.less(np.abs(va - correctScalar), TOLERANCE)
    if not np.all(less_a):
        sys.stderr.write("Some error: %s %s %s %s\n" % (xa, va, ga, e))
    self.assertTrue(np.all(less_a), "Implicit Function's scalar value incorrect")
    for i in range(ga.shape[0]):
        (are_parallel, are_directed) = vectors_parallel_and_direction(
            ga[i, :], correctGrad[i, :])
        self.assertTrue(are_parallel, "Incorrect gradient: not parallel " + msg)
        self.assertTrue(are_directed, "parallel but opposite directions " + msg)
def test_ellipsoid_random_points(self):
    """Tests hundreds of random points on a sphere of size RADIUS."""
    for i in range(0, 30):
        RADIUS = 3
        POW = 4  # a higher POW biases points toward the axes
        N = 500
        rcenter = make_random_vector3(1000, 1.0)
        centers_a = repeat_vect3(N, rcenter)
        r0 = make_random_vector3_vectorized(N, RADIUS, POW)
        r = r0 + centers_a
        assert r.shape[1] == 3
        xa = r
        m = np.eye(4) * RADIUS
        m[0:3, 3] = rcenter
        m[3, 3] = 1
        expected_grad = -r0
        check_vector3_vectorized(expected_grad)
        self.ellipsoid_point_and_gradient_vectorized(m, xa, expected_grad)
def implicitGradient(self, pa):
    check_vector3_vectorized(pa)
    pa = np.concatenate((pa, np.ones((pa.shape[0], 1))), axis=1)
    tp = np.dot(self.invmatrix, np.transpose(pa))
    tp = np.transpose(tp)
    tp = tp[:, :3]
    g = self.sphere.implicitGradient(tp)
    check_vector3_vectorized(g)
    # Transform the gradient back with the transpose of the inverse matrix.
    g = np.concatenate((g, np.ones((g.shape[0], 1))), axis=1)
    v4 = np.dot(np.transpose(self.invmatrix), np.transpose(g))
    v4 = np.transpose(v4)
    v3 = v4[:, :3]
    check_vector3_vectorized(v3)
    return v3
def implicitGradient(self, p):  # -> Vector3D
    check_vector3_vectorized(p)
    p = np.concatenate((p, np.ones((p.shape[0], 1))), axis=1)
    tp = np.dot(self.invmatrix, np.transpose(p))
    tp = np.transpose(tp)
    tp = tp[:, :3]
    g = self.base_object.implicitGradient(tp)
    check_vector3_vectorized(g)
    # Transform the gradient back with the transpose of the inverse matrix.
    g = np.concatenate((g, np.ones((g.shape[0], 1))), axis=1)
    v4 = np.dot(np.transpose(self.invmatrix), np.transpose(g))
    v4 = np.transpose(v4)
    v3 = v4[:, :3]
    check_vector3_vectorized(v3)
    return v3
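# A numerical sanity sketch (not original code) for the gradient transform
# above: for v(x) = f(M_inv @ x), the chain rule gives grad_v = M_inv_3x3^T
# applied to grad_f, which is exactly what the transpose(invmatrix) product
# computes; the appended homogeneous 1 contributes nothing because the bottom
# row of an affine matrix is [0, 0, 0, 1].
def _gradient_transform_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    M_inv = np.eye(4)
    M_inv[:3, :3] = rng.rand(3, 3) + np.eye(3)  # affine part
    M_inv[:3, 3] = rng.rand(3)                  # translation
    grad_local = rng.rand(3)                    # gradient in the base frame
    # Homogeneous product as in the code above:
    g4 = np.dot(M_inv.T, np.concatenate((grad_local, [1.0])))
    assert np.allclose(g4[:3], np.dot(M_inv[:3, :3].T, grad_local))
    return g4[:3]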
def bisection_vectorized5_(iobj, x1_arr, x2_arr, ROOT_TOLERANCE):
    """Based on bisection_vectorized5. Note that this function assumes that
    neither x1 nor x2 is itself a root."""
    check_vector3_vectorized(x1_arr)
    check_vector3_vectorized(x2_arr)
    assert x1_arr.shape[0] == x2_arr.shape[0]
    v1_arr = iobj.implicitFunction(x1_arr)
    v2_arr = iobj.implicitFunction(x2_arr)
    result_x_arr = np.ones(x1_arr.shape)
    n = x1_arr.shape[0]
    active_indices = np.arange(0, n)
    active_count = n
    solved_count = 0
    x_mid_arr = np.ones((active_count, 3))
    v_mid_arr = np.zeros((active_count,))
    iteration = 1
    while True:
        # Invariant: the x1 endpoints stay strictly on the negative side.
        assert np.all(v1_arr[:active_count] < 0 - ROOT_TOLERANCE)
        assert active_indices.shape[0] == x1_arr[:active_count].shape[0]
        assert active_indices.shape[0] == x2_arr[:active_count].shape[0]
        x_mid_arr[:active_count] = (x1_arr[:active_count] + x2_arr[:active_count]) / 2.0
        v_mid_arr[:active_count] = iobj.implicitFunction(x_mid_arr[:active_count, :])
        assert active_indices.shape == (active_count,)
        assert active_indices.ndim == 1
        abs_ = np.abs(v_mid_arr[:active_count])
        indices_boundary = np.nonzero(abs_ <= ROOT_TOLERANCE)[0]
        indices_outside = np.nonzero(v_mid_arr[:active_count] < -ROOT_TOLERANCE)[0]
        indices_inside = np.nonzero(v_mid_arr[:active_count] > +ROOT_TOLERANCE)[0]
        indices_eitherside = np.nonzero(abs_ > ROOT_TOLERANCE)[0]
        assert indices_boundary.size + indices_inside.size + indices_outside.size == active_count
        assert indices_eitherside.size + indices_boundary.size == active_count
        which_zeroed = active_indices[indices_boundary]
        found_count = indices_boundary.shape[0]
        solved_count += found_count
        assert active_count - found_count + solved_count == n
        result_x_arr[which_zeroed] = x_mid_arr[indices_boundary]
        assert np.all(indices_boundary < active_count)
        # Shrink the bracket: the midpoint replaces the endpoint on its own side.
        v2_arr[indices_inside] = v_mid_arr[indices_inside]
        x2_arr[indices_inside] = x_mid_arr[indices_inside]
        v1_arr[indices_outside] = v_mid_arr[indices_outside]
        x1_arr[indices_outside] = x_mid_arr[indices_outside]
        assert np.all(indices_outside < active_count)
        assert np.all(indices_inside < active_count)
        # ------ next round --------
        assert active_count == active_indices.size
        active_indices = active_indices[indices_eitherside]
        assert active_count - found_count == active_indices.size
        old_active_count = active_count
        active_count = active_count - found_count
        assert active_count == indices_eitherside.size
        assert active_count == active_indices.size
        iteration += 1
        assert np.all(indices_eitherside < old_active_count)
        # Compact the still-active entries to the front of the arrays.
        v1_arr[:active_count] = v1_arr[indices_eitherside]
        v2_arr[:active_count] = v2_arr[indices_eitherside]
        x1_arr[:active_count] = x1_arr[indices_eitherside]
        x2_arr[:active_count] = x2_arr[indices_eitherside]
        assert active_indices.shape == v1_arr[:active_count].shape
        assert active_indices.shape[0] == active_count
        del old_active_count
        if len(active_indices) == 0:
            break
    assert active_indices.size == 0
    optimisation_used = optimised_used()
    if not optimisation_used:
        v_arr = iobj.implicitFunction(result_x_arr)
        assert np.all(np.abs(v_arr) < ROOT_TOLERANCE)
    return result_x_arr
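# A minimal usage sketch for bisection_vectorized5_ (illustrative, not from the
# original code). `_UnitSphere` is a hypothetical stand-in for any object with a
# vectorized implicitFunction, using the inside-positive convention: the x1
# points must lie outside (negative values) and the x2 points inside (positive).
def _example_bisection_on_sphere():
    import numpy as np

    class _UnitSphere(object):
        def implicitFunction(self, x):
            return 1.0 - np.sum(x * x, axis=1)  # positive inside the unit sphere

    iobj = _UnitSphere()
    x_outside = np.array([[2.0, 0.0, 0.0], [0.0, 3.0, 0.0]])  # f < 0 here
    x_inside = np.array([[0.1, 0.0, 0.0], [0.0, 0.2, 0.0]])   # f > 0 here
    roots = bisection_vectorized5_(iobj, x_outside.copy(), x_inside.copy(), 0.001)
    return roots  # each root lies approximately on the sphere surface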
# Not useful here because we are working with vectors whose dimensions are (3, 1).
def numerical_gradient_vectorized_v2(iobj, pos0, delta_t=0.01 / 100., order=5):
    """A properly vectorized implementation. See numerical_gradient().
    Note: delta_t = 0.1 is not small enough."""
    check_vector3_vectorized(pos0)
    assert issubclass(type(iobj), vector3.ImplicitFunctionVectorized)
    assert pos0.ndim == 2
    if pos0.shape[0] == 0:
        return np.zeros((0, 3))
    m = order  # sample points: -m, ..., -1, 0, 1, 2, ..., +m
    sample_points = range(-m, m + 1)
    n = m * 2 + 1
    x0 = 0
    findiff_weights = weights(k=1, x0=x0, xs=np.array(sample_points) * delta_t)
    del x0
    assert n < 20
    pos0_3 = pos0[:, np.newaxis, :]
    pos = np.tile(pos0_3, (1, 3 * n, 1))
    assert not issubclass(pos.dtype.type, np.integer)
    # Offset the n copies of each point along x, y and z by multiples of delta_t.
    dx = make_vector3(1, 0, 0)[np.newaxis, np.newaxis, :]
    dy = make_vector3(0, 1, 0)[np.newaxis, np.newaxis, :]
    dz = make_vector3(0, 0, 1)[np.newaxis, np.newaxis, :]
    dxyz = [dx, dy, dz]
    ci = 0
    for d in range(3):
        dd = dxyz[d]
        for i in sample_points:
            pos[:, ci, :] = pos[:, ci, :] + (dd * (delta_t * float(i)))
            assert ci < 3 * n
            ci += 1
    vsize = pos0.shape[0]
    v = iobj.implicitFunction(pos.reshape((vsize * 3 * n, 3)))
    v3 = np.reshape(v, (vsize, 3, n), order='C')
    # Warn if the function looks too non-smooth at this scale (Lipschitz-style check).
    Lipchitz_beta = 1  # order; keep it 1
    d0 = np.abs(np.diff(v3, axis=1 + 1))
    nonsmooth_ness = d0 / (np.abs(delta_t) ** Lipchitz_beta)
    del d0, Lipchitz_beta
    if np.max(np.ravel(nonsmooth_ness)) > 100 * 10:
        print("warning: nonsmooth")
    del nonsmooth_ness
    """ Calculating the numerical derivative using finite differences
    (convolution with the weights). """
    grad_cnv = np.dot(v3, findiff_weights)
    # np.dot: "sum product over the last axis of a and the second-to-last of b"
    assert not np.any(np.isnan(grad_cnv), axis=None)
    return grad_cnv
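# A small sanity sketch (not original code) for the finite-difference weights
# used above: mirroring the weights(k=1, ...) call, the dot product of sampled
# sin values with the weights should approximate d/dx sin(x) at 0, i.e. 1.0.
def _findiff_weights_sketch(delta_t=0.0001, m=5):
    import numpy as np
    xs = np.arange(-m, m + 1) * delta_t
    w = weights(k=1, x0=0, xs=xs)  # first-derivative weights at x0 = 0
    approx = np.dot(np.sin(xs), w)
    assert abs(approx - 1.0) < 1e-6
    return approx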
def check_centroids_projection(self, iobj, objname=None):
    """Do the centroids projection."""
    TOLERANCE = 0.00001  # TOLERANCE = 0.7 passes the test for every object
    if iobj is not None:
        VERTEX_RELAXATION_ITERATIONS_COUNT = 0
        (RANGE_MIN, RANGE_MAX, STEPSIZE) = (-3, +5, 0.2)
        if objname in ("cube_with_cylinders", "twist_object", "french_fries",
                       "rdice_vec", "rods", "bowl_15_holes"):
            VERTEX_RELAXATION_ITERATIONS_COUNT = 1
        if objname == "cyl4":
            (RANGE_MIN, RANGE_MAX, STEPSIZE) = (-32 / 2, +32 / 2, 1.92 / 4.0)
        elif objname == "french_fries" or objname == "rods":
            (RANGE_MIN, RANGE_MAX, STEPSIZE) = (-3, +5, 0.11)  # 0.05
        elif objname == "bowl_15_holes":
            (RANGE_MIN, RANGE_MAX, STEPSIZE) = (-3, +5, 0.15)
        elif objname == "cyl3":
            (RANGE_MIN, RANGE_MAX, STEPSIZE) = (-32 / 2, +32 / 2, 1.92 / 4.0)
        elif objname == "cyl1":
            (RANGE_MIN, RANGE_MAX, STEPSIZE) = (-16, +32, 1.92 * 0.2 * 10 / 2.0)

        from stl_tests import make_mc_values_grid
        gridvals = make_mc_values_grid(iobj, RANGE_MIN, RANGE_MAX, STEPSIZE, old=False)
        vertex, faces = vtk_mc(gridvals, (RANGE_MIN, RANGE_MAX, STEPSIZE))
        sys.stderr.write("MC calculated.")
        sys.stdout.flush()

        from ohtake_belyaev_demo_subdivision_projection_qem import \
            process2_vertex_resampling_relaxation, compute_average_edge_length, \
            set_centers_on_surface__ohtake_v3s
        from ohtake_belyaev_demo_subdivision_projection_qem import \
            compute_centroid_gradients, vertices_apply_qem3
        for i in range(VERTEX_RELAXATION_ITERATIONS_COUNT):
            vertex, faces_not_used, centroids = \
                process2_vertex_resampling_relaxation(vertex, faces, iobj)
            assert not np.any(np.isnan(vertex.ravel()))  # fails
            sys.stderr.write("Vertex relaxation applied.")
            sys.stdout.flush()

        # Projection.
        average_edge = compute_average_edge_length(vertex, faces)
        old_centroids = np.mean(vertex[faces[:], :], axis=1)
        check_vector3_vectorized(old_centroids)
        new_centroids = old_centroids.copy()
        set_centers_on_surface__ohtake_v3s(iobj, new_centroids, average_edge)
        vertex_neighbours_list = mesh_utils.make_neighbour_faces_of_vertex(faces)
        centroid_gradients = compute_centroid_gradients(new_centroids, iobj)
        new_vertex_qem = vertices_apply_qem3(
            vertex, faces, new_centroids, vertex_neighbours_list, centroid_gradients)
        check_vector3_vectorized(new_vertex_qem)

        # Check the projection by evaluating the implicit function at the new vertices.
        f = iobj.implicitFunction(new_vertex_qem)
        # Two ways of running this test: either fail strictly as soon as one value
        # exceeds the tolerance, or just count and report the points that fail.
        count_failing_points = True
        if count_failing_points:
            fail = 0
            for i in range(new_vertex_qem.shape[0]):
                if math.fabs(f[i]) > TOLERANCE:
                    fail += 1
            print("%s number of points: %d, number of points that fail the test: %d" %
                  (objname, new_centroids.shape[0], fail))
        else:
            for i in range(new_vertex_qem.shape[0]):
                print("Fails the test: %f" % math.fabs(f[i]))
                self.assertTrue(math.fabs(f[i]) < TOLERANCE)