def generate_power_func(n):
    """Build and return a compiled closure computing ``x ** n``.

    The inner function captures ``n`` from the enclosing scope; returning
    it keeps that cell variable alive for later native calls.
    """
    def nth_power(x):
        return x ** n

    # Compile explicitly; equivalent to decorating the def with
    # @jit(float_(float_)).
    nth_power = jit(float_(float_))(nth_power)
    # Exercise the native call once at build time.
    print(nth_power(10))
    return nth_power
def generate_power_func(n):
    """Return a jit-compiled closure that raises its argument to the n-th power."""
    @jit(float_(float_))
    def nth_power(x):
        return x**n  # runs as a native call once compiled

    # Demonstrate the compiled call before handing the closure back.
    sample = nth_power(10)
    print(sample)
    # Returning the closure keeps all captured cell variables alive.
    return nth_power
def test_type_inference(self):
    """This is testing numpy ufunc dispatch machinery."""
    global vector_add
    vector_add = vectorize([
        bool_(double, int_),
        double(double, double),
        float_(double, float_),
    ])(add)
    cfunc = jit(func)

    def numba_type_equal(a, b):
        # Compare numba array types field-wise; direct equality on the
        # array-type objects is unreliable.
        self.assertEqual(a.dtype, b.dtype)
        self.assertEqual(a.ndim, b.ndim)

    # (float64, int) can only resolve to the bool_(double, int_) signature.
    numba_type_equal(cfunc(np.dtype(np.float64), np.dtype('i')), bool_[:])
    numba_type_equal(cfunc(np.dtype(np.float64), np.dtype(np.float64)),
                     double[:])
    # This is because the double(double, double) signature matches first,
    # so (float64, float32) yields double[:], not float_[:].
    numba_type_equal(cfunc(np.dtype(np.float64), np.dtype(np.float32)),
                     double[:])
def test_type_inference(self):
    """This is testing numpy ufunc dispatch machinery"""
    global vector_add
    vector_add = vectorize([
        bool_(double, int_),
        double(double, double),
        float_(double, float_),
    ])(add)
    cfunc = jit(func)

    def check(actual, expected):
        # numba array types are compared field-wise.
        self.assertEqual(actual.dtype, expected.dtype)
        self.assertEqual(actual.ndim, expected.ndim)

    cases = [
        (np.dtype("i"), bool_[:]),
        (np.dtype(np.float64), double[:]),
        # This is because the double(double, double) matches first
        (np.dtype(np.float32), double[:]),
    ]
    for second_dtype, expected_type in cases:
        check(cfunc(np.dtype(np.float64), second_dtype), expected_type)
def test_type_inference(self):
    """This is testing numpy ufunc dispatch machinery """
    global vector_add
    signatures = [
        bool_(double, int_),
        double(double, double),
        float_(double, float_),
    ]
    vector_add = vectorize(signatures)(add)
    cfunc = jit(func)

    def assert_same_array_type(got, want):
        # Field-wise comparison of numba array types.
        self.assertEqual(got.dtype, want.dtype)
        self.assertEqual(got.ndim, want.ndim)

    f64 = np.dtype(np.float64)
    assert_same_array_type(cfunc(f64, np.dtype('i')), bool_[:])
    assert_same_array_type(cfunc(f64, np.dtype(np.float64)), double[:])
    # This is because the double(double, double) matches first
    assert_same_array_type(cfunc(f64, np.dtype(np.float32)), double[:])
def SOR(phi, tol=1e-3, omega=1.8):
    """
    Implementation of Simultaneous Over Relaxation SOR

    Parameters
    ----------
    phi : numpy array
        Starting values of phi. Non-zero entries are treated as fixed
        (boundary) values and are never updated.
    tol : float, optional
        Tolerance when stop. The default is 1e-3.
    omega : float, optional
        omega value (over-relaxation factor). The default is 1.8.

    Returns
    -------
    phi_first : numpy array
        Phi array after relaxation
    """
    omega = numba.float_(omega)
    # Mask of fixed cells: every initially non-zero entry is a boundary.
    # (Replaces the old dead zeros_like assignment + np.where round-trip.)
    phi_bool = phi != 0
    sum_first = np.sum(phi).astype(np.float64)
    phi_first = np.copy(phi).astype(np.float64)
    phi_last = np.copy(phi).astype(np.float64)
    overtol = True

    # Optional animation of the relaxation (disabled by default).
    animate = False
    ims = []
    if animate:
        fig = plt.figure(figsize=(15, 10))

    while overtol:
        for i in range(1, phi.shape[0] - 1):
            for j in range(1, phi.shape[1] - 1):
                if phi_bool[i, j]:
                    continue  # fixed boundary cell — never relaxed
                # Gauss-Seidel sweep blended with the previous iterate:
                # already-updated neighbours come from phi_last, the rest
                # from phi_first.
                phi_last[i, j] = (1 - omega) * phi_first[i, j] + omega / 4 * (
                    phi_last[i - 1, j] + phi_first[i + 1, j]
                    + phi_last[i, j - 1] + phi_first[i, j + 1])
        sum_last = np.sum(phi_last)
        # Converged once the grid sum changes by less than tol per sweep.
        if np.abs(sum_last - sum_first) < tol:
            overtol = False
        sum_first = sum_last
        phi_first = np.copy(phi_last)
        phi_last = np.copy(phi)
        if animate:
            image = plt.imshow(np.flipud(phi_first), cmap='jet',
                               interpolation='spline16', animated=True)
            ims.append([image])

    if animate:
        anime = animation.ArtistAnimation(fig, ims, interval=250)
        anime.save("Animaatio_face.mp4")
    return phi_first
# NOTE(review): this chunk begins mid-function — the `def` line of the
# coordinate-search helper (apparently `closest_ind`, called below) is not
# visible here, so its tail is reproduced as-is with inferred indentation.
    return fallback
    # startind was too large... go backwards
    for i in range(start - 1, -1, -1):
        if forward and crd[i] <= point:
            startinds[m] = i
            return i
        if not forward and crd[i] >= point:
            startinds[m] = i
            return i
    # if we've gone too far, pick the first index
    fallback = 0
    startinds[m] = fallback
    return fallback

@nb.jit(nb.float_(nb.float_[:,:,:,::1], nb.int_, nb.float_[:], nb.float_[:],
                  nb.float_[:], nb.float_[:], nb.int_[:]),
        nopython=False)
def interp_trilin(v, m, crdz, crdy, crdx, x, startinds):
    # Trilinear interpolation of component m of field v at point x;
    # startinds caches the last search index per axis to warm-start
    # closest_ind — presumably x is ordered (z, y, x); confirm with callers.
    ix = np.array([0, 0, 0], dtype=nb.int_)          # lower-corner indices
    p = np.array([0, 0, 0], dtype=nb.int_)           # offsets to upper corner
    xd = np.array([0.0, 0.0, 0.0], dtype=nb.float_)  # fractional distances
    crds = [crdz, crdy, crdx]
    # find iz, iy, ix from startinds
    for i in range(3):
        ind = closest_ind(crds[i], x[i], startinds, i)
        ix[i] = ind
        p[i] = 1
        xd[i] = (x[i] - crds[i][ind]) / (crds[i][ind + 1] - crds[i][ind])
    # First edge of the trilinear blend; the remaining terms and the return
    # statement continue past this chunk.
    c00 = v[ix[0], ix[1] , ix[2] , m] + xd[0] * (v[ix[0] + p[0], ix[1] , ix[2] , m] - v[ix[0], ix[1] , ix[2] , m])
@jit(float_[:, :, :, ::1](float_[:, :, :, ::1]), nopython=True, nogil=True,
     cache=True)
def null_to_chi(null):
    # Normalise `null` (shape (m, ?, k, l)) by the statistic returned by
    # std_vectorize_0_2_3 over null[:, 1:, :, :], broadcast back over axis 1.
    # NOTE(review): std_vectorize_0_2_3 is defined elsewhere in this file —
    # presumably a std reduction over axes 0, 2, 3; confirm its contract.
    m, _, k, l = null.shape
    return null / std_vectorize_0_2_3(np.ascontiguousarray(
        null[:, 1:, :, :])).reshape((m, 1, k, l))


# 5 statistics #################################################################

@vectorize_0_1
@jit(float_(float_[:, :]), nopython=True, nogil=True, cache=True)
def Y_1(chi):
    # Magnitude of the total sum of the chi matrix.
    return np.abs(chi.sum())


# Y_2: plain matrix maximum, lifted by the same vectorizer as the others.
Y_2 = vectorize_0_1(np.max)


@vectorize_0_1
@jit(float_(float_[:, :]), nopython=True, nogil=True, cache=True)
def Y_3(chi_sq):
    # Largest row-sum of the (squared-chi) matrix.
    return chi_sq.sum(axis=1).max()


# NOTE(review): decorators for the next statistic — its `def` continues
# past this chunk.
@vectorize_0_1
@jit(float_(float_[:, :]), nopython=True, nogil=True, cache=True)