def solve(x0, A, b, omega, tol):
    '''Wrapper function to use SOR algorithm to solve Ax = b

    Parameters
    ==========
    x0: numpy array
        First guess for solution
    A: numpy array
        matrix describing linear system
    b: numpy array
        vector of constants
    omega: float
        relaxation factor
    tol: float
        error tolerance for stopping iteration (error for convergence)
    '''
    # Format dtype of all parameters to numba double precision float
    # this helps @njit work correctly
    x0 = f8(x0)
    A = f8(A)
    b = f8(b)
    omega = f8(omega)
    M = u8(10**6)
    tol = f8(tol)
    # call actual SOR algorithm (need the numba dtypes to allow njit compile)
    x = solve_body(x0, A, b, omega, M, tol)
    return x
def solve_body(x0, A, b, omega, M, tol):
    '''SOR function. Iterates until the error is less than specified, or for
    M iterations, whichever comes first.

    Parameters
    ==========
    x0: numpy array
        First guess for solution
    A: numpy array
        matrix describing linear system
    b: numpy array
        vector of constants
    omega: float
        relaxation factor
    M: integer
        maximum number of steps while seeking convergence
    tol: float
        error size for stopping iteration (error for convergence)
    '''
    x = x0  # set initial guess
    err = f8(tol + 1000000)  # initial error level
    # Main loop exits after M iterations, but has a secondary break that
    # stops when the observed error is less than the selected tolerance
    for i in range(M):
        x_new = SOR_iter(x, A, b, omega)  # perform SOR iteration
        err = resid(x_new, A, b)  # compute new residual
        x = x_new  # reset x for new pass (or for output)
        # second break parameter (under error tolerance)
        if err < tol:
            break
    return x
def resid(x_new, x):
    '''Calculate the L2-norm of the difference between x_new and x'''
    # needs to be a numba double precision float so njit will work in the
    # body of the solve function
    err = f8(np.linalg.norm(x_new - x))
    return err
def resid(x, A, b):
    '''Calculate the L2-norm of the residual b - A.dot(x)'''
    # needs to be a numba double precision float so njit will work in the
    # body of the solve function
    err = f8(np.linalg.norm(b - np.dot(A, x)))
    return err
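# Usage sketch (an illustration, not from the source): solving a small,
# diagonally dominant system with the wrapper above; assumes the SOR_iter
# helper defined elsewhere in this module and `import numpy as np`.
A = np.array([[4.0, -1.0, 0.0],
              [-1.0, 4.0, -1.0],
              [0.0, -1.0, 4.0]])
b = np.array([3.0, 2.0, 3.0])
x = solve(np.zeros(3), A, b, 1.1, 1e-10)  # x should approach A^{-1} b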
def convolve(signal, ref, window, result):
    smem = cuda.shared.array(0, f8)
    i, j = cuda.grid(2)
    S = signal.size
    W = window.size
    R = ref.shape[0]
    Bix = cuda.blockIdx.x   # Block index along the x dimension -> indexing the signal
    BDx = cuda.blockDim.x   # Number of threads along x -> Many things
    tix = cuda.threadIdx.x  # x thread id within block [0, blockDim.x) -> indexing the window
    tiy = cuda.threadIdx.y  # y thread id within block [0, blockDim.y) -> indexing of memory
    tif = tix + tiy * BDx   # thread index within a block (flat) -> indexing lanes and shared memory
    index = j + tix         # reference and signal index

    value = f8(0)
    if (tix < W) & (index < S):
        value = window[tix] * (ref[R, index] * signal[index])
    value = reduce_warp(value, u4(0xffffffff))
    # Reduced sum should be present in the value of all threads with lane index == 0
    # Store the warp reduction in the shared memory
    if tif % 32 == 0:  # For all threads with lane index == 0
        smem[tif // 32] = value  # Flat warp id
    cuda.syncthreads()

    # When the blocksize is smaller than a single warp (32), we are done.
    # In this case we can be very specific about the locations we need
    if (BDx <= 32) and (tix == 0):
        result[Bix, j] = smem[tiy]

    # Otherwise, take values from the shared memory and reduce.
    # NOTE: maximum number of threads is 1024, which is 32 times a warp
    # (consisting of 32 threads). This means the warp reductions of 32 warps
    # fit back into a single warp.
    # Disperse the reduction values from the memory over the first threads
    # along the x direction. All others become 0
    Nwx = (BDx - 1) // 32 + 1
    if tix < BDx // 32:
        value = smem[tix + Nwx * tiy]
    else:
        value = 0
    # Perhaps it's better to put the index definition outside the if-else
    # block and remove this barrier
    cuda.syncthreads()
    # All threads in a first warp along x
    if tix // 32 == 0:
        value = reduce_warp(value, u4(0xffffffff))
    cuda.syncthreads()
    if (tix == 0) and (j < S):
        result[Bix, j] = value
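# Hypothetical launch sketch for the kernel above (the grid geometry is
# inferred from the indexing, so treat it as an assumption). The key detail
# is that cuda.shared.array(0, f8) declares *dynamic* shared memory, whose
# size in bytes is passed as the fourth launch parameter: one f8 slot per
# warp in the block.
tpb = (32, 8)                                   # threads per block (x: window taps, y: signal positions)
nwarps = (tpb[0] * tpb[1] + 31) // 32           # warps per block -> shared f8 slots
blocks = (ref.shape[0], (signal.size + tpb[1] - 1) // tpb[1])
convolve[blocks, tpb, 0, nwarps * 8](signal, ref, window, result)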
def MC_Amer_call(S0, K, mu, r, d, sigma, t, T, delta_t, N):
    '''
    Parameters
    ==========
    S0: float
        Initial value of stock process
    K: float
        Strike price
    mu: float
        Drift of stock process
    r: float
        Risk free rate
    d: float
        Continuous dividend yield
    sigma: float
        Volatility of stock process
    t: float
        Valuation time
    T: float
        Time horizon
    delta_t: float
        Time step size
    N: integer
        Number of paths to generate

    Returns
    =======
    V_0: float
        Value of American call option
    '''
    M = int((T - t) / delta_t)  # number of steps. Must be an integer.
    S = generate_paths(S0, mu, d, sigma, delta_t, N, M)
    h = np.maximum(S - K, 0)  # compute exercise values
    g = h[M]  # set up exercised value vector
    tau = np.repeat(T, N)  # set up stopping time vector
    for j in range(M - 1, 0, -1):
        k = S[j] > K  # in-the-money boolean vector
        x = S[j, k]  # in-the-money points
        y = g[k] * np.exp(-(r - d) * (tau[k] - j * delta_t))
        a, __ = fit(func, x, y)  # regression step
        C_hat = func(S[j, k], a[0], a[1], a[2], a[3])  # estimated continuation value
        # update g and tau where exercise is worth more than continuation;
        # chained fancy indexing such as g[k][mask] = ... assigns into a
        # copy, so the update is applied through explicit index arrays
        idx = np.where(k)[0]
        exercise = idx[h[j, k] >= C_hat]
        g[exercise] = h[j, exercise]
        tau[exercise] = j / f8(M) * (T - t)  # update optimal exercise time
    C_0 = np.sum(np.exp(-(r - d) * delta_t * tau).T * g) / N
    V_0 = np.maximum(C_0, h[0, 0])
    return V_0
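# The path generator `generate_paths`, regression basis `func` and fitter
# `fit` are not defined in this fragment. A plausible stand-in for the last
# two, consistent with the four fitted coefficients above (an assumption,
# not the author's confirmed choice), is a cubic polynomial fitted with
# scipy:
from scipy.optimize import curve_fit as fit


def func(x, a0, a1, a2, a3):
    return a0 + a1 * x + a2 * x ** 2 + a3 * x ** 3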
def solve(A, d):
    '''Helper function for the Thomas algorithm. Breaks the matrix into
    tridiagonal elements for easier processing by the algorithm.
    '''
    # pass numba float64 dtype np.arrays to the solve function - need to
    # perform this step to allow for nopython execution of the Thomas
    # algorithm, which yields maximum speed
    a = f8(np.diagonal(A, offset=0))
    b = f8(np.diagonal(A, offset=1))
    c = f8(np.diagonal(A, offset=-1))
    dfloat = f8(d)
    D = np.diag(a, 0) + np.diag(b, 1) + np.diag(c, -1)  # create test matrix
    # test if D is 'close enough' to A - if not, that means A was not
    # tridiagonal and the function raises an exception
    if not np.allclose(A, D):
        raise Exception('The given A is not tridiagonal')
    # pass to Thomas algorithm solver
    x = solve_body(a, b, c, dfloat)
    return x
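# Usage sketch (illustrative, assumes the solve_body Thomas routine defined
# elsewhere in this module): a symmetric tridiagonal system with a known
# answer.
A = np.array([[2.0, -1.0, 0.0],
              [-1.0, 2.0, -1.0],
              [0.0, -1.0, 2.0]])
d = np.array([1.0, 0.0, 1.0])
x = solve(A, d)  # -> approximately [1., 1., 1.]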
def run_target(N, target):
    print('== Target', target)
    vect_discriminant = vectorize(
        [f4(f4, f4, f4), f8(f8, f8, f8)], target=target)(discriminant)

    A, B, C = generate_input(N, dtype=np.float32)
    D = np.empty(A.shape, dtype=A.dtype)

    ts = time()
    D = vect_discriminant(A, B, C)
    te = time()

    total_time = (te - ts)

    print('Execution time %.4f' % total_time)
    print('Throughput %.4f' % (N / total_time))

    if '-verify' in sys.argv[1:]:
        check_answer(D, A, B, C)
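# The scalar kernel `discriminant` vectorized above is not shown in this
# fragment; a plausible definition consistent with the three inputs (a
# guess, not necessarily the original) is the quadratic discriminant:
def discriminant(a, b, c):
    return b ** 2 - 4 * a * c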
def solve_body(v0, b, g, omega, tol, theta, lamb, M):
    '''Projected SOR function. Iterates until the error is less than
    specified, or for M iterations, whichever comes first.
    '''
    # set initial guess
    err = f8(tol + 1000000)  # initial error level
    v = v0
    # Main loop exits after M iterations, but has a secondary break that
    # stops when the observed error is less than the selected tolerance
    for k in range(0, M):
        v_new = SOR_iter(b, v, g, omega, tol, theta, lamb)  # perform SOR iteration
        err = resid(v_new, v)  # compute new residual
        v = v_new
        # second break parameter (under error tolerance)
        if err < tol:
            break
    return v
def solve(v0, b, g, omega, tol, theta, lamb):
    '''Wrapper function to use the projected SOR algorithm to solve Ax = b

    Parameters
    ==========
    v0: numpy array
        First guess for solution
    b: numpy array
        vector to represent A * w
    g: numpy array
        vector representing early exercise values
    omega: float
        relaxation factor
    tol: float
        error tolerance for stopping iteration (error for convergence)
    theta: float
        parameter controlling what discretization method is being used
    lamb: float
        lambda parameter from option pricing model
    '''
    # Format dtype of all parameters to numba double precision float
    # this helps @njit work correctly
    v0 = f8(v0)
    b = f8(b)
    g = f8(g)
    omega = f8(omega)
    M = u8(10**6)
    tol = f8(tol)
    theta = f8(theta)
    lamb = f8(lamb)
    # call actual SOR algorithm (need the numba dtypes to allow njit compile)
    x = solve_body(v0, b, g, omega, tol, theta, lamb, M)
    return x
def y_rotate(rad, point):
    # y stays the same
    # this is actually a rotation with -rad (use symmetry of sin/cos)
    sin_rad = sin(rad)
    cos_rad = cos(rad)
    return point[0] * cos_rad + point[2] * sin_rad, point[1], \
        point[2] * cos_rad - point[0] * sin_rad


# @cc.export('coords2cartesian', dtype_3float_tuple(f8, f8))
@njit(dtype_3float_tuple(f8, f8), cache=True)
def coords2cartesian(lng_rad, lat_rad):
    return cos(lng_rad) * cos(lat_rad), sin(lng_rad) * cos(lat_rad), sin(lat_rad)


# @cc.export('distance_to_point_on_equator', f8(f8, f8, f8))
@njit(f8(f8, f8, f8), cache=True)
def distance_to_point_on_equator(lng_rad, lat_rad, lng_rad_p1):
    """
    uses the simplified haversine formula for this special case (lat_p1 = 0)

    :param lng_rad: the longitude of the point in radians
    :param lat_rad: the latitude of the point
    :param lng_rad_p1: the longitude of the point p1 on the equator (lat=0)
    :return: distance between the point and p1 (lng_rad_p1, 0) in km
        this is only an approximation since the earth is not a real sphere
    """
    # 2* for the distance in rad and * 12742 (mean diameter of earth) for the distance in km
    return 12742 * asin(sqrt((sin(lat_rad / 2)) ** 2
                             + cos(lat_rad) * (sin((lng_rad - lng_rad_p1) / 2)) ** 2))


# @cc.export('haversine', f8(f8, f8, f8, f8))
@njit(f8(f8, f8, f8, f8), cache=True)
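# The decorated haversine function is cut off in this fragment; the standard
# formula, consistent with the signature and the 12742 km mean diameter used
# above, would be (a sketch, not necessarily the author's exact code):
def haversine(lng_p1, lat_p1, lng_p2, lat_p2):
    return 12742 * asin(sqrt((sin((lat_p1 - lat_p2) / 2)) ** 2
                             + cos(lat_p1) * cos(lat_p2)
                             * (sin((lng_p1 - lng_p2) / 2)) ** 2))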
THROTTLE_MID_SPEED = 1400.0
BOOST_ACCELERATION = 991.6667
BREAK_ACCELERATION = 3500.0
MAX_CAR_SPEED = 2300.0

BOOST_CONSUMPTION_RATE = 33.3  # per second

# constants of the acceleration between 0 to 1400 velocity: acceleration = a * velocity + b
a = -(THROTTLE_ACCELERATION_0 - THROTTLE_ACCELERATION_1400) / THROTTLE_MID_SPEED
b = THROTTLE_ACCELERATION_0
b2 = THROTTLE_ACCELERATION_0 + BOOST_ACCELERATION

fast_jit = jit(f8(f8, f8), nopython=True, fastmath=True, cache=True)

State = namedtuple("State", ["dist", "vel", "boost", "time"])


class VelocityRange:
    max_speed = None
    use_boost = None

    @staticmethod
    def distance_traveled(t: float, v0: float) -> float:
        raise NotImplementedError

    @staticmethod
    def velocity_reached(t: float, v0: float) -> float:
        raise NotImplementedError
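# A sketch of one concrete range (an assumption based on the linear
# dv/dt = a*v + b model above, not necessarily the author's code):
from math import exp


class Velocity0To1400(VelocityRange):
    max_speed = THROTTLE_MID_SPEED
    use_boost = False

    @staticmethod
    @fast_jit
    def distance_traveled(t: float, v0: float) -> float:
        # integral of velocity_reached from 0 to t
        return (b * (exp(a * t) - a * t - 1) + a * v0 * (exp(a * t) - 1)) / a ** 2

    @staticmethod
    @fast_jit
    def velocity_reached(t: float, v0: float) -> float:
        # closed-form solution of dv/dt = a*v + b with v(0) = v0
        return (b / a + v0) * exp(a * t) - b / a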
    if mean.shape[0] != cov.shape[0]:
        raise ValueError("mean and cov must have same length")
    L = np.linalg.cholesky(cov)
    from numpy.random import standard_normal
    z = standard_normal(final_shape).reshape(mean.shape[0], -1)
    x = L.dot(z).T
    x += mean
    x.shape = tuple(final_shape)
    return x, L


# @nb.njit(nb.f8[:](nb.f8[:, :], nb.f8[:, :]), nogil=True)
@nb.njit(nb.f8(nb.f8[:, :], nb.f8[:, :]), nogil=True, cache=True)
def poly_line_intersect(poly, line):
    # extend_line = True
    eps = 1e-6
    none = np.inf  # np.zeros(1)
    v0v1 = poly[1, :] - poly[0, :]
    v0v2 = poly[2, :] - poly[0, :]
    dir = line[1, :] - line[0, :]
    line_len = math.sqrt(np.sum(dir**2))
    if line_len < eps:
        return none
    dir = dir / line_len
    pvec = cross3d(dir, v0v2)
@jit(Tuple((f8[::1], i8))(f8, f8, f8, f8, f8, f8[::1], f8[::1]), nopython=True)
def calc_host_propensity_vectors(TAU_H, B_H, D_H, K_H, dtBac, c, dtVec):
    birthPropVec = (1 + B_H * c) / TAU_H
    deathPropVec = (1 - D_H * c) * c.size / (K_H * TAU_H)
    totPropVec = np.concatenate((birthPropVec, deathPropVec))
    cumulPropVec = totPropVec.cumsum()
    # calc number of required time steps
    (dtHost, numSubStep) = calc_dynamic_timestep(cumulPropVec[-1], dtVec, dtBac)
    # calc probVec
    cumulPropVec *= dtHost
    return (cumulPropVec, numSubStep)


# get composition of host parent that will give birth
@jit(f8(f8[::1], f8[::1], i8), nopython=True)
def host_create_offspring(c, d, id_group):
    # draw offspring composition
    fracPar = c[id_group] / (c[id_group] + d[id_group])
    return fracPar


# update host dynamics while keeping microbial dynamics fixed
@jit(Tuple((f8[::1], f8[::1], f8[::1], i8))(f8[::1], f8[::1], f8[::1],
                                            f8[::1], i8, f8, f8,
                                            f8[:, ::1], i8))
def update_host(CVec, DVec, AgeVec, cumulPropVec, numSubStep, n0, sigma,
                rndMat, ridx):
    # init vectors that keep track of changes in host
    numGroup = CVec.size
    cNewTemp = np.zeros(numSubStep)
    smally2 = y2 // 3
    largey2 = smally2 if y2 == smally2 * 3 else smally2 + 1
    largez1 = z1 // 3
    smallz1 = largez1 if z1 == largez1 * 3 else largez1 + 1
    smallz2 = z2 // 3
    largez2 = smallz2 if z2 == smallz2 * 3 else smallz2 + 1
    if np.any(roughgrid[smallx1:smallx2, smally1:smally2, smallz1:smallz2]):
        return True
    if not np.any(roughgrid[largex1:largex2, largey1:largey2, largez1:largez2]):
        return False
    return np.any(grid[x1:x2, y1:y2, z1:z2])


@nb.njit(nb.f8(nb.b1[:, :, :], nb.b1[:, :, :], nb.i8, nb.i8, nb.i8,
               nb.i8[:, :, :], nb.f8[:, :], nb.i8))
def useBoostedTree2(grid, roughgrid, anchorx, anchory, direction, btsplits,
                    btleaves, ntrees):
    score = 0.
    for tree in range(ntrees):
        splitidx = 0
        for depth in range(3):
            tsplit = btsplits[tree, splitidx]
            if direction == 0:
                x1 = anchorx + tsplit[0]
                x2 = anchorx + tsplit[3]
                y1 = anchory + tsplit[1]
                y2 = anchory + tsplit[4]
            else:
                x1 = anchorx + 48 - tsplit[3]  ### change when changing anchor!!!
    n = int(n)
    fib = [0] * n
    fib[1] = 1
    for i in range(2, n):
        fib[i] = fib[i - 2] + fib[i - 1]
    return pd.Series(fib)


@lru_cache(maxsize=None)
def fibratio(n):
    n = int(n)
    f = fib(n)
    return f / f.iat[n - 1]


@jit(f8(f8[:], i8, i8), nopython=True)
def __rci_d__(v, i, p):
    sum = 0.0
    for j in range(p):
        o = 1
        k = v[i - j]
        for l in range(p):
            if k < v[i - l]:
                o = o + 1
        sum = sum + (j + 1 - o)**2
    return sum


@jit(void(f8[:], i8, i8, f8[:]), nopython=True)
def __rci_core__(v, n, p, r):
    k = (p * (p**2 - 1))
            S01 = S[y1, x0, k]
            S10 = S[y0, x1, k]
            S11 = S[y1, x1, k]
            ustar01 = ustar[y1, x0, k]
            ustar11 = ustar[y1, x1, k]
            ustar21 = ustar[y1, x2, k]
            vstar10 = vstar[y0, x1, k]
            vstar11 = vstar[y1, x1, k]
            vstar12 = vstar[y2, x1, k]
            if isnan(S11):
                ZonalFlux[j, i, k:] = nan
                MeridFlux[j, i, k:] = nan
                break
            else:
                ZonalFlux[j, i, k] = cal_a_GM_Flux(S00, S01, S10, S11,
                                                   ustar01, ustar11,
                                                   ustar21, ew_dist)
                MeridFlux[j, i, k] = cal_a_GM_Flux(S00, S10, S01, S11,
                                                   vstar10, vstar11,
                                                   vstar12, ns_dist)
    return ZonalFlux, MeridFlux


@jit(f8(f8, f8, f8, f8, f8, f8, f8, f8))
def cal_a_GM_Flux(S00, S01, S10, S11, U0, U1, U2, dist):
    Mt = 60 * 60 * 24 * 30  # seconds per month
    if not any(isnan([S00, S01, S10, S11, U0, U1, U2])):
        Flux = -((U2 + U1) * (S10 + S11) / 4.0
                 - (U1 + U0) * (S01 + S00) / 4.0) / dist * Mt
    else:
        Flux = nan
    return Flux
from compas.numerical.drx.drx_numpy import _beam_data
from compas.numerical.drx.drx_numpy import _create_arrays

# from compas_hpc.geometry import cross_vectors_numba as cross
# from compas_hpc.geometry import dot_vectors_numba as dot
# from compas_hpc.geometry import length_vector_numba as length

from time import time


__all__ = [
    'drx_numba',
]


@jit(f8(f8[:]), nogil=True, nopython=True, parallel=False, cache=True)
def length(a):
    """Calculate the length of a vector.

    Parameters
    ----------
    a : array
        XYZ components of the vector.

    Returns
    -------
    float
        The length of the vector.
    """
    return sqrt(a[0]**2 + a[1]**2 + a[2]**2)
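# Quick sanity check (illustrative; assumes `import numpy as np` is available
# in this module): a 3-4-5 right triangle.
assert length(np.array([3.0, 4.0, 0.0])) == 5.0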
@njit(f8[:](f8[:], f8[:]))
def update_state(x, c_params):
    μ_c, ρ, ϕ_z, v, d, ϕ_σ = c_params
    z, σ = x

    # update state
    σ2 = v * σ**2 + d + ϕ_σ * randn()
    σ = np.sqrt(max(σ2, 0))
    z = ρ * z + ϕ_z * σ * randn()

    return np.array((z, σ))


@njit(f8(f8[:], f8[:], f8[:]))
def eval_kappa(x, y, c_params):
    """
    Computes kappa_{t+1} given z_t and sigma_t
    """
    μ_c, ρ, ϕ_z, v, d, ϕ_σ = c_params
    z, σ = x

    return μ_c + z + σ * randn()


def by_function_factory(by, parallelization_flag=False):
    """
    Produces functions that compute the stability test value Lambda via
    Monte Carlo.
SPLIT_CHOICE_all_max = 2

PRED_CHOICE_majority = 1
PRED_CHOICE_pure_majority = 2
PRED_CHOICE_majority_general = 3
PRED_CHOICE_pure_majority_general = 4

N = 100


def time_ms(f):
    f()  # warm start
    return " %0.6f ms" % (1000.0 * (timeit.timeit(f, number=N) / float(N)))


@njit(f8(u4, u4[:]), cache=True)  # ,inline='always')
def gini(total, counts):
    # Gini impurity: 1 - sum_i p_i^2, so a pure node scores 0.0
    if total > 0:
        s = 0.0
        for c_i in counts:
            prob = c_i / total
            s += prob * prob
        return 1.0 - s
    else:
        return 0.0


@njit(nogil=True, fastmath=True, cache=False)
def unique_counts(inp):
    '''
    Finds the unique classes in an input array of class labels
import numpy as np
import numba
from numba import f8, void

"""
Functions operating on 2-Dimensional vectors.
"""

# TODO: Normal, Tangent


@numba.vectorize([f8(f8)])
def wrap_to_pi(rad):
    """
    Wraps angles given in radians to the interval [-pi, pi]. Pi maps to pi
    and -pi maps to -pi. (In general, odd, positive multiples of pi map to
    pi and odd, negative multiples of pi map to -pi.)

    [Matlab](http://se.mathworks.com/help/map/ref/wraptopi.html)

    :param rad: Angle in radians.
    :return: Angle in [-pi, pi].
    """
    rad_ = rad % (2 * np.pi)
    if rad < 0 and rad_ == np.pi:
        # negative multiples of pi map to -pi
        return -np.pi
    elif rad_ > np.pi:
        return rad_ - (2 * np.pi)
    else:
        return rad_
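# Examples (illustrative):
#   wrap_to_pi(3 * np.pi)     -> pi    (odd positive multiple of pi)
#   wrap_to_pi(-np.pi)        -> -pi   (odd negative multiple of pi)
#   wrap_to_pi(3 * np.pi / 2) -> -pi/2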
    for i in range(n):
        for l in range(i):
            val = 0
            for j in range(m):
                val += M[j] * C[j, i] * C[j, l]
            H[i, l] = val
    d = np.empty((1, n))
    for i in range(n):
        val = 0
        for j in range(m):
            val += M[j] * C[j, i] * C[j, i]
        d[0, i] = val
    return H, d, g.T, f


@nb.jit(nb.f8(nb.f8[:, :], nb.f8[:]))
def function_values(C, x):
    eCx = exp(dot(C, x))
    f = sum(log1p(eCx))
    return f


def shrinkage(a, kappa):
    # soft-thresholding operator, e.g. shrinkage(3.0, 1.0) -> 2.0 and
    # shrinkage(-0.5, 1.0) -> 0.0
    return maximum(0, a - kappa) - maximum(0, -a - kappa)


def l1_OLS(A, b, lam, x, z):
    return 0.5 * sum((A.dot(x) - b)**2) + lam * np.linalg.norm(z, 1)


def lasso_admm_cholesky(A, rho):
@njit(void(f8[:, :], u4, u4, f8, b1[:]))
def split__(array, length, dim, threshold, res):
    for i in range(length):
        if array[i, dim] <= threshold:
            res[i] = True


@jit(b1[:](f8[:, :], u4, f8))
def split(array, dim, threshold):
    length = len(array)
    res = np.zeros((length), dtype=bool)
    split__(array, length, dim, threshold, res)
    return res


@njit(f8(f8[:, :], f8[:], f8[:]))
def calc_bbox_growth(data, node_min_d, node_max_d):
    """
    Calculate the difference in linear dimension between the current node,
    and the incoming data. Roughly, this means calculating how much the
    bounding box would have to grow to accommodate all the new points, in
    each dimension, and then summing across these dimensions.
    """
    n_rows, n_cols = data.shape
    total = 0
    for j in range(n_cols):
        # Keep track of maximum extension required for lower
        # and upper bound, respectively, in this dimension
        l_extension = 0
import numpy as np
from numba import njit, b1, i1, i8, f8
from numba.types import UniTuple


@njit(f8(i8, i8, f8, f8, b1[:, :], b1[:, :], f8[:, :], b1), cache=True)
def signals_order_func_np(i, col, run_cash, run_shares, entries, exits,
                          volume, accumulate):
    """Order function to buy/sell based on signals."""
    if run_shares > 0:
        if entries[i, col] and not exits[i, col]:
            if accumulate:
                return volume[i, col]
        elif not entries[i, col] and exits[i, col]:
            return -volume[i, col]
    else:
        if entries[i, col] and not exits[i, col]:
            return volume[i, col]
        elif not entries[i, col] and exits[i, col]:
            if accumulate:
                return -volume[i, col]
    return 0.


@njit(f8(i8, i8, f8, f8, f8[:, :], b1), cache=True)
def orders_order_func_np(i, col, run_cash, run_shares, orders, is_target):
    """Buy/sell the amount of shares specified by orders."""
    if is_target:
        return orders[i, col] - run_shares
    else:
        return orders[i, col]
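# Direct-call sketch with hypothetical inputs: one column, flat position,
# entry signal at row 0 and no exit, so the full volume is bought.
entries = np.array([[True]])
exits = np.array([[False]])
volume = np.array([[10.0]])
signals_order_func_np(0, 0, 100.0, 0.0, entries, exits, volume, False)  # -> 10.0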
    Raises:
        RebinError: for the following cases:
            old:   └┴┴┴┴┘
            new: └┴┴┴┘
            old: └┴┴┴┴┘
            new:   └┴┴┴┘
    """
    if (in_edges[..., -1] <= out_edges[..., 0]).any():
        raise RebinError("Input edges are all smaller than output edges")
    if (in_edges[..., 0] >= out_edges[..., -1]).any():
        raise RebinError("Input edges are all larger than output edges")


@nb.vectorize([nb.f8(nb.f8, nb.f8, nb.f8, nb.f8)], nopython=True)
def _linear_offset(slope, cts, low, high):
    """
    Calculate the offset of the linear approximation of slope when
    splitting counts between bins.

    Args:
        slope:
        cts: counts within the bin
        low: lower bin edge energy
        high: higher bin edge energy

    Returns:
        the offset
    """
    if np.abs(slope) < 1e-6:
    if axis == 0:
        b = array([0., 0., 0.])
        for i in prange(m):
            b[0] += a[i, 0]
            b[1] += a[i, 1]
            b[2] += a[i, 2]
    elif axis == 1:
        b = np.zeros(m)
        for i in prange(m):
            b[i] += a[i, 0] + a[i, 1] + a[i, 2]
    return b


@jit(f8(f8[:]), nogil=True, nopython=True, parallel=True)
def norm_vector_numba(a):
    """Calculate the L2 norm or length of a vector.

    Parameters
    ----------
    a : array
        XYZ components of the vector.

    Returns
    -------
    float
        The L2 norm of the vector.
    """
    return sqrt(a[0]**2 + a[1]**2 + a[2]**2)


@jit(f8[:](f8[:, :]), nogil=True, nopython=True, parallel=False)
    tmp = 1 + 4 * ((alpha - theta2) / fwhm)**2
    return 1 / tmp


def pseudo_voigt(theta2, alpha, fwhm, eta):
    """
    Original Pseudo-Voigt function for profiling peaks
    - Thompson, D. E. Cox & J. B. Hastings (1986).
    """
    L = lorentzian(theta2, alpha, fwhm)
    G = gaussian(theta2, alpha, fwhm)
    return eta * L + (1 - eta) * G


@nb.njit(nb.f8(nb.f8[:], nb.f8[:], nb.f8, nb.i8, nb.f8[:], nb.f8[:]))
def similarity_calculate(r, w, d, Npts, fy, gy):
    """
    Compute the similarity between the pair of spectra f, g
    """
    xCorrfg_w, aCorrff_w, aCorrgg_w = 0, 0, 0
    for r0, w0 in zip(r, w):
        Corrfg, Corrff, Corrgg = 0, 0, 0
        shift = int(r0 / d)
        for i in range(Npts):
            if 0 <= i + shift <= Npts - 1:
                Corrfg += fy[i] * gy[i + shift] * d
                Corrff += fy[i] * fy[i + shift] * d
                Corrgg += gy[i] * gy[i + shift] * d
"""Evacuation related functions""" import numba import numpy as np from numba.typing.typeof import typeof from crowddynamics.core.geom2D import line_intersect from crowddynamics.core.sensory_region import is_obstacle_between_points from crowddynamics.core.structures import obstacle_type_linear from crowddynamics.core.vector2D import length from numba import i8, f8, optional from crowddynamics.simulation.agents import NO_TARGET @numba.jit(f8(f8, f8, optional(f8), f8), nopython=True, nogil=True, cache=True) def narrow_exit_capacity(d_door, d_agent, d_layer=None, coeff=1.0): r""" Capacity estimation :math:`\beta` of unidirectional flow through narrow bottleneck. Capacity of the bottleneck increases in stepwise manner. Estimation 1 Simple estimation .. math:: \beta_{simple} = c \left \lfloor \frac{d_{door}}{d_{agent}} \right \rfloor Estimation 2 More sophisticated estimation [Hoogendoorn2005a]_, [Seyfried2007a]_ .. math:: \beta_{hoogen} = c \left \lfloor \frac{d_{door} - (d_{agent} - d_{layer})}{d_{layer}} \right \rfloor,\quad d_{door} \geq d_{agent}
import numba
from numba import f8
import numpy as np
from scipy.spatial.qhull import Delaunay
from shapely.geometry import Polygon, Point


@numba.jit(f8(f8[:], f8[:], f8[:]), nopython=True, nogil=True)
def triangle_area(a, b, c):
    return np.abs(a[0] * (b[1] - c[1]) + b[0] * (c[1] - a[1]) +
                  c[0] * (a[1] - b[1])) / 2


@numba.jit(f8[:](f8[:], f8[:], f8[:]), nopython=True, nogil=True)
def random_sample_triangle(a, b, c):
    r"""
    Uniform sampling of a triangle
    ------------------------------
    Generate uniform random sample from a triangle defined by points A, B
    and C [1]_, [2]_. Point inside the triangle is given

    .. math::
       P = (1 - \sqrt{r_1}) A + (\sqrt{r_1} (1 - r_2)) B + (r_2 \sqrt{r_1}) C,

    where random variables are

    .. math::
       r_1, r_2 \sim \mathcal{U}(0, 1)

    References
Monte Carlo based computation of the test value, SSY model, with active
truncation.

Unfortunately this file replicates a huge amount of code from
ssy_monte_carlo_test.py. This is purely for reasons of efficiency, and the
limits of what can and can't be done with current versions of Numba.

"""

from numpy.random import randn
from numba import jit, njit, f8, prange
from ssy_model import *


@njit(f8(f8))
def truncated_randn(truncation_val):
    y = randn()
    if y > truncation_val:
        return truncation_val
    if y < -truncation_val:
        return -truncation_val
    return y


@njit(f8[:](f8[:], f8[:], f8))
def update_state(x, c_params, trunc_val):
    μ_c, ρ, ϕ_z, σ_bar, ϕ_c, ρ_hz, σ_hz, ρ_hc, σ_hc = c_params
    z, h_z, h_c = x
# import glob
from os.path import join
import numpy as np
import os
# import pandas as pd
# from utils.helper_fncs import save_obj, load_obj
# import pickle
from numba import jit, njit
import numba as nb


@njit(nb.f8(nb.u8, nb.u8, nb.u8, nb.u8))
def compute_overlap(xA, xB, yA, yB):
    # x,y files, A start, B end
    num = yB - yA + xB - xA
    den = max(xB, yB) - min(xA, yA)
    # length of the intersection divided by the length of the union,
    # e.g. intervals [2, 6] and [4, 8]: num = 8, den = 6 -> folap = 1/3
    folap = max(0, num / den - 1)
    return folap


@njit(nb.f8(nb.u8, nb.u8, nb.u8, nb.u8))
def compute_intersect(xA, xB, yA, yB):
    num = yB - yA + xB - xA
    den = max(xB, yB) - min(xA, yA)
    intersct = max(num - den, 0)
    return intersct


@njit(nb.b1[:](nb.u8[:, :], nb.u8[:, :], nb.f8[:, :], nb.f8))
import numba as nb
from numba import jit, f8, int32, b1

# Fluid Specific Heat
percGly = 20


@jit(f8(f8, f8), nopython=True)
def fCp(Tref, percglyvol=percGly):
    # repeated Maple-generated subexpression, factored out for readability
    q = (0.6336420000e-3 + 0.1107710000e-1 * percglyvol
         - 0.1089140000e-4 * percglyvol ** 2)
    if Tref >= 292:
        return (0.6502826505e5 - 0.55296090e8 / Tref
                + 0.8269255092e2 * percglyvol
                - 0.8130626690e-1 * percglyvol ** 2
                + 0.167409225e11 / Tref ** 2
                - 0.180431100e4 * q ** 2
                - 0.44220204e7 * q / Tref
                - 0.16876350e13 / Tref ** 3
                - 0.68413806e3 * q ** 3
                + 0.52095834e6 * q ** 2 / Tref
                + 0.54229338e9 * q / Tref ** 2)
    else:
        return ((0.2451e6 - 0.25033e4 * Tref + 0.867151e1 * Tref ** 2
                 - 0.100147e-1 * Tref ** 3)
                * (0.1560553517e2 - 0.13270e5 / Tref
                   + 0.1984462465e-1 * percglyvol
                   - 0.1951194310e-4 * percglyvol ** 2
                   + 0.40175e7 / Tref ** 2
                   - 0.43300e0 * q ** 2
                   - 0.10612e4 * q / Tref
                   - 0.4050e9 / Tref ** 3
                   - 0.16418e0 * q ** 3
                   + 0.12502e3 * q ** 2 / Tref
                   + 0.13014e6 * q / Tref ** 2))
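# Example call (illustrative): specific heat of a 20% (vol) glycol mixture
# at 300 K, in the units implied by the fitted expressions above.
cp_300K = fCp(300.0, 20.0)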
import numpy as np
# import scipy.special as scsp
import numba as nub


# # @nub.autojit(nub.double(nub.double, nub.double, nub.double))
# @nub.autojit()
# def loglike_vonmises(x, params):
#     mu = params[0]
#     kappa = params[1]
#     out = kappa*np.cos(x - mu) - np.log(2.*np.pi) - np.log(scsp.i0(kappa))
#     return out


# @nub.autojit(locals={'thetas': nub.double[:], 'datapoint': nub.double[:],
#                      'ATtcB': nub.double, 'sampled_feature_index': nub.int_,
#                      'mean_fixed_contrib': nub.double[:],
#                      'inv_covariance_fixed_contrib': nub.double[:, :]})
@nub.jit(nub.f8(nub.f8, nub.f8[:], nub.f8[:], nub.object_, nub.f8, nub.int_,
                nub.f8[:], nub.f8[:, :]))
def loglike_fct(new_theta, thetas, datapoint, rn, ATtcB,
                sampled_feature_index, mean_fixed_contrib,
                inv_covariance_fixed_contrib):
    '''
    Compute the loglikelihood of:

        theta_r | n_tc theta_r' tc
    '''
    # print 'what?', params, len(params)
    # thetas = params[0]
    # datapoint = params[1]
    # # rn = params[2]
    # # theta_mu = params[3]
    # # theta_kappa = params[4]
    # ATtcB = nub.double(params[5])
    # sampled_feature_index = params[6]
    # mean_fixed_contrib = params[7]