Example #1
    def __call__(self, t: np.float64, y: np.array) -> np.array:
        # get interpolant data

        v_gas = double(self._gas.Velocity(t))
        T = double(self._gas.Temperature(t))  # this cast is necessary, not sure why just yet
        dT = double(self._gas.Temperature(t, derivative=1))

        rho = self._gas.Density(t)
        drho = self._gas.Density(t, derivative=1)

        # call calculators
        xpnd = drho / rho
        vol = self._gas.mass_0 / rho
        self._cbar[:] = self._gas.cbar(vol)

        dydt = _f(self._dust_calc, self._dust_par, y, self._cbar, T, dT)

        expand(xpnd, y[0:self._net.NG], dydt[0:self._net.NG])

        dadt, d_conc = destroy(self._gas, self._net, vol, rho, y, T, v_gas)
        conc_update(d_conc, dydt[0:self._net.NG], y[0:self._net.NG])
        #erode(dadt, y[self._net.NG+self._net.ND*N_MOMENTS:], dydt[self._net.NG+self._net.ND*N_MOMENTS:], self._net.NG, self._dust_calc)
        erode_grow(dadt, y, dydt, self._net.NG, self._dust_calc)

        return dydt
def loglike_fct(new_theta, thetas, datapoint, rn, ATtcB, sampled_feature_index, mean_fixed_contrib, inv_covariance_fixed_contrib):
    '''
        Compute the loglikelihood of: theta_r | n_tc theta_r' tc
    '''
    
    # print 'what?', params, len(params)

    # thetas = params[0]
    # datapoint = params[1]
    # # rn = params[2]
    # # theta_mu = params[3]
    # # theta_kappa = params[4]
    # ATtcB = nub.double(params[5])
    # sampled_feature_index = params[6]
    # mean_fixed_contrib = params[7]
    # inv_covariance_fixed_contrib = params[8]

    # Put the new proposed point correctly
    thetas[sampled_feature_index] = new_theta

    # print nub.typeof(mean_fixed_contrib), nub.typeof(inv_covariance_fixed_contrib)
    # print inv_covariance_fixed_contrib

    # a = rn.get_network_response_numba(thetas)

    like_mean = datapoint - mean_fixed_contrib - ATtcB*rn.get_network_response_numba(thetas)

    # Using inverse covariance as param
    # return theta_kappa*np.cos(thetas[sampled_feature_index] - theta_mu) - 0.5*np.dot(like_mean, np.dot(inv_covariance_fixed_contrib, like_mean))
    return -0.5*nub.double(np.dot(like_mean, np.dot(inv_covariance_fixed_contrib, like_mean)))
Example #3
def test_fromnumba():
    import numba as nb

    def fromstr(a):
        return Type_fromstring(a)

    def fromnumba(t):
        return Type.fromnumba(t, target_info)

    assert fromnumba(nb.void) == fromstr('void')
    assert fromnumba(nb.boolean) == fromstr('bool')
    assert fromnumba(nb.int8) == fromstr('int8')
    assert fromnumba(nb.int16) == fromstr('int16')
    assert fromnumba(nb.int32) == fromstr('int32')
    assert fromnumba(nb.int64) == fromstr('int64')
    assert fromnumba(nb.uint8) == fromstr('uint8')
    assert fromnumba(nb.uint16) == fromstr('uint16')
    assert fromnumba(nb.uint32) == fromstr('uint32')
    assert fromnumba(nb.uint64) == fromstr('uint64')
    assert fromnumba(nb.float_) == fromstr('float32')
    assert fromnumba(nb.double) == fromstr('float64')
    assert fromnumba(nb.complex64) == fromstr('complex64')
    assert fromnumba(nb.complex128) == fromstr('complex128')
    assert fromnumba(nb.types.CPointer(nb.float64)) == fromstr('double*')
    assert fromnumba(nb.double(nb.int64, nb.float_)) == fromstr('d(i64, f)')
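The last two assertions work because calling a numba type object such as nb.double(...) does not cast a value; it builds a Signature. A small inspection sketch (my addition, not part of the test):

import numba as nb

# Calling a scalar type with argument types yields a Signature object,
# whose pieces are exposed as attributes.
sig = nb.double(nb.int64, nb.float_)
print(sig.return_type)  # float64
print(sig.args)         # (int64, float32)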
Example #4
    def __init__(self, w, h):
        # All instance attributes must be defined in the initializer
        self.width = w
        self.height = h

        # Types can be explicitly specified through casts
        self.some_attr = double(1.0)
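On current numba releases the same effect is typically achieved by declaring attribute types in a jitclass spec rather than casting inside __init__. A hedged sketch (class name and spec are my own, not from this example):

from numba import float64
from numba.experimental import jitclass

# Attribute types are fixed by the spec, so no explicit casts are needed.
spec = [
    ("width", float64),
    ("height", float64),
    ("some_attr", float64),
]

@jitclass(spec)
class Box:
    def __init__(self, w, h):
        self.width = w
        self.height = h
        self.some_attr = 1.0

print(Box(2.0, 3.0).some_attr)  # 1.0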
Example #5
    def __init__(self, w, h):
        # All instance attributes must be defined in the initializer
        self.width = w
        self.height = h

        # Types can be explicitly specified through casts
        self.some_attr = double(1.0)
Example #6
    def __call__(self, t: np.float64, y: np.array) -> np.array:
        # get interpolant data
        T = double(self._gas.Temperature(t))  # this cast is necessary, not sure why just yet
        dT = double(self._gas.Temperature(t, derivative=1))
        rho = self._gas.Density(t)
        drho = self._gas.Density(t, derivative=1)

        # call calculators
        xpnd = drho / rho
        vol = self._gas.mass_0 / rho
        self._cbar[:] = self._gas.cbar(vol)
        dydt = _f(self._dust_calc, self._dust_par, y, self._cbar, T, dT)
        expand(xpnd, y[0:self._net.NG], dydt[0:self._net.NG])

        return dydt
Example #7
def SGD(R, P, Q, W, K=num_of_features, num_of_iterations=num_of_iterations):
    steps = num_of_iterations * 10
    weighted_errors = []
    alpha = 0.002
    beta = 0.01
    QT = Q.T
    n, m = R.shape
    step = 0
    err = 0.0
    for step in xrange(steps):
        for i in xrange(n):
            for j in xrange(m):
                if R[i,j] > 0:
                    temp = double(np.dot(P[i,:],QT[:,j]))
                    eij = R[i,j] - temp
                    for k in xrange(K):
                        P[i,k] += 2 * alpha * (eij * QT[k,j] - beta * P[i,k])
                        QT[k,j] += 2 * alpha * (eij * P[i,k] - beta * QT[k,j])
        
        err = get_error(R, P, Q, W)
        weighted_errors.append(err)
        print_stamp("Step " + str(step) + " done.")
        if err < 0.5:
            break

    return weighted_errors, P, QT.T, step + 1
Example #8
def compile_rematch():
    '''
    Compile the rematch function with numba.

    :return: Compiled version of the rematch function.
    '''
    signatureRem = nb.double(nb.double[:, :], nb.double, nb.double)
    nb_rematch = nb.jit(signatureRem, nopython=True, nogil=True,cache=True)(rematch)
    return nb_rematch
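For illustration only (the rematch function itself is not shown in this snippet), a stand-in with the same double(double[:, :], double, double) signature can be compiled the same way:

import numpy as np
import numba as nb

def rematch_stub(envsim, gamma, eps):
    # hypothetical placeholder sharing rematch's argument and return types
    return gamma * envsim.sum() + eps

sig = nb.double(nb.double[:, :], nb.double, nb.double)
nb_stub = nb.jit(sig, nopython=True, nogil=True)(rematch_stub)
print(nb_stub(np.ones((2, 2)), 0.5, 1e-3))  # 2.001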
Example #9
def main(*args):
    import numba as nb
    test_ast = ast_module.parse('def test_fn(a, b):\n  return a + b\n\n',
                                   '<string>', 'exec')
    exec(compile(test_ast, '<string>', 'exec'))
    test_fn_ast = test_ast.body[-1]
    test_fn_sig = nb.double(nb.double, nb.double)
    test_fn_sig.name = test_fn.__name__
    env = NumbaEnvironment.get_environment()
    with TranslationContext(env, test_fn, test_fn_ast, test_fn_sig):
        env.get_pipeline()(test_fn_ast, env)
    assert env.pipeline_stages == pipeline
Example #10
def main(*args):
    import numba as nb
    test_ast = ast_module.parse('def test_fn(a, b):\n  return a + b\n\n',
                                '<string>', 'exec')
    exec(compile(test_ast, '<string>', 'exec'))
    test_fn_ast = test_ast.body[-1]
    test_fn_sig = nb.double(nb.double, nb.double)
    test_fn_sig.name = test_fn.__name__
    env = NumbaEnvironment.get_environment()
    with TranslationContext(env, test_fn, test_fn_ast, test_fn_sig):
        env.get_pipeline()(test_fn_ast, env)
    assert env.pipeline_stages == pipeline
Example #11
def cross3(vec1, vec2, result):
    """ Calculate the cross product of two 3d vectors. """
    a1, a2, a3 = double(vec1[0]), double(vec1[1]), double(vec1[2])
    b1, b2, b3 = double(vec2[0]), double(vec2[1]), double(vec2[2])
    result[0] = a2 * b3 - a3 * b2
    result[1] = a3 * b1 - a1 * b3
    result[2] = a1 * b2 - a2 * b1
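The double(...) calls above act as explicit casts only under the old cast-based typing shown in Examples #4 and #5. For comparison, a sketch (mine, not from the source) of the same cross product for current numba, where an eager signature makes the casts unnecessary:

import numpy as np
from numba import njit, double

@njit(double[:](double[:], double[:], double[:]))
def cross3_njit(vec1, vec2, result):
    # same arithmetic as cross3; types are fixed by the signature instead of casts
    result[0] = vec1[1] * vec2[2] - vec1[2] * vec2[1]
    result[1] = vec1[2] * vec2[0] - vec1[0] * vec2[2]
    result[2] = vec1[0] * vec2[1] - vec1[1] * vec2[0]
    return result

out = np.empty(3)
print(cross3_njit(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]), out))  # [0. 0. 1.]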
Example #12
def numba_cross_(vec1, vec2, result):
    """calculates the cross product of two 3d vectors"""
    a1, a2, a3 = double(vec1[0]), double(vec1[1]), double(vec1[2])
    b1, b2, b3 = double(vec2[0]), double(vec2[1]), double(vec2[2])
    result[0] = a2 * b3 - a3 * b2
    result[1] = a3 * b1 - a1 * b3
    result[2] = a1 * b2 - a2 * b1
    return result
Example #13
def rgbd_cal_dmask2cbox(depth_img, col_img_h, col_img_w, dcm, ccm, ext, depth_scl, depth_mask):
    """
    Calibrating depth and color image for computing a bounding box
    in color image space based on the input depth mask
    """
    depth_height = depth_img.shape[0]
    depth_width = depth_img.shape[1]
    color_height = col_img_h
    color_width = col_img_w
    depth_mask_height = depth_mask.shape[0]
    depth_mask_width = depth_mask.shape[1]
    #assert depth_mask.shape == depth_img.shape, 'assertion failed -- rgbd_cal_dmask2cbox: depth_mask.shape == depth_img.shape'

    aligned = np.zeros((depth_height, depth_width, 3), dtype=np.float_)
    color_bbox = [color_width, color_height, 0, 0]

    fx_d, fy_d, cx_d, cy_d = double(dcm[0][0]), double(dcm[1][1]), double(dcm[0][2]), double(dcm[1][2])
    for v in range(depth_height):
        for u in range(depth_width):
            z = double(depth_img[v][u]) / double(depth_scl)
            x = (u - cx_d) * z / fx_d
            y = (v - cy_d) * z / fy_d
            transformed = mul(ext, np.array([x, y, z, 1]))
            aligned[v][u] = transformed[0:3]
    
    fx_c, fy_c, cx_c, cy_c = double(ccm[0][0]), double(ccm[1][1]), double(ccm[0][2]), double(ccm[1][2])
    validFlag = False
    for v in range(depth_mask_height):
        for u in range(depth_mask_width):
            if depth_mask[v][u] == 0 or aligned[v][u][2] == 0:
                continue
            x = aligned[v][u][0] * fx_c / aligned[v][u][2] + cx_c
            y = aligned[v][u][1] * fy_c / aligned[v][u][2] + cy_c
            if x > color_width or y > color_height or x < 0 or y < 0:
                continue
            color_bbox[0] = min(int(round(x)), color_bbox[0])
            color_bbox[1] = min(int(round(y)), color_bbox[1])
            color_bbox[2] = max(int(round(x)), color_bbox[2])
            color_bbox[3] = max(int(round(y)), color_bbox[3])
            validFlag = True
    if not validFlag:
        color_bbox = [0, 0, color_width, color_height]
    if color_bbox[2] - color_bbox[0] < 2:
        color_bbox[2] = color_bbox[0] + 2
    if color_bbox[3] - color_bbox[1] < 2:
        color_bbox[3] = color_bbox[1] + 2
    return (color_bbox)
    def test_type_inference(self):
        global vector_add
        vector_add = vectorize([
            bool_(double, int_),
            double(double, double),
            float_(double, float_),
        ])(add)

        cfunc = jit(func)

        self.assertEqual(cfunc(np.dtype(np.float64), np.dtype('i')), int8[:])
        self.assertEqual(cfunc(np.dtype(np.float64), np.dtype(np.float64)),
                         double[:])
        self.assertEqual(cfunc(np.dtype(np.float64), np.dtype(np.float32)),
                         float_[:])
Example #15
def test_tonumba():
    def tonumba(a):
        return Type_fromstring(a).tonumba()

    assert tonumba('void') == nb.void
    assert tonumba('bool') == nb.boolean
    assert tonumba('int8') == nb.int8
    assert tonumba('int16') == nb.int16
    assert tonumba('int32') == nb.int32
    assert tonumba('int64') == nb.int64
    assert tonumba('uint8') == nb.uint8
    assert tonumba('uint16') == nb.uint16
    assert tonumba('uint32') == nb.uint32
    assert tonumba('uint64') == nb.uint64
    assert tonumba('float') == nb.float32
    assert tonumba('double') == nb.float64
    assert tonumba('complex') == nb.complex64
    assert tonumba('complex128') == nb.complex128
    assert tonumba('double*') == nb.types.CPointer(nb.float64)
    assert tonumba('()') == nb.void()
    assert tonumba('d(i64, f)') == nb.double(nb.int64, nb.float_)
    def test_type_inference(self):
        """This is testing numpy ufunc dispatch machinery"""
        global vector_add
        vector_add = vectorize([
            bool_(double, int_),
            double(double, double),
            float_(double, float_),
        ])(add)

        cfunc = jit(func)

        def numba_type_equal(a, b):
            self.assertEqual(a.dtype, b.dtype)
            self.assertEqual(a.ndim, b.ndim)

        numba_type_equal(cfunc(np.dtype(np.float64), np.dtype("i")), bool_[:])
        numba_type_equal(cfunc(np.dtype(np.float64), np.dtype(np.float64)),
                         double[:])
        # This is because the double(double, double) matches first
        numba_type_equal(cfunc(np.dtype(np.float64), np.dtype(np.float32)),
                         double[:])
Example #17
def glm_one_group_numba(x, dispersion, offset):
    """ Returns the estimated betas using newton-raphson

    Params
    x : count matrix
    dispersion : negative binomial dispersion value
    """
    maxit = 50
    ntags = x.shape[0]
    nsamples = x.shape[1]
    out = np.zeros(ntags)
    low_level = 1e-10
    for i in range(ntags):
        beta = 0
        nonzero = False 
        for j in range(nsamples):
            cur_val = x[i, j]
            if cur_val > low_level:
                beta += cur_val/np.exp(offset[j]) 
                nonzero = True
            else: pass
        if not nonzero: 
            beta = -np.inf
        else:
            beta = np.log(beta/double(nsamples))
            for it in range(maxit):
                dl = 0
                info = 0
                for k in range(nsamples):
                    mu = np.exp(beta + offset[k])
                    denominator = 1 + mu * dispersion[i]
                    dl += (x[i, k] - mu)/denominator
                    info += mu/denominator
                step = dl/info
                beta += step
                if abs(step) < 1e-6:
                    break
        out[i] = beta
    return out
Example #18
def glm_one_group_numba(x, dispersion, offset):
    """ Returns the estimated betas using newton-raphson

    Params
    x : count matrix
    dispersion : negative binomial dispersion value
    """
    maxit = 50
    ntags = x.shape[0]
    nsamples = x.shape[1]
    out = np.zeros(ntags)
    low_level = 1e-10
    for i in range(ntags):
        beta = 0
        nonzero = False
        for j in range(nsamples):
            cur_val = x[i, j]
            if cur_val > low_level:
                beta += cur_val / np.exp(offset[j])
                nonzero = True
            else:
                pass
        if not nonzero:
            beta = -np.inf
        else:
            beta = np.log(beta / double(nsamples))
            for it in range(maxit):
                dl = 0
                info = 0
                for k in range(nsamples):
                    mu = np.exp(beta + offset[k])
                    denominator = 1 + mu * dispersion[i]
                    dl += (x[i, k] - mu) / denominator
                    info += mu / denominator
                step = dl / info
                beta += step
                if abs(step) < 1e-6:
                    break
        out[i] = beta
    return out
    def test_type_inference(self):
        """This is testing numpy ufunc dispatch machinery
        """
        global vector_add
        vector_add = vectorize([
            bool_(double, int_),
            double(double, double),
            float_(double, float_),
        ])(add)

        cfunc = jit(func)

        def numba_type_equal(a, b):
            self.assertEqual(a.dtype, b.dtype)
            self.assertEqual(a.ndim, b.ndim)

        numba_type_equal(cfunc(np.dtype(np.float64), np.dtype('i')), bool_[:])
        numba_type_equal(cfunc(np.dtype(np.float64), np.dtype(np.float64)),
                         double[:])
        # This is because the double(double, double) matches first
        numba_type_equal(cfunc(np.dtype(np.float64), np.dtype(np.float32)),
                         double[:])
Example #20
size_y : int
    Size of cluster y
size_i : int
    Size of cluster i

Returns
-------
d_xyi : double
    Distance from the new cluster xy to cluster i
"""

import math

import numba as nb

sig = nb.double(nb.double, nb.double, nb.double, nb.int64, nb.int64, nb.int64)


@nb.njit(sig)
def _single(d_xi, d_yi, d_xy, size_x, size_y, size_i):
    return min(d_xi, d_yi)


@nb.njit(sig)
def _complete(d_xi, d_yi, d_xy, size_x, size_y, size_i):
    return max(d_xi, d_yi)


@nb.njit(sig)
def _average(d_xi, d_yi, d_xy, size_x, size_y, size_i):
    return (size_x * d_xi + size_y * d_yi) / (size_x + size_y)
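A small usage sketch (my addition): because the three update rules share one signature, they can be selected by name when merging clusters x and y against a third cluster i.

# hypothetical dispatch table and helper, not part of the original module
_LINKAGE = {"single": _single, "complete": _complete, "average": _average}

def merged_distance(method, d_xi, d_yi, d_xy, size_x, size_y, size_i):
    return _LINKAGE[method](d_xi, d_yi, d_xy, size_x, size_y, size_i)

print(merged_distance("average", 1.0, 3.0, 2.0, 2, 1, 4))  # (2*1.0 + 1*3.0) / 3 ≈ 1.667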
import os
import cPickle as pickle
import csv
import data_analysis
from data_analysis import refdir,refdirahk114,caadir,vfile_store
#import calendar
import pandas as pd
from numba import jit,double

#plt.style.use('seaborn-darkgrid')
end_date=datetime(9999,1,1)
EXTMODE_INITIATE = ["SFGMJ059","SFGMJ064","SFGMSEXT","SFGMM002"]
EXTMODE_TERMINATE = ["SFGMJ065","SFGMJ050"]
EXTMODE_COMMANDS = EXTMODE_INITIATE+EXTMODE_TERMINATE

@jit(double(double[:]),nopython=True)
def numba_std(arr):
    return np.std(arr)
@jit(double(double[:]),nopython=True)
def numba_mean(arr):
    return np.mean(arr)
@jit(double(double[:]),nopython=True)
def numba_max(arr):
    return np.max(arr)
@jit(double(double[:]),nopython=True)
def numba_min(arr):
    return np.min(arr)

def getcommands(start_date,end_date):
    extmode_commanding_list = []
    for s in range(4):
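As a quick usage sketch (not in the source), the @jit(double(double[:])) wrappers defined above only accept 1-D float64 arrays:

import numpy as np

x = np.linspace(0.0, 1.0, 101)
print(numba_std(x), numba_mean(x), numba_max(x), numba_min(x))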
Example #22
    elif xi >= x[Nx - 2]:
        return Nx - 2

    # b. binary search
    half = Nx // 2
    while half:
        imid = imin + half
        if x[imid] <= xi:
            imin = imid
        Nx -= half
        half = Nx // 2

    return imin


@njit(double(double[:], double[:], double))
def interp_linear_1d_scalar(grid, value, xi):
    """ raw 1D interpolation """

    # a. search
    ix = binary_search(0, grid.size, grid, xi)

    # b. relative position
    rel_x = (xi - grid[ix]) / (grid[ix + 1] - grid[ix])

    # c. interpolate
    return value[ix] + rel_x * (value[ix + 1] - value[ix])


@njit
def interp_linear_1d(grid, value, xi):
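A worked check of interp_linear_1d_scalar on a tiny grid (my addition); the result should match numpy's own interpolation:

import numpy as np

grid = np.array([0.0, 1.0, 2.0])
value = np.array([0.0, 10.0, 20.0])
# ix = 1 and rel_x = 0.25, so the interpolant is 10 + 0.25 * (20 - 10) = 12.5
print(interp_linear_1d_scalar(grid, value, 1.25))  # 12.5
print(np.interp(1.25, grid, value))                # same value, as a cross-check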
Example #23
    def test_blackscholes(self):
        OPT_N = 400
        iterations = 2

        stockPrice = randfloat(np.random.random(OPT_N), 5.0, 30.0)
        optionStrike = randfloat(np.random.random(OPT_N), 1.0, 100.0)
        optionYears = randfloat(np.random.random(OPT_N), 0.25, 10.0)

        callResultNumpy = np.zeros(OPT_N)
        putResultNumpy = -np.ones(OPT_N)

        callResultNumba = np.zeros(OPT_N)
        putResultNumba = -np.ones(OPT_N)

        # numpy
        for i in range(iterations):
            black_scholes(callResultNumpy, putResultNumpy, stockPrice,
                          optionStrike, optionYears, RISKFREE, VOLATILITY)

        @cuda.jit(double(double), device=True, inline=True)
        def cnd_cuda(d):
            K = 1.0 / (1.0 + 0.2316419 * math.fabs(d))
            ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) *
                       (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))
            if d > 0:
                ret_val = 1.0 - ret_val
            return ret_val

        @cuda.jit(
            void(double[:], double[:], double[:], double[:], double[:], double,
                 double))
        def black_scholes_cuda(callResult, putResult, S, X, T, R, V):
            i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
            if i >= S.shape[0]:
                return
            sqrtT = math.sqrt(T[i])
            d1 = ((math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) /
                  (V * sqrtT))
            d2 = d1 - V * sqrtT
            cndd1 = cnd_cuda(d1)
            cndd2 = cnd_cuda(d2)

            expRT = math.exp((-1. * R) * T[i])
            callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
            putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] *
                            (1.0 - cndd1))

        # numba
        blockdim = 512, 1
        griddim = int(math.ceil(float(OPT_N) / blockdim[0])), 1
        stream = cuda.stream()
        d_callResult = cuda.to_device(callResultNumba, stream)
        d_putResult = cuda.to_device(putResultNumba, stream)
        d_stockPrice = cuda.to_device(stockPrice, stream)
        d_optionStrike = cuda.to_device(optionStrike, stream)
        d_optionYears = cuda.to_device(optionYears, stream)

        for i in range(iterations):
            black_scholes_cuda[griddim, blockdim,
                               stream](d_callResult, d_putResult, d_stockPrice,
                                       d_optionStrike, d_optionYears, RISKFREE,
                                       VOLATILITY)
        d_callResult.copy_to_host(callResultNumba, stream)
        d_putResult.copy_to_host(putResultNumba, stream)
        stream.synchronize()

        delta = np.abs(callResultNumpy - callResultNumba)
        L1norm = delta.sum() / np.abs(callResultNumpy).sum()

        max_abs_err = delta.max()
        self.assertTrue(L1norm < 1e-13)
        self.assertTrue(max_abs_err < 1e-13)
Example #24
from ...containers import MuonEfficiencyContainer
from ...coordinates import CameraFrame, TelescopeFrame
from ...core import TelescopeComponent
from ...core.traits import FloatTelescopeParameter, IntTelescopeParameter

__all__ = ["MuonIntensityFitter"]

# ratio of the areas of the unit circle and a square of side lengths 2
CIRCLE_SQUARE_AREA_RATIO = np.pi / 4

# Sqrt of 2, as it is needed multiple times
SQRT2 = np.sqrt(2)


@vectorize([double(double, double, double)])
def chord_length(radius, rho, phi):
    """
    Function for integrating the length of a chord across a circle

    Parameters
    ----------
    radius: float or ndarray
        radius of circle
    rho: float or ndarray
        fractional distance of impact point from circle center
    phi: float or ndarray in radians
        rotation angles to calculate length

    Returns
    -------
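Because of the @vectorize([double(double, double, double)]) decorator, chord_length behaves like a numpy ufunc and broadcasts over array arguments. A trivial sketch of that pattern with a stand-in kernel (mine, not the muon code):

import numpy as np
from numba import vectorize, double

@vectorize([double(double, double, double)])
def demo_kernel(radius, rho, phi):
    # placeholder body; the real chord-length integrand is not reproduced here
    return radius * np.cos(phi) + rho

# scalars and arrays broadcast exactly like a numpy ufunc
print(demo_kernel(1.0, 0.5, np.linspace(0.0, np.pi, 3)))  # [ 1.5  0.5 -0.5]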
Example #25
"""
def fn( x ):
    result = 0.0
    for i in range( 100 ):
        result += (1+i+x)/(1+x)
    return result
""" 

def fn( x ):
    result = 0.0
    for i in range( 10000 ):
        result+=i*x
    return result

nb_fn = jit( double(double, ), nopython=True )( fn )

class Test(unittest.TestCase):

    def test_mvectorize(self):
        
        x = np.linspace( 1, 1000, 10000 )
        
        mf_fn = mvectorize( nb_fn, ( double[:], double[:] ), num_threads=8 )
        
        result = mf_fn( x )
        expected = np.vectorize( fn )( x )
        
        np.testing.assert_array_equal( expected, result )
        
    def test_mvectorize_performance(self):
Example #26
#!/usr/bin/env python
# coding: UTF-8
from __future__ import division

import numpy as np
import numba as nb
from numba import double, jit

def dot3_python(a,b):
	return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]
dot3_numba = jit(double(double[:],double[:]),nopython=True)(dot3_python)

def _hamiltonian_jet_python(strengths, m,dH,ddH):
	"""
	Private function to Compute H and its gradient dH at the point m.
	This function is not meant to be used directly.
	"""
	n = len(strengths)

	H = 0.0
	for i in range(n):
		for j in range(i):
			## mi_dot_mj = m[3*i]*m[3*j] + m[3*i+1]*m[3*j+1] + m[3*i+2]*m[3*j+2]
			strength = strengths[i] * strengths[j]
			mi_dot_mj = dot3_numba(m[3*i:3*(i+1)],m[3*j:3*(j+1)])
			lij = 1.0-mi_dot_mj
			lijsq = lij**2
			H += strength*np.log(2.0*lij)
			for k in range(3):
				dH[3*i+k] -= strength*m[3*j+k]/lij
				dH[3*j+k] -= strength*m[3*i+k]/lij
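Quick usage sketch (my addition): the eagerly compiled dot3_numba only accepts 1-D float64 arrays, per its double(double[:], double[:]) signature.

import numpy as np

print(dot3_numba(np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0])))  # 32.0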
Example #27
          $ cor_atm = CorsikaAtmosphere(new_location, new_season)
          $ cor_atm.calc_thickl()

          Replace _thickl values with printout.

        """
        from scipy.integrate import quad
        thickl = []
        for h in self._atm_param[4]:
            thickl.append('{0:4.6f}'.format(
                quad(self.get_density, h, 112.8e5, epsrel=1e-4)[0]))
        if dbg:
            print '_thickl = np.array([' + ', '.join(thickl) + '])'


@jit(double(double, double, double[:, :]), target='cpu')
def planar_rho_inv_jit(X, cos_theta, param):
    """Optimized calculation of :math:`1/\\rho(X,\\theta)` in
    planar approximation.

    This function can be used for calculations where
    :math:`\\theta < 70^\\circ`.

    Args:
      X (float): slant depth in g/cm**2
      cos_theta (float): :math:`\\cos(\\theta)`

    Returns:
      float: :math:`1/\\rho(X,\\theta)` in cm**3/g
    """
    a = param[0]
            return 0.5 * np.floor( xi / 0.5 )
        elif xi<=20.0:            
            return 1.0 * np.floor( xi / 1.0 )
        elif xi<=30.0:            
            return 2.0 * np.floor( xi / 2.0 )
        elif xi<=50.0:            
            return 2.0 * np.floor( xi / 2.0 )
        elif xi<=100.0:            
            return 5.0 * np.floor( xi / 5.0 )
        elif xi<=1000.0:            
            return 10.0 * np.floor( xi / 10.0 )
        else:            
            return 1000.0
        return 0.0

    signature = double(double,)
    print( 'Compiling jit function' )
    nb_floor_closest_valid_odds = jit(signature, nopython=True)(floor_closest_valid_odds)
    
    print( 'Compiling 4 thread')
    mf4 = mvectorize( nb_floor_closest_valid_odds, ( double[:], double[:] ), num_threads=4 )
    print( 'Compiling 6 thread')
    mf6 = mvectorize( nb_floor_closest_valid_odds, ( double[:], double[:] ), num_threads=6 )    
    print( 'Compiling 7 thread')
    mf7 = mvectorize( nb_floor_closest_valid_odds, ( double[:], double[:] ), num_threads=7 )    
    print( 'Compiling 8 thread')
    mf8 = mvectorize( nb_floor_closest_valid_odds, ( double[:], double[:] ), num_threads=8 )
    
    signature = double[:](double[:],)
    vf = vectorize(['float64(float64)'], nopython=True)(floor_closest_valid_odds)
    
		ndi.correlate1d(output, [0.037659,  0.249153,  0.426375, 0.249153, 0.037659], ii, output, mode, cval, 0,)
	return return_value
# Gaussian filter kernel
#
def getGaussian( xs, ys, zs ):
	"""Return a gaussian filter kernel of the specified size
	"""
	tmp = np.zeros((xs,ys,zs))
	tmp[xs//2, ys//2, zs//2] = 1.0
	return ndi.gaussian_filter(tmp, (xs/6,ys/6,zs/6), mode='constant')


# Convolution of 3D filter with 3D data - only calculates the output for the centre trace
# Numba JIT used to accelerate the calculations
#
@jit(double(double[:,:,:], double[:,:,:]))
def sconvolve(arr, filt):
	X,Y,Z = arr.shape
	Xf,Yf,Zf = filt.shape
	X2 = X//2
	Y2 = Y//2
	Xf2 = Xf//2
	Yf2 = Yf//2
	Zf2 = Zf//2
	result = np.zeros(Z)
	for i in range(Zf2, Z-Zf2):
		num = 0.0
		for ii in range(Xf):
			for jj in range(Yf):
				for kk in range(Zf):
					num += (filt[Xf-1-ii, Yf-1-jj, Zf-1-kk] * arr[X2-Xf2+ii, Y2-Yf2+jj, i-Zf2+kk])
Example #30
                        np.sqrt((grid_im + m[im])**2 + (grid_jm + m[jm])**2 +
                                (grid_km + m[km])**2)), [res**3])
                # add the energy for each |m|
                e_k_per_cell = e_phys[im, jm, km] / (res**3)
                for ii in range(res**3):
                    e_k_mag[int(
                        mgrid[ii])] = e_k_mag[int(mgrid[ii])] + e_k_per_cell

    # compute the energy per physical wave number k (instead of m)
    k_mag = 2 * np.pi / length * np.linspace(0, m_max, m_max + 1)
    e_k_mag = e_k_mag * length / (2 * np.pi)
    logging.info('FINISH CALCULATING SPECTRUM')
    return k_mag, e_k_mag


@nb.jit(nb.double(nb.double, nb.double[:], nb.double[:]))
def energy_interp(k_mag, k_fit: np.ndarray, e_k_fit: np.ndarray):
    i = 0
    num = len(k_fit)
    e_k = ((k_mag - k_fit[0]) / (k_fit[1] - k_fit[0]) *
           (e_k_fit[1] - e_k_fit[0]) + e_k_fit[0])
    while k_mag > k_fit[i]:
        i += 1
        if i == num - 1:
            e_k = np.exp(
                (np.log(k_mag) - np.log(k_fit[num - 2])) /
                (np.log(k_fit[num - 1]) - np.log(k_fit[num - 2])) *
                (np.log(e_k_fit[num - 1]) - np.log(e_k_fit[num - 2])) +
                np.log(e_k_fit[num - 2]))
            break
        e_k = np.exp((np.log(k_mag) - np.log(k_fit[i - 1])) /
Example #31
def mul(mat, vec):
    m00, m01, m02, m03 = double(mat[0][0]), double(mat[0][1]), double(mat[0][2]), double(mat[0][3])
    m10, m11, m12, m13 = double(mat[1][0]), double(mat[1][1]), double(mat[1][2]), double(mat[1][3])
    m20, m21, m22, m23 = double(mat[2][0]), double(mat[2][1]), double(mat[2][2]), double(mat[2][3])
    m30, m31, m32, m33 = double(mat[3][0]), double(mat[3][1]), double(mat[3][2]), double(mat[3][3])
    v0, v1, v2, v3 = double(vec[0]), double(vec[1]), double(vec[2]), double(vec[3])
    return np.array([m00 * v0 + m01 * v1 + m02 * v2 + m03 * v3,
                     m10 * v0 + m11 * v1 + m12 * v2 + m13 * v3,
                     m20 * v0 + m21 * v1 + m22 * v2 + m23 * v3,
                     m30 * v0 + m31 * v1 + m32 * v2 + m33 * v3])
import numpy as np
from numba import njit, boolean, int32, double, void
from .linear_interp import binary_search

@njit(double(double[:],double[:],double[:,:],double,double,int32,int32),fastmath=True)
def _interp_2d(grid1,grid2,value,xi1,xi2,j1,j2):
    """ 2d interpolation for one point with known location
        
    Args:

        grid1 (numpy.ndarray): 1d grid
        grid2 (numpy.ndarray): 1d grid
        value (numpy.ndarray): value array (2d)
        xi1 (double): input point
        xi2 (double): input point
        j1 (int): location in grid 
        j2 (int): location in grid

    Returns:

        yi (double): output

    """

    # a. left/right
    nom_1_left = grid1[j1+1]-xi1
    nom_1_right = xi1-grid1[j1]

    nom_2_left = grid2[j2+1]-xi2
    nom_2_right = xi2-grid2[j2]
Example #33
from math import pi
from numba import jit, double

is_windows = sys.platform.startswith('win32')
if is_windows:
    raise OSError('Example does not work on Windows platforms yet.')

proc = CDLL(None)

c_sin = proc.sin
c_sin.argtypes = [c_double]
c_sin.restype = c_double


def use_c_sin(x):
    return c_sin(x)


ctype_wrapping = CFUNCTYPE(c_double, c_double)(use_c_sin)


def use_ctype_wrapping(x):
    return ctype_wrapping(x)


cfunc = jit(double(double))(use_c_sin)
print(cfunc(pi))

cfunc = jit(double(double))(use_ctype_wrapping)
print(cfunc(pi))
Example #34
                nomi = interp.nomi_last[i]*b

            index = interp.index_last[i]+(pos_left+add)*facs
                
            for k in range(interp.dimy):
                nom[k] += nomi*interp.y[interp.yoffset[k] + index]
        
        inv_denom = 1/denom
        for k in range(interp.dimy):
            yi[ixi*interp.dimy + k] = nom[k]*inv_denom

######
# 1D #
######

@njit(double(double[:],double[:],double))
def interp_1d(grid,value,xi):
    """ raw 1D interpolation """

    # a. search
    ix = binary_search(0,grid.size,grid,xi)
    
    # b. relative position
    rel_x = (xi - grid[ix])/(grid[ix+1]-grid[ix])
    
    # c. interpolate
    return value[ix] + rel_x * (value[ix+1]-value[ix])

@njit(void(double[:],double[:],double[:],double[:],boolean))
def _interp_1d_vec(grid,value,xi,yi,monotone):
    """ raw 1D interpolation for a monotone vector """
Example #35
File: sum.py Project: ASPP/numba
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import double
from numba.decorators import jit as jit

def sum2d(arr):
    M, N = arr.shape
    result = 0.0
    for i in range(M):
        for j in range(N):
            result += arr[i,j]
    return result

jitsum2d = jit(sum2d)
csum2d = jitsum2d.compile(double(double[:,::1]))

from numpy import random
arr = random.randn(100, 100)

import time
start = time.time()
res = sum2d(arr)
duration = time.time() - start
print("Result from python is %s in %s (msec)" % (res, duration*1000))

csum2d(arr)       # warm up

start = time.time()
res = csum2d(arr)
duration2 = time.time() - start
print("Result from compiled is %s in %s (msec)" % (res, duration2*1000))
import numpy as np
from numba import njit, boolean, int32, double, void
from .linear_interp import binary_search


@njit(double(double[:], double[:], double[:, :], double, double, int32, int32),
      fastmath=True)
def _interp_2d(grid1, grid2, value, xi1, xi2, j1, j2):
    """ 2d interpolation for one point with known location
        
    Args:

        grid1 (numpy.ndarray): 1d grid
        grid2 (numpy.ndarray): 1d grid
        value (numpy.ndarray): value array (2d)
        xi1 (double): input point
        xi2 (double): input point
        j1 (int): location in grid 
        j2 (int): location in grid

    Returns:

        yi (double): output

    """

    # a. left/right
    nom_1_left = grid1[j1 + 1] - xi1
    nom_1_right = xi1 - grid1[j1]

    nom_2_left = grid2[j2 + 1] - xi2
Example #37
from __future__ import print_function, division, absolute_import
from numba import double
from numba.decorators import jit as jit


def sum2d(arr):
    M, N = arr.shape
    result = 0.0
    for i in range(M):
        for j in range(N):
            result += arr[i, j]
    return result


jitsum2d = jit(sum2d)
csum2d = jitsum2d.compile(double(double[:, ::1]))

from numpy import random
arr = random.randn(100, 100)

import time
start = time.time()
res = sum2d(arr)
duration = time.time() - start
print("Result from python is %s in %s (msec)" % (res, duration * 1000))

csum2d(arr)  # warm up

start = time.time()
res = csum2d(arr)
duration2 = time.time() - start
Example #38
          $ cor_atm = CorsikaAtmosphere(new_location, new_season)
          $ cor_atm.calc_thickl()
          
          Replace _thickl values with printout. 
        
        """
        from scipy.integrate import quad
        thickl = []
        for h in self._atm_param[4]:
            thickl.append('{0:4.6f}'.format(quad(self.get_density, h,
                                                 112.8e5, epsrel=1e-4)[0]))
        print '_thickl = np.array([' + ', '.join(thickl) + '])'


@jit(double(double, double, double[:, :]), target='cpu')
def planar_rho_inv_jit(X, cos_theta, param):
    """Optimized calculation of :math:`1/\\rho(X,\\theta)` in
    planar approximation. 
    
    This function can be used for calculations where 
    :math:`\\theta < 70^\\circ`.  
    
    Args:
      X (float): slant depth in g/cm**2
      cos_theta (float): :math:`\\cos(\\theta)`
    
    Returns:
      float: :math:`1/\\rho(X,\\theta)` in cm**3/g
    """
    a = param[0]
Example #39

is_windows = sys.platform.startswith('win32')
if is_windows:
    raise OSError('Example does not work on Windows platforms yet.')


proc = CDLL(None)

c_sin = proc.sin
c_sin.argtypes = [c_double]
c_sin.restype = c_double

def use_c_sin(x):
    return c_sin(x)


ctype_wrapping = CFUNCTYPE(c_double, c_double)(use_c_sin)

def use_ctype_wrapping(x):
    return ctype_wrapping(x)


cfunc = jit(double(double))(use_c_sin)
print(cfunc(pi))

cfunc = jit(double(double))(use_ctype_wrapping)
print(cfunc(pi))


Example #40
    V = tuple(V[:]) + (V[0], )

    # loop through all edges of the polygon
    for i in range(len(V) - 1):  # edge from V[i] to V[i+1]
        if V[i][1] <= P[1]:  # start y <= P[1]
            if V[i + 1][1] > P[1]:  # an upward crossing
                if is_left(V[i], V[i + 1], P) > 0:  # P left of edge
                    wn += 1  # have a valid up intersect
        else:  # start y > P[1] (no test needed)
            if V[i + 1][1] <= P[1]:  # a downward crossing
                if is_left(V[i], V[i + 1], P) < 0:  # P right of edge
                    wn -= 1  # have a valid down intersect
    return wn


@njit(nb.double(nb.double[:], nb.double[:], nb.double[:]))
def is_left(p0, p1, p2):
    """

       is_left(): tests if a point is Left|On|Right of an infinite line.
    Input:  three points P0, P1, and P2
    Return: >0 for P2 left of the line through P0 and P1
            =0 for P2  on the line
            <0 for P2  right of the line
        See: Algorithm 1 "Area of Triangles and Polygons"
        http://geomalgorithms.com/a03-_inclusion.html

    :param p0: point [x,y] array
    :param p1: point [x,y] array
    :param p2: point [x,y] array
    :return:
Example #41
xgk = np.array([
	0.995657163025808080735527280689003,
	0.973906528517171720077964012084452,
	0.930157491355708226001207180059508,
	0.865063366688984510732096688423493,
	0.780817726586416897063717578345042,
	0.679409568299024406234327365114874,
	0.562757134668604683339000099272694,
	0.433395394129247190799265943165784,
	0.294392862701460198131126603103866,
	0.148874338981631210884826001129720,
	0.000000000000000000000000000000000
],dtype='double')

# Bessels are from numerical recipies in C?
@nb.jit(nb.double(nb.double),nopython=True)
def bessel_i0(x):
	ax = fabs(x)
	if ax < 3.75:
		y = x/3.75
		y *= y
		return 1.0+y*(3.5156229+y*(3.0899424+y*(1.2067492
			+y*(0.2659732+y*(0.360768e-1+y*0.45813e-2)))))
	else:
		y = 3.75/ax
		ans = (exp(ax)/sqrt(ax)) * (0.39894228+y*(0.1328592e-1+y*(0.225319e-2+y*(-0.157565e-2+y*(0.916281e-2+y*(-0.2057706e-1+y*(0.2635537e-1+y*(-0.1647633e-1+y*0.392377e-2))))))))
	return ans

@nb.jit(nb.double(nb.double),nopython=True)
def bessel_i1(x):
	ax = fabs(x)
Example #42

@nb.jit(cache=True)
def drawDisIdx(idx, p, size=1):
    # hight=max(idx)
    # low=min(idx)
    # while True:
    #     X = np.random.random_integers(low,hight)
    #     Y = np.random.uniform(0.0, 1)
    #     if Y < p[X]:
    #         return X
    disc = stats.rv_discrete(name='disc', values=(idx, p))
    return disc.rvs(size=size)


@nb.jit(nb.double(nb.double, nb.int32), cache=True)
def _drawTau(k, size=1):
    # hight=k*1.5
    # low=0
    # while True:
    #     X = np.random.uniform(low,hight)
    #     Y = np.random.uniform(0.0, k)
    #     if Y < k*exp(-k*X):
    #         return X
    sc = 1 / k
    return stats.expon.rvs(scale=sc, size=size)


@nb.jit(nb.double(nb.int64, nb.double, nb.double), cache=True)
def _draw_P_B_Tr(pN, T, bg_rate):
    while True:
Example #43
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from numba import jit, double, int64


@jit(double(double, double))
def cos2phi(qc, qt):
    e1 = 3.94
    e2 = 4.84
    k1t = -0.105
    k2t = 0.149
    lamda = 0.262
    Delta = 0.5 * (e2 + k2t * qt - e1 - k1t * qt)
    return Delta / np.sqrt(Delta**2. + lamda**2. * qc**2.)


@jit(double(double, double))
def sin2phi(qc, qt):
    e1 = 3.94
    e2 = 4.84
    k1t = -0.105
    k2t = 0.149
    lamda = 0.262
    Delta = 0.5 * (e2 + k2t * qt - e1 - k1t * qt)
    return lamda * qc / np.sqrt(Delta**2. + lamda**2. * qc**2.)


@jit()
def make_kmat(nc, nt, alpha1, alpha2, qc, qt, gam):
    print(nc, nt)
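As a sanity check (my addition, not in the source), cos2phi and sin2phi are built from the same Delta and lamda*qc, so their squares always sum to one:

qc, qt = 0.3, -0.7
print(cos2phi(qc, qt) ** 2 + sin2phi(qc, qt) ** 2)  # ~1.0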

# Gaussian filter kernel
#
def getGaussian(xs, ys, zs):
    """Return a gaussian filter kernel of the specified size
	"""
    tmp = np.zeros((xs, ys, zs))
    tmp[xs // 2, ys // 2, zs // 2] = 1.0
    return ndi.gaussian_filter(tmp, (xs / 6, ys / 6, zs / 6), mode='constant')


# Convolution of 3D filter with 3D data - only calculates the output for the centre trace
# Numba JIT used to accelerate the calculations
#
@jit(double(double[:, :, :], double[:, :, :]))
def sconvolve(arr, filt):
    X, Y, Z = arr.shape
    Xf, Yf, Zf = filt.shape
    X2 = X // 2
    Y2 = Y // 2
    Xf2 = Xf // 2
    Yf2 = Yf // 2
    Zf2 = Zf // 2
    result = np.zeros(Z)
    for i in range(Zf2, Z - Zf2):
        num = 0.0
        for ii in range(Xf):
            for jj in range(Yf):
                for kk in range(Zf):
                    num += (filt[Xf - 1 - ii, Yf - 1 - jj, Zf - 1 - kk] *
size_y : int
    Size of cluster y
size_i : int
    Size of cluster i

Returns
-------
d_xyi : double
    Distance from the new cluster xy to cluster i
"""

import math

import numba as nb

sig = nb.double(nb.double, nb.double, nb.double, nb.int64, nb.int64, nb.int64)


@nb.njit(sig)
def _single(d_xi, d_yi, d_xy, size_x, size_y, size_i):
    return min(d_xi, d_yi)


@nb.njit(sig)
def _complete(d_xi, d_yi, d_xy, size_x, size_y, size_i):
    return max(d_xi, d_yi)


@nb.njit(sig)
def _average(d_xi, d_yi, d_xy, size_x, size_y, size_i):
    return (size_x * d_xi + size_y * d_yi) / (size_x + size_y)