Example #1
def builtin_max(A, B, C):
    i = cuda.grid(1)

    if i >= len(C):
        return

    C[i] = float64(max(A[i], B[i]))
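A minimal host-side sketch of how an elementwise kernel like this is typically launched (assuming builtin_max is compiled with @cuda.jit, which the excerpt above does not show):

import numpy as np
from numba import cuda

A = np.random.rand(1024)
B = np.random.rand(1024)
C = np.zeros_like(A)

threads_per_block = 128
blocks = (C.size + threads_per_block - 1) // threads_per_block  # round up so every element is covered
builtin_max[blocks, threads_per_block](A, B, C)  # Numba handles host/device transfers for NumPy arrays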
Example #2
def test_compute_signature():
    s = symbol('s', 'int64')
    t = symbol('t', 'float32')
    d = symbol('d', 'datetime')

    assert compute_signature(s + t) == float64(int64, float32)
    assert (compute_signature(d.truncate(days=1)) ==
            datetime64('D')(datetime64('us')))
    assert compute_signature(d.day + 1) == int64(datetime64('us'))
Example #3
def test_4(self):
    sig = [
        int32(int32, int32),
        uint32(uint32, uint32),
        float32(float32, float32),
        float64(float64, float64),
    ]
    func = self.funcs['func3']
    A = np.arange(100, dtype=np.float64)
    self._run_and_compare(func, sig, A, A)
    A = A.astype(np.float32)
    self._run_and_compare(func, sig, A, A)
    A = A.astype(np.int32)
    self._run_and_compare(func, sig, A, A)
    A = A.astype(np.uint32)
    self._run_and_compare(func, sig, A, A)
Example #4
    def _test_template_4(self, target):
        sig = [int32(int32, int32),
               uint32(uint32, uint32),
               float32(float32, float32),
               float64(float64, float64)]
        basic_ufunc = vectorize(sig, target=target)(vector_add)
        np_ufunc = np.add

        def test(ty):
            data = np.linspace(0., 100., 500).astype(ty)
            result = basic_ufunc(data, data)
            gold = np_ufunc(data, data)
            np.testing.assert_allclose(gold, result)

        test(np.double)
        test(np.float32)
        test(np.int32)
        test(np.uint32)
Example #5
File: random.py  Project: sklam/numba
def xoroshiro128p_normal_float64(states, index):
    '''Return a normally distributed float64 and advance ``states[index]``.

    The return value is drawn from a Gaussian of mean=0 and sigma=1 using the
    Box-Muller transform.  This advances the RNG sequence by two steps.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    :rtype: float64
    '''
    index = int64(index)

    u1 = xoroshiro128p_uniform_float32(states, index)
    u2 = xoroshiro128p_uniform_float32(states, index)

    z0 = math.sqrt(-float64(2.0) * math.log(u1)) * math.cos(TWO_PI_FLOAT64 * u2)
    # discarding second normal value
    # z1 = math.sqrt(-float64(2.0) * math.log(u1)) * math.sin(TWO_PI_FLOAT64 * u2)
    return z0
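For intuition, a NumPy sketch of the same Box-Muller step on the host (this is not the CUDA device function above):

import numpy as np

def box_muller(u1, u2):
    # two uniforms in (0, 1] -> one standard normal draw; the sine branch
    # would give the independent second draw that the code above discards
    return np.sqrt(-2.0 * np.log(u1)) * np.cos(2.0 * np.pi * u2)

rng = np.random.default_rng(0)
z = box_muller(1.0 - rng.random(100000), rng.random(100000))  # 1 - u keeps log() away from 0
print(z.mean(), z.std())  # close to 0 and 1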
Example #6
def template_vectorize(self, target):
    # build basic native code ufunc
    sig = [int32(int32, int32),
           uint32(uint32, uint32),
           float32(float32, float32),
           float64(float64, float64)]
    basic_ufunc = vectorize(sig, target=target)(vector_add)

    # build python ufunc
    np_ufunc = np.add

    # test it out
    def test(ty):
        data = np.linspace(0., 100., 500).astype(ty)
        result = basic_ufunc(data, data)
        gold = np_ufunc(data, data)
        self.assertTrue(np.allclose(gold, result))

    test(np.double)
    test(np.float32)
    test(np.int32)
    test(np.uint32)
Example #7
class _DataAnalyzer:
    def __init__(self):
        self.params = None  #params for the current solution of fit_Spectral_Profile. contains [v0,a,b,sigma,gamma]
        #where v0: center frequency, a: height, b: offset from zero, sigma: standard deviation of temperature
        #distribution, gamma: FWHM of natural linewidth plus other lorentzian broadening effects
        self.vTMaxLens = None
        self.gammaMin = None  #provided
        self.T = None  #temperature, kelvin
        self.F0 = None  #center frequency of the profile. This is not necessarily the peak of the fit because of the
        #underlying hyperfine structure
        self.fitPeak = None  #to keep track of peak value of fit. This is necessary because more or less sampling can cause
        #the peak to be bigger or smaller slightly. For example, the profile may be fitted with poor resolution, then
        #when the user attempts to use those fit values to scale the data up, the scaled data can be taller
        self.freqTMaxLens = None  #What the maximum transverse frequency shift of the atomic beam is from the geometric
        #'heating' from the lens, MHz
        self.peakMode = None  #whether using single or multi peak fitting. Multi peak utilizes the 6 transitions
        self.laserJitter = None  #laser jitter, MHz
        self.imageFreqArr = None  #1D array of frequency values corresponding to each image
        self.imageSignalArr = None  #1D array of means or sums of image regions of interest
        self.gammaLockedToNatural = None
        self.residuals = None

    def fit_Spectral_Profile(self, imageFreqArr, imageSignalArr, peakMode,
                             vTMaxLens, gammaMin, laserJitter,
                             gammaLockedToNatural):
        #Fit spectral data. Can be single peak or multipeak, and can include the velocity distribution of the lens.
        #imageFreqArr: frequency, or anything else, scale
        #imageSignalArr: Fluorescence signal
        #peakMode: Use either multipeak (6 peaks) or single peak.
        #vTMaxLens: transverse velocity maximum at lens output. This is used to calculate the geometric 'heating'.
        #If None, don't apply anything.
        #gammaMin: value of gamma. Set to None to enable free fitting of gamma. Set to a specific value to lock to that
        #value, or allow to go above depending on gammaFloor
        #laserJitter: Jitter of laser, standard deviation, MHz.
        #gammaLockedToNatural: set gamma to natural linewidth

        if vTMaxLens is None:
            self.freqTMaxLens = None
        else:
            self.freqTMaxLens = (
                vTMaxLens / gv.cLight
            ) * gv.Li_D2_Freq / 1e6  #transverse frequency maximum lens, MHz
        gammaMax = 30.0
        assert gammaMin < gammaMax
        if gammaLockedToNatural == True:
            assert gammaMin == gv.LiTau / 1e6
        self.gammaLockedToNatural = gammaLockedToNatural
        self.peakMode = peakMode
        self.laserJitter = laserJitter
        self.vTMaxLens = vTMaxLens
        self.gammaMin = gammaMin
        self.imageFreqArr = imageFreqArr
        self.imageSignalArr = imageSignalArr
        self.catch_Errors_And_Give_Warnings()

        def minimize(params):
            #wrapper function to be minimized. One cannot do least squares for the curve fit because if the lensHeating
            #feature is enabled, there will be a convolution, which has to be evaluated all at once for every data point.
            #using scipy's curve_fit this would be far too slow. It's better to compare entire fits at once, rather
            #than compare a fit point by point, which requires recomputing the convolution over and over again.
            v0, a, b, sigma, gamma = params
            fit = self._spectral_Profile(self.imageFreqArr, v0, a, b, sigma,
                                         gamma)
            return self._cost(imageSignalArr, fit)

        deltaFMax = 100
        F0Guess = self.imageFreqArr[np.argmax(self.imageSignalArr)]
        aGuess = np.max(self.imageSignalArr) - np.min(self.imageSignalArr)
        bGuess = np.min(self.imageSignalArr)
        if self.gammaLockedToNatural == True:
            gammaMax = gammaMin + 1e-6
        bounds = [(F0Guess - deltaFMax, F0Guess + deltaFMax),
                  (0.0, aGuess * 2), (bGuess - aGuess, bGuess + aGuess),
                  (0, 30), (gammaMin, gammaMax)]
        np.random.seed(42)  #seed the generator to make results repeatable
        sol = spo.differential_evolution(
            minimize, bounds,
            polish=False)  #a type of genetic algorithm, very robust.
        np.random.seed(int(time.time()))  #reseed to make pseudorandom
        self.params = sol.x
        self.T = self._calculate_Temp(self.params[3])
        self.F0 = self.params[0]
        self.residuals = self.spectral_Fit(imageFreqArr) - imageSignalArr
        # print(bounds)
        # print(self.params)
        # xTest=np.linspace(imageFreqArr[0],imageFreqArr[-1],num=1000)
        # plt.grid()
        # plt.plot(imageFreqArr,imageSignalArr,marker='x')
        # plt.plot(xTest,self.spectral_Fit(xTest))
        # plt.show()
    def catch_Errors_And_Give_Warnings(self):
        if self.peakMode != 'multi' and self.peakMode != 'single':
            raise Exception(
                'Invalid peak mode. Choose \'multi\' or \'single\'')
        if not 100.0 > self.gammaMin >= 0.0:
            raise Exception(
                'Gamma must be in MHz and positive and it appears not to be')
        if self.gammaMin < gv.LiTau / 1e6:
            warnings.warn(
                "Gamma is set to a value below the natural linewidth")
        if not 100.0 > self.laserJitter >= 0.0:
            raise Exception(
                'Laser jitter must be in MHz and positive and it appears not to be'
            )
        maxReasonableVTransLen = 20.0
        if self.vTMaxLens is not None and (
                not maxReasonableVTransLen > self.vTMaxLens >= 0.0):
            raise Exception(
                'Transverse particle velocity leaving lens is set to a value that does not make sense. \n'
                'must be m/s, positive and less than a reasonably sized value')
        maxExpectedFreqInMHz = 5000
        if np.max(np.abs(self.imageFreqArr)) > 1e6 * maxExpectedFreqInMHz:
            raise Exception(
                'It appears that the provided frequency data is not in MHz')
        if isinstance(self.imageFreqArr, np.ndarray) == False or isinstance(
                self.imageSignalArr, np.ndarray) == False:
            raise Exception(
                'Frequency data and signal data must be numpy arrays')

    def spectral_Fit(self, v):
        #the function that returns the fit to the data provided in self.fit_Spectral_Profile.
        #v: frequency, MHz
        #return: The "signal", in arbitrary units
        return self._spectral_Profile(v, *self.params)

    def _spectral_Profile(self, v, v0, a, b, sigma, gamma):
        #todo: this can be made more clean and logical with lens stuff
        #private method that returns the spectral profile. It may be convoluted with the geometric "heating" and or
        #contain multiple peaks

        profile = np.zeros(v.shape) + b
        sigma = np.sqrt(
            sigma**2 + self.laserJitter**2
        )  #gaussian standard deviation. laserJitter is assumed to be gaussian here

        if self.peakMode == 'multi':
            profile += self._multi_Voigt(v, v0, a, sigma, gamma=gamma)
        else:
            profile += self._voigt(v, v0, a, sigma, gamma)
        if self.vTMaxLens is not None:
            # plt.plot(v,profile)
            profileMax = profile.max(
            )  #to aid in normalizing and rescaling the profile after convolution
            profileMin = profile.min(
            )  #to aid in normalizing and rescaling the profile after convolution
            profile = (profile - profileMin) / (profileMax - profileMin)
            vLens = np.linspace(-(v.max() - v.min()) / 2,
                                (v.max() - v.min()) / 2,
                                num=v.shape[0])  #need to have the lens profile
            #centered for the convolution to preserve the center of the spectral profile that results
            lensVelProfile = [
                self.lens_Velocity_Spread(v, self.vTMaxLens) for v in vLens
            ]  #not very efficient
            # plt.plot(v,lensVelProfile)
            # plt.plot(v,profile)

            profile = np.convolve(
                lensVelProfile, profile, mode='same'
            )  #convolution has distributive property so don't need

            profile = (profile - profile.min()) / (profile.max() -
                                                   profile.min())

            # plt.plot(v,profile)
            # plt.show()
            #to perform on each peak individually
            profile = profile * (profileMax -
                                 profileMin) + profileMin  #rescale the peak
        return profile

    @staticmethod
    @numba.njit(numba.float64(numba.float64[:], numba.float64[:])
                )  #special compiler to make code run faster
    def _cost(imageSignalArr, fit):
        #cost function for the differential evolution analysis
        return np.sqrt(np.sum((imageSignalArr - fit)**2))

    @staticmethod
    @numba.njit(
        numba.float64(numba.float64, numba.float64, numba.float64,
                      numba.float64)
    )  #special compiler to make code run faster
    def _gauss(T, v, m=gv.massLi7, v0=0.0):
        t1 = np.sqrt(m / (2 * np.pi * gv.kB * T))
        t2 = np.exp(-m * np.power(v - v0, 2) / (2 * gv.kB * T))
        return t1 * t2

    @staticmethod
    def lens_Velocity_Spread(x, x0, a=1):
        #1D transverse velocity distribution for the output of the lens. This arises because the lens has a circular
        #input and so creates a unique semicircular distribution. It can be derived by considering the density and y
        #velocity as a function of y in the circular aperture.
        #x: either velocity or frequency value. Must have same units as x0
        #x0: maximum transverse velocity or frequency. Same units as x
        if np.abs(x) > x0:  #to prevent imaginary
            return 0
        else:
            return a * np.sqrt(1 - (x / x0)**2)

        #---------SINGLE VOIGT FIT---------
        #this voigt is normalized to a height of 1, then multiplied by the variable a
        #there is no yaxis offset in the voigt, that is done later
    def _voigt(self, f, f0, a, sigma, gammaFWHM):
        #units must be consistent!!
        #f, frequency value
        #f0, center frequency
        #a, height of voigt
        #sigma,standard deviation of the gaussian
        #gamma, FWHM of the lorentzian
        gammaHWHM = gammaFWHM / 2  #convert lorentzian FWHM to HWHM
        x = f - f0
        z = (x + gammaHWHM * 1.0J) / (sigma * np.sqrt(2.0))  #complex argument
        V = np.real(spf.wofz(z)) / (sigma * np.sqrt(2 * np.pi))  #voigt profile

        #now normalize to 1 at peak, makes fitting easier
        z0 = (gammaHWHM * 1.0J) / (sigma * np.sqrt(2.0))  #z at peak
        V0 = np.real(spf.wofz(z0)) / (sigma * np.sqrt(2 * np.pi)
                                      )  #height of voigt at peak
        return a * V / V0  #makes the height equal to a

        #------SEXTUPLE VOIGT FIT----------

    def _multi_Voigt(self, freq, freq0, a, sigma, gamma=gv.LiTau / 1E6):
        #units must be consistent!
        #freq: frequency
        #a: height constant
        #b: center frequency
        #c: vertical offset
        #sigma: standard deviation of gaussian profile
        #gamma: FWHM of lorentzian
        aRatio = 4 * gv.F2F1Ratio  #ratio of intensity of f=2 to f=1. First number is power ratio in sideband. Second
        # fraction is ratio of hyperfine transitions (see globalVariable.py for more info in the comments for
        #F2F1Ratio).
        a2 = (
            aRatio / (aRatio + 1)
        ) * a  #I do some funny business here to try and get the parameter "a" to more closely match
        #the total height. a2/a1 still equals the parameter "aRatio", but now they also add up the parameter "a"
        a1 = a * 1 / (aRatio + 1)  #same funny business
        val = 0.0

        #F=2 transition
        val += a2 * self._voigt(freq, freq0 + gv.F1Sep / 1E6, gv.S21, sigma,
                                gamma)
        val += a2 * self._voigt(freq, freq0 + gv.F2Sep / 1E6, gv.S22, sigma,
                                gamma)
        val += a2 * self._voigt(freq, freq0 + gv.F3Sep / 1E6, gv.S23, sigma,
                                gamma)
        #F=1 transition
        val += a1 * self._voigt(freq, freq0 + gv.F0Sep / 1E6, gv.S10, sigma,
                                gamma)
        val += a1 * self._voigt(freq, freq0 + gv.F1Sep / 1E6, gv.S11, sigma,
                                gamma)
        val += a1 * self._voigt(freq, freq0 + gv.F2Sep / 1E6, gv.S12, sigma,
                                gamma)
        return val

    def _calculate_Temp(self, sigma, f0=gv.Li_D2_Freq, MHz=True):
        if MHz == True:
            f0 = f0 / 1e6

        sigmaVelocity = (sigma / f0) * gv.cLight
        toMkelvin = 1e3
        return toMkelvin * sigmaVelocity**2 * gv.massLi7 / gv.kB

    def calculate_Temp(self, sigma, MHz=True):
        return self._calculate_Temp(sigma, MHz=MHz)

    def self_Test(self):

        #_spectral_Profile(self,v,v0,a,b,sigma,gamma,vTMaxLens,peakMode,laserJitter)
        numTestDataPoints = 100
        testDataFreq = np.linspace(-100, 100, num=numTestDataPoints)
        freq0 = 10
        sigma = 5.0
        gamma = 10.0
        b = 1000.0
        a = 500.0
        # testDataSignal=500.0*np.exp(-testDataFreq**2/100)+1000.0
        testDataSignal = spf.voigt_profile(testDataFreq - freq0, sigma,
                                           gamma) / spf.voigt_profile(
                                               0.0, sigma, gamma)
        testDataSignal = a * testDataSignal + b
        np.random.seed(42)  #set the seed to repeatable noise

        # testDataSignal+=30.0*np.random.random(numTestDataPoints) #add gaussian noise
        np.random.seed(int(time.time()))  #reseed the random generator

        tester1 = fit_Spectral_Data(testDataFreq, testDataSignal, 'multi',
                                    False, 10.0, gv.LiTau / 1e6,
                                    0.0)._dataAnalyzerObject
        tester2 = fit_Spectral_Data(testDataFreq, testDataSignal, 'single',
                                    False, 10.0, gv.LiTau / 1e6,
                                    0.0)._dataAnalyzerObject
        tester3 = fit_Spectral_Data(testDataFreq, testDataSignal, 'multi',
                                    True, 10.0, gv.LiTau / 1e6,
                                    0.0)._dataAnalyzerObject
        tester4 = fit_Spectral_Data(testDataFreq, testDataSignal, 'single',
                                    True, 10.0, gv.LiTau / 1e6,
                                    0.0)._dataAnalyzerObject
        tester5 = fit_Spectral_Data(testDataFreq, testDataSignal, 'single',
                                    True, 10.0, gv.LiTau / 1e6,
                                    1.0)._dataAnalyzerObject
        testerList = [tester1, tester2, tester3, tester4, tester5]
        # now test that saved results match with current results
        testResults = np.loadtxt('DataAnalyzer_TestData')
        for i in range(len(testerList)):
            assert np.all(testResults[i] == np.asarray(
                [*testerList[i].params, testerList[i].T]))
            print('test ' + str(i) + ' passed')
        print('self test passed')

    def save_Test_Data(self, testerList):
        gv.warning_Sound()
        # input("you are trying to save new test data, and overwrite the old. Press enter to proceed")
        data = []
        for tester in testerList:
            data.append([*tester.params, tester.T])
        np.savetxt('DataAnalyzer_TestData', np.asarray(data))
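The Faddeeva-function construction in _voigt above can be cross-checked against scipy.special.voigt_profile, which computes the same profile directly (a sketch; note voigt_profile takes the Lorentzian HWHM, matching gammaHWHM above):

import numpy as np
import scipy.special as spf

x = np.linspace(-50.0, 50.0, 11)
sigma, gammaFWHM = 5.0, 10.0
gammaHWHM = gammaFWHM / 2  # convert lorentzian FWHM to HWHM, as in _voigt

z = (x + 1.0J * gammaHWHM) / (sigma * np.sqrt(2.0))
V = np.real(spf.wofz(z)) / (sigma * np.sqrt(2 * np.pi))

assert np.allclose(V, spf.voigt_profile(x, sigma, gammaHWHM))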
Example #8
from __future__ import print_function, absolute_import
import numpy as np
from numba import vectorize
from numba import cuda, int32, float32, float64
from timeit import default_timer as time
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
from numba.cuda.testing import CUDATestCase
from numba import config

sig = [int32(int32, int32), float32(float32, float32), float64(float64, float64)]


target = "cuda"
if config.ENABLE_CUDASIM:
    target = "cpu"


test_dtypes = np.float32, np.int32


@skip_on_cudasim("ufunc API unsupported in the simulator")
class TestCUDAVectorize(CUDATestCase):
    def test_scalar(self):
        @vectorize(sig, target=target)
        def vector_add(a, b):
            return a + b

        a = 1.2
        b = 2.3
        c = vector_add(a, b)
Example #9
from __future__ import print_function, absolute_import

import numpy as np

from numba import vectorize
from numba import cuda, int32, float32, float64
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
from numba.cuda.testing import CUDATestCase
from numba import config

sig = [int32(int32, int32),
       float32(float32, float32),
       float64(float64, float64)]


target='cuda'
if config.ENABLE_CUDASIM:
    target='cpu'


test_dtypes = np.float32, np.int32


@skip_on_cudasim('ufunc API unsupported in the simulator')
class TestCUDAVectorize(CUDATestCase):
    N = 1000001

    def test_scalar(self):

        @vectorize(sig, target=target)
Example #10
import numpy as np
from numpy.linalg import norm
from numba import jit, float64

import lj_functions_c as ljc


def V_LJ(mag_r, sp):
    """Lennard-Jones potential, mag_r is a number.
    * sp: system params
    >>> from numpy.linalg import norm
    >>> V_LJ(norm([0.5, 0.5, 0.5]))
    12.99
    """
    V_rc = 4 * sp.eps * ((sp.sigma / sp.rc) ** 12 - (sp.sigma / sp.rc) ** 6)
    return 4 * sp.eps * ((sp.sigma / mag_r) ** 12 - (sp.sigma / mag_r) ** 6) - \
        V_rc if mag_r < sp.rc else 0.0


@jit(float64(float64, float64, float64, float64), nopython=True)
def V_LJ_numba(mag_r, eps, sigma, rc):
    V_rc = 4 * eps * ((sigma / rc) ** 12 - (sigma / rc) ** 6)
    return 4 * eps * ((sigma / mag_r) ** 12 - (sigma / mag_r) ** 6) - \
        V_rc if mag_r < rc else 0.0


def force(r, sp):
    """r is a vector"""
    mag_dr = norm(r)
    return 4 * sp.eps * (-12 * (sp.sigma / mag_dr) ** 12 + 6 * (sp.sigma / mag_dr) ** 6) * r / mag_dr**2 \
        if mag_dr < sp.rc else np.zeros(3)


@jit(float64(float64[:]), nopython=True)
def norm_numba(r):
Example #11
def extract_around_peak(
    waveforms, peak_index, width, shift, sampling_rate_ghz, sum_, peak_time
):
    """
    This function performs the following operations:

    - Sum the samples from the waveform using the window defined by a
    peak position, window width, and window shift.
    - Obtain the pulse time within a window defined by a peak finding
    algorithm, using the weighted average of the samples.

    This function is a numpy universal function which defines the operation
    applied on the waveform for every channel and pixel. Therefore in the
    code body of this function:
        - waveforms is a 1D array of size n_samples.
        - peak_index, width and shift are integers, corresponding to the
            correct value for the current pixel

    The ret argument is required by numpy to create the numpy array which is
    returned. It can be ignored when calling this function.

    Parameters
    ----------
    waveforms : ndarray
        Waveforms stored in a numpy array.
        Shape: (n_pix, n_samples)
    peak_index : ndarray or int
        Peak index for each pixel.
    width : ndarray or int
        Window size of integration window for each pixel.
    shift : ndarray or int
        Shift of the integration window relative to the peak_index for each pixel.
    sampling_rate_ghz : float
        Sampling rate of the camera, in units of GHz
        Astropy units should have to_value('GHz') applied before being passed
    sum_ : ndarray
        Return argument for ufunc (ignore)
        Returns the sum of the waveform samples
    peak_time : ndarray
        Return argument for ufunc (ignore)
        Returns the peak_time in units "ns"

    Returns
    -------
    charge : ndarray
        Extracted charge.
        Shape: (n_pix)

    """
    n_samples = waveforms.size
    start = peak_index - shift
    end = start + width

    # reduce to valid range
    start = max(0, start)
    end = min(end, n_samples)

    i_sum = float64(0.0)
    time_num = float64(0.0)
    time_den = float64(0.0)

    for isample in prange(start, end):
        i_sum += waveforms[isample]
        if waveforms[isample] > 0:
            time_num += waveforms[isample] * isample
            time_den += waveforms[isample]

    peak_time[0] = time_num / time_den if time_den > 0 else peak_index
    # Convert to units of ns
    peak_time[0] /= sampling_rate_ghz
    sum_[0] = i_sum
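A plain-NumPy sketch of the same windowed sum and weighted-average timing on a single waveform (the real function is compiled as a ufunc; this only illustrates the arithmetic):

import numpy as np

waveforms = np.array([0., 1., 4., 9., 4., 1., 0.])
peak_index, width, shift, rate_ghz = 3, 3, 1, 1.0

start = max(0, peak_index - shift)
end = min(start + width, waveforms.size)
window = waveforms[start:end]

charge = window.sum()
peak_time = (window * np.arange(start, end)).sum() / charge / rate_ghz
print(charge, peak_time)  # 17.0 and 3.0 -- the symmetric peak position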
Example #12
"""
    N-body simulation.
    
    1. Add @jit decorators and function signatures to all functions.
    2. Add a function vec_deltas() that takes two NumPy arrays of floats and returns the difference between each element.
    
    Time: Average(74.060, 55.593, 68.151) = 65.93 sec
    Improvement: 310.4767/65.93 = 4.709187x
"""
import time
from itertools import combinations
import numpy as np
from numba import jit, void, int32, float64, vectorize


@vectorize([float64(float64, float64)])  ## update: decorator and signature
def vec_deltas(x, y):  ## update: add new function to replace compute_delta
    '''
    Compute the difference between each element in two arrays.
    '''
    return x - y


@jit("void(float64[:,:,:], char[:,:], float64[:,:,:], float64)"
     )  ## update: decorator and signature
def report_energy(BODIES, body1_2, val, e=0.0):
    '''
        compute the energy and return it so that it can be printed
    '''
    for (body1, body2) in body1_2:
        (x1y1z1, v1, m1) = BODIES[body1]  ## update first element
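Since vec_deltas is a NumPy ufunc after the vectorize decorator, it broadcasts elementwise over arrays; a small usage sketch (assuming the function defined above is in scope):

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([0.5, 0.5, 0.5])
print(vec_deltas(x, y))  # [0.5 1.5 2.5]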
Example #13
from __future__ import absolute_import, print_function, division
import numpy as np
from numba import vectorize
from numba import cuda, float64
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim, SerialMixin
from numba import config

sig = [float64(float64, float64)]


target='cuda'
if config.ENABLE_CUDASIM:
    target='cpu'


@skip_on_cudasim('ufunc API unsupported in the simulator')
class TestCUDAVectorizeScalarArg(SerialMixin, unittest.TestCase):

    def test_vectorize_scalar_arg(self):
        @vectorize(sig, target=target)
        def vector_add(a, b):
            return a + b

        A = np.arange(10, dtype=np.float64)
        dA = cuda.to_device(A)
        v = vector_add(1.0, dA)

        np.testing.assert_array_almost_equal(
            v.copy_to_host(),
            np.arange(1, 11, dtype=np.float64))
Example #14
File: random.py  Project: sklam/numba
def uint64_to_unit_float64(x):
    '''Convert uint64 to float64 value in the range [0.0, 1.0)'''
    x = uint64(x)
    return (x >> uint32(11)) * (float64(1) / (uint64(1) << uint32(53)))
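The bit trick keeps the top 53 bits of the 64-bit state (the width of a double's mantissa) and scales by 2**-53, so every result lands uniformly in [0.0, 1.0); a plain-Python check:

x = 0xFFFFFFFFFFFFFFFF                # largest uint64
print((x >> 11) * (1.0 / (1 << 53)))  # 0.9999999999999999, just below 1.0
print((0 >> 11) * (1.0 / (1 << 53)))  # 0.0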
Example #15
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- mode: python -*-
# Time-stamp: <Sun, 23/07-2017 21:09:22 CST>
# Copyright (c) H.-K.Sun <spin.hk AT outlook DOT com>
"""A Sample From Vegas Tutorial."""

from numba import jit, float64
import numpy
import vegas


# Integrand: ridge of N Gaussians spread evenly along the diagonal
@jit(float64(float64[:]), nopython=True, nogil=True)
def ridge(x):
    N = 2000
    x0 = numpy.arange(0.0, N) / (N - 1.)
    dx2 = 0.0
    for xd in x:
        dx2 += (xd - x0) ** 2
    return numpy.average(numpy.exp(-100. * dx2)) * (100. / numpy.pi) ** (len(x) / 2.)


def main():
    integ = vegas.Integrator(4 * [[0, 1]])
    # adapt
    integ(ridge, nitn=10, neval=1e4)
    # final result
    result = integ(ridge, nitn=10, neval=1e4)
    print('result = {0},  Q = {1:.2f}'.format(result, result.Q))
Example #16
def _test_template_2(self, target):
    numba_sinc = vectorize([float64(float64), float32(float32)],
                           target=target)(sinc)
    numpy_sinc = np.vectorize(sinc)
    self._run_and_compare(numba_sinc, numpy_sinc)
Example #17
def interpolate(x, times, dfs, method):

    if type(x) is float or type(x) is np.float64:
        u = uinterpolate(x, times, dfs, method)
        return u
    elif type(x) is np.ndarray:
        v = vinterpolate(x, times, dfs, method)
        return v
    else:
        raise ValueError("Unknown input type", type(x))


###############################################################################


@njit(float64(float64, float64[:], float64[:], int64),
      fastmath=True,
      cache=True,
      nogil=True)
def uinterpolate(t, times, dfs, method):
    ''' Return the interpolated value of y given x and a vector of x and y.
    The values of x must be monotonic and increasing. The different schemes for
    interpolation are linear in y (as a function of x), linear in log(y) and
    piecewise flat in the continuously compounded forward y rate. '''

    small = 1e-10
    numPoints = times.size

    if t == times[0]:
        return dfs[0]
Example #18
import math
import time

from scipy.stats import linregress
from numba import njit, float64

from EvoDose import parameters

"""
In this file the PSF (power spectrum function, the energy distribution of the beam) and the genetic (optimization)
algorithm are being defined. Uses Numba for performance optimization - Numba decorators are used in this file.
jit = just in time compilation. More: http://numba.pydata.org/numba-doc/0.35.0/user/jit.html
"""

# ---------- PSF -------------

normalization = 1


@njit(float64(float64, float64))
def _gauss(r, sigma):
    return (1 / (sigma ** 2)) * math.exp(-r ** 2 / sigma ** 2)


@njit(float64(float64))
def tripple_gaussian_improved(r):
    """
    According to eq. 5 from http://iopscience.iop.org/article/10.1143/JJAP.35.1929/pdf
    :param r: Radius from centre
    :return: Deposited power
    """
    return (1 / normalization) * (1 / (math.pi * (1 + parameters.eta_1 + parameters.eta_2))) * (
            _gauss(r, parameters.alpha) + parameters.eta_1 * _gauss(r, parameters.beta) + parameters.eta_2 / (
            24 * parameters.gamma ** 2) * math.exp(-math.sqrt(r / parameters.gamma)))
Example #19
def time_precision(mu, x, pi):
    # fit time 'x' to predicted time 'mu' with precision 'pi'
    # TODO: the scaling of sigma needs to be adapted to SoA empirical evidence
    diff = np_sqrt(np_dot(x - mu, x - mu))
    sig = 2 * (1 - pi) if pi < 1. else 0.1
    return np_e**(-(diff**2) / (sig**2)) + 0.001


# def gaussian(x, mu, sig):
#     """ Not normally distributed!
#     """
#     diff = x - mu
#     return np_exp((-np_sqrt(np_dot(diff, diff)) ** 2) / (2 * sig ** 2))


@vectorize([float64(float64, float64, float64)])
@jit(float64(float64, float64, float64), nogil=True, cache=True, nopython=True)
def gaussian(x, mu, sig):
    """ Not normally distributed!
    """
    diff = np.array([x - mu])
    return np_exp((-np_sqrt(np_dot(diff, diff))**2.) / (2. * sig**2.))


def distribution(mu, sig, size):
    return [[gaussian(i, mu, sig), i] for i in range(size)]


def extend_sequence(sequence, item):
    """ Simply add a new item to the given sequence.
    Simple version of extend_distribution_sequence, since not using distributions here makes
Example #20
            # compute the probability
            probability = gamma_cdf(gamma_values[calendarMonth, 1],
                                    gamma_values[calendarMonth, 2],
                                    gamma_values[calendarMonth, 0],
                                    values[month_index])

            # convert the probability to a fitted value
            values[month_index] = inv_normal(probability)
        
    # return the fitted values clipped to the specified upper and lower limits
    return np.clip(values, lower_limit, upper_limit)
        
#-----------------------------------------------------------------------------------------------------------------------
#@profile
@jit(float64(float64, float64, float64, float64))
def gamma_cdf(beta,
              gamma,
              pzero,
              x):
    '''
    Compute the probability of alpha being less than or equal to a value using incomplete gamma parameters.
    
    :param beta:
    :param gamma:
    :param pzero:
    :param x:    
    :return: probability of alpha <= x
    '''

    if x <= 0.0:
Example #22
from numba import jit, float64
import numpy as np

@jit(float64[:](float64[:], float64[:]), nopython=True, nogil=True, cache=True)
def _cross_vec3(a, b):
    c = np.zeros(3)
    c[0] = a[1]*b[2] - a[2]*b[1]
    c[1] = a[2]*b[0] - a[0]*b[2]
    c[2] = a[0]*b[1] - a[1]*b[0]
    return c
@jit(float64(float64[:]), nopython=True, nogil=True, cache=True)
def _norm(a):
    n_2 = np.dot(a, a)
    return np.sqrt(n_2)

@jit(float64[:,:](float64[:], float64), nopython=True, nogil=True, cache=True)
def _rotation_matrix(axis, angle):
    """
    This method produces a rotation matrix given an axis and an angle.
    """
    axis = axis/_norm(axis)
    axis_squared = axis**2
    cos_angle = np.cos(angle)
    sin_angle = np.sin(angle)

    rotation_matrix = np.zeros((3,3))
    rotation_matrix[0, :] = np.array([cos_angle+axis_squared[0]*(1-cos_angle),
                                   axis[0]*axis[1]*(1-cos_angle) - axis[2]*sin_angle,
                                   axis[0]*axis[2]*(1-cos_angle)+axis[1]*sin_angle])

    rotation_matrix[1, :] = np.array([axis[1]*axis[0]*(1-cos_angle)+axis[2]*sin_angle,
Example #23
from __future__ import print_function, absolute_import, division
import numpy as np
import time
from numba import cuda, config, float64, void
from numba.cuda.testing import unittest

# NOTE: CUDA kernel does not return any value

if config.ENABLE_CUDASIM:
    tpb = 4
else:
    tpb = 16
SM_SIZE = tpb, tpb


@cuda.jit(float64(float64, float64), device=True, inline=True)
def get_max(a, b):
    if a > b:
        return a
    else:
        return b


@cuda.jit(void(float64[:, :], float64[:, :], float64[:, :]))
def jocabi_relax_core(A, Anew, error):
    err_sm = cuda.shared.array(SM_SIZE, dtype=float64)

    ty = cuda.threadIdx.x
    tx = cuda.threadIdx.y
    bx = cuda.blockIdx.x
    by = cuda.blockIdx.y
Example #24
##############################################################################
# Copyright (C) 2018, 2019, 2020 Dominic O'Kane
###############################################################################

import numpy as np
from numba import njit, float64

from ..finutils.FinGlobalTypes import FinOptionTypes
from ..finutils.FinMath import N
from ..finutils.FinError import FinError

###############################################################################


@njit(float64(float64, float64, float64, float64, float64, float64, float64),
      fastmath=True, cache=True)
def blackVolFromSABR(alpha, beta, rho, nu, f, k, t):

    if abs(rho) >= 0.999999999:
        raise FinError("Rho is a correlation and must be less than 1.0")

    b = 1.0 - beta
    fk = f * k
    m = f / k

    if abs(m - 1.0) > 1e-6:
        sigma = 1.0
        numTerm1 = ((alpha * b)**2.0) / (fk**b) / 24.0
        numTerm2 = rho * beta * nu * alpha / (fk**(b / 2.0)) / 4.0
        numTerm3 = nu * nu * ((2.0 - 3.0 * (rho**2.0)) / 24.0)
        num = alpha * (1.0 + (numTerm1 + numTerm2 + numTerm3) * t)
Example #25
    xyz = np.vstack((xyz1, xyz2))
    return types, xyz


def gen_disordered_box(N1, N2):
    types = np.array([1] * N1 + [2] * N2)
    xyz = np.random.rand(N1+N2, 3) * L
    return types, xyz


def guess_box_size(xyz):
    """Infer box size from xyz matrix"""
    return np.round(np.max(xyz[:, 1] - np.min(xyz[:, 1]), 0))


@jit(float64(float64[:]), nopython=True)
def norm_numba(r):
    rn = 0.0
    for ri in r:
        rn += ri*ri
    return sqrt(rn)


#@jit(float64(int64[:], float64[:, :], float64, float64))
def contacts_naive(types, xyz, L, rc):
    N = len(xyz)
    box = L * np.eye(3)
    inv_box = np.linalg.pinv(box)
    dr, drn = np.zeros(3), np.zeros(3)
    G, Gn = np.zeros(3), np.zeros(3)
    N11, N22, N12 = 0, 0, 0
Example #26
    epsilonBelow = (1.0 - alpha) * diffAbove
    epsilonAbove = (1.0 - alpha) - epsilonBelow

    for iLossUnit in range(0, numLosses):
        indepDbn[iLossUnit] *= alpha

    indepDbn[int(meanBelow)] += epsilonBelow
    indepDbn[int(meanAbove)] += epsilonAbove

    return indepDbn


###############################################################################


@njit(float64(float64[:]), fastmath=True, cache=True)
def portfolioGCD(actualLosses):

    numCredits = len(actualLosses)
    scaling = 1000000

    temp = int(actualLosses[0] * scaling)

    for iCredit in range(1, numCredits):
        num2 = int(actualLosses[iCredit] * scaling)
        temp = pairGCD(temp, num2)

    portfolioGCD = float(temp / scaling)
    return portfolioGCD

Example #27
    def test_laplace_small(self):

        @cuda.jit(float64(float64, float64), device=True, inline=True)
        def get_max(a, b):
            if a > b:
                return a
            else:
                return b

        @cuda.jit(void(float64[:, :], float64[:, :], float64[:, :]))
        def jocabi_relax_core(A, Anew, error):
            err_sm = cuda.shared.array(SM_SIZE, dtype=float64)

            ty = cuda.threadIdx.x
            tx = cuda.threadIdx.y
            bx = cuda.blockIdx.x
            by = cuda.blockIdx.y

            n = A.shape[0]
            m = A.shape[1]

            i, j = cuda.grid(2)

            err_sm[ty, tx] = 0
            if j >= 1 and j < n - 1 and i >= 1 and i < m - 1:
                Anew[j, i] = 0.25 * ( A[j, i + 1] + A[j, i - 1] \
                                      + A[j - 1, i] + A[j + 1, i])
                err_sm[ty, tx] = Anew[j, i] - A[j, i]

            cuda.syncthreads()

            # max-reduce err_sm vertically
            t = tpb // 2
            while t > 0:
                if ty < t:
                    err_sm[ty, tx] = get_max(err_sm[ty, tx], err_sm[ty + t, tx])
                t //= 2
                cuda.syncthreads()

            # max-reduce err_sm horizontally
            t = tpb // 2
            while t > 0:
                if tx < t and ty == 0:
                    err_sm[ty, tx] = get_max(err_sm[ty, tx], err_sm[ty, tx + t])
                t //= 2
                cuda.syncthreads()

            if tx == 0 and ty == 0:
                error[by, bx] = err_sm[0, 0]



        if config.ENABLE_CUDASIM:
            NN, NM = 4, 4
            iter_max = 20
        else:
            NN, NM = 256, 256
            iter_max = 1000

        A = np.zeros((NN, NM), dtype=np.float64)
        Anew = np.zeros((NN, NM), dtype=np.float64)

        n = NN
        m = NM

        tol = 1.0e-6
        error = 1.0

        for j in range(n):
            A[j, 0] = 1.0
            Anew[j, 0] = 1.0

        timer = time.time()
        iter = 0

        blockdim = (tpb, tpb)
        griddim = (NN // blockdim[0], NM // blockdim[1])

        error_grid = np.zeros(griddim)

        stream = cuda.stream()

        dA = cuda.to_device(A, stream)          # to device and don't come back
        dAnew = cuda.to_device(Anew, stream)    # to device and don't come back
        derror_grid = cuda.to_device(error_grid, stream)

        while error > tol and iter < iter_max:
            self.assertTrue(error_grid.dtype == np.float64)

            jocabi_relax_core[griddim, blockdim, stream](dA, dAnew, derror_grid)

            derror_grid.copy_to_host(error_grid, stream=stream)


            # error_grid is available on host
            stream.synchronize()

            error = np.abs(error_grid).max()

            # swap dA and dAnew
            tmp = dA
            dA = dAnew
            dAnew = tmp

            iter += 1

        runtime = time.time() - timer
Example #28
    else:
        tg, gammay, sdist = shapedist.elastic_linear_multilevel.find_gamma(t, p, q, mask, energy_dot, uniform, dim, neigh, strip_height)
    if distfunc is not None:
        sdist = distfunc(p, q, tg, gammay)

    if 'd' in dr.lower():
        if 't' in dr.lower():
            p = p.T
            q = q.T
        return sdist, p[mask[-1]], q[mask[-1]], tg, gammay
    else:
        return sdist



@jit(float64(float64[:], float64[:], float64[:]), cache=True, nopython=True)
def find_error(tg, gammar, gammat):
    """
    Calculates the difference between two gamma curves.
    Parameters
    ----------
    tg
    gammar
    gammat

    Returns
    -------

    """
    n = tg.size
    error = 1 / 2 * (tg[1] - tg[0]) * (gammar[1] - gammat[1]) ** 2 + 1 / 2 * (tg[n - 1] - tg[n - 2]) * (
Example #29
        func for func in llvm_module.functions
        if not func.name.startswith('_') and not func.is_declaration
    ]
    addr = engine.get_function_address(functions[1].name)
    assert addr > 0, 'addr == %d' % addr

    # Declare the ctypes function prototype
    # functype = cfunctype(c_double, c_double)

    path = os.path.expanduser(
        os.path.join('~', 'ibis-data', 'ibis-testing-data', 'ibis-testing.db')
    )
    con = sqlite3_connection(path.encode('utf8'))
    result = register(
        con,
        addr,
        func.__name__.encode('utf8'),
        len(func.nopython_signatures[0].args)
    )
    import ipdb; ipdb.set_trace()
    con.execute("select mysin(1.0230923)".encode('utf8'))


@jit(float64(float64))
def mysin(x):
    return math_sin(x)


sin = udf(mysin)

Example #30
    def getMolWt(self) -> float:
        m = 0
        for i in range(self.n):
            if self.substances[i].MolWt != 0:
                m += self.y[i] * self.substances[i].MolWt
            else:
                return 0
        return m


# ============ helper functions (for performance) =================


@njit(
    float64(float64, float64, float64, float64, float64, float64, float64), cache=True
)
def _getH_helper(Tref: float, T: float, a0, a1, a2, a3, a4) -> float:
    return R_IG * (
        a0 * (T - Tref)
        + (T ** 2 - Tref ** 2) * a1 / 2
        + (T ** 3 - Tref ** 3) * a2 / 3
        + (T ** 4 - Tref ** 4) * a3 / 4
        + (T ** 5 - Tref ** 5) * a4 / 5
    )


@njit(float64(float64, float64, float64, float64), cache=True)
def _getPvpAntoine_helper(T, Ant_A, Ant_B, Ant_C):
    ans = 10 ** (Ant_A - Ant_B / (T + Ant_C - 273.15))
    P = ans * 1e5
Example #31
        for k in range(1, 4):
            _sum += 2 * d[k] * math.cos(arg * k)

        lpw[i] *= _sum
        sumg += 2 * lpw[i]

    # normalization
    lpw = np.divide(lpw, sumg)

    # mirror related to 0
    answer = lpw[::-1].tolist()
    answer.extend(lpw[1:].tolist())
    return answer


@jit(float64(float64, float64, int64))
def HPF(Fcut: float, dT: float, m: int = 32) -> list:
    lpw = np.array(LPF(Fcut=Fcut, dT=dT, m=m))

    lpw = np.multiply(lpw, -1)

    lpw[m] = 1 + lpw[m]

    return lpw.tolist()


def BPF(Fcut1: float, Fcut2: float, dT: float, m: int = 32) -> list:
    """
    Band-pass filter

    :param Fcut1:
Example #32
from abc import ABCMeta, abstractmethod
import numpy as np
from numba import njit, vectorize, float64

__all__ = [
    "pmt_spe",
    "sipm_gentile_spe",
    "SPESpectrum",
    "SiPMGentileSPE",
]

SQRT2PI = np.sqrt(2.0 * np.pi)


@vectorize([float64(float64, float64, float64)], fastmath=True)
def normal_pdf(x, mean, std_deviation):
    """
    Probability density function of a normal distribution.
    Defined from first principles and vectorized so that it can be called from Numba

    Parameters
    ----------
    x : ndarray
        Charge (p.e.) to evaluate the pdf at
    mean : float
        Mean of the distribution
    std_deviation : float
        Standard deviation of the distribution
    Returns
    -------
    ndarray
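A quick cross-check of normal_pdf against scipy.stats.norm (a sketch, assuming the vectorized function above evaluates the usual Gaussian density):

import numpy as np
from scipy.stats import norm

x = np.linspace(-3.0, 3.0, 7)
assert np.allclose(normal_pdf(x, 0.0, 1.0), norm.pdf(x, loc=0.0, scale=1.0))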
Example #33
    caa = ma2 - ma * ma
    cab = mab - ma * mb
    cbb = mb2 - mb * mb

    m = [[0.0, 0.0], [0.0, 0.0]]
    m[0][0] = caa
    m[1][0] = cab
    m[0][1] = cab
    m[1][1] = cbb
    return m

###############################################################################


@njit(float64(float64, float64), fastmath=True, cache=True)
def pair_gcd(v1: float,
             v2: float):
    """ Determine the Greatest Common Divisor of two integers using Euclid's
    algorithm. TODO - compare this with math.gcd(a,b) for speed. Also examine
    to see if I should not be declaring inputs as integers for NUMBA. """

    if v1 == 0 or v2 == 0:
        return 0

    while v2 != 0:
        temp = v2
        factor = v1 / v2
        v2 = v1 - factor * v2
        v1 = temp
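Note that factor = v1 / v2 is true division here, so v1 - factor * v2 is zero up to rounding and the loop degenerates after one pass; Euclid's algorithm needs the floored quotient. A corrected sketch (an assumption about the intended behavior, not the project's published fix):

from numba import njit, float64

@njit(float64(float64, float64), fastmath=True, cache=True)
def pair_gcd_floored(v1, v2):
    # Euclid's algorithm on floats, taking the remainder via the floored quotient
    if v1 == 0.0 or v2 == 0.0:
        return 0.0
    while v2 != 0.0:
        v1, v2 = v2, v1 - (v1 // v2) * v2
    return v1

# pair_gcd_floored(12.0, 18.0) -> 6.0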
Example #34
##############################################################################
# Copyright (C) 2018, 2019, 2020 Dominic O'Kane
##############################################################################

from math import sqrt
from numpy import ndarray
from numba import njit, float64, int32

##########################################################################


@njit(float64(float64[:]), fastmath=True, cache=True)
def mean(x: ndarray):
    """ Calculate the arithmetic mean of a vector of numbers x. """
    n = len(x)
    m = 0.0
    for i in range(0, n):
        m += x[i]
    m = m / n
    return m


##########################################################################


@njit(float64(float64[:]), fastmath=True, cache=True)
def stdev(x: ndarray):
    """ Calculate the standard deviation of a vector of numbers x. """
    n = len(x)
    m = mean(x)
    v = 0.0
Example #35
File: sabr.py  Project: samtaba/FinancePy
from ..utils.error import FinError
from ..utils.helpers import label_to_string

###############################################################################
###############################################################################


@njit
def _x(rho, z):
    """ Return function x used in Hagan's 2002 SABR lognormal vol expansion."""
    a = (1.0 - 2.0 * rho * z + z**2)**.5 + z - rho
    b = 1.0 - rho
    return np.log(a / b)


@njit(float64(float64[:], float64, float64, float64),
      fastmath=True,
      cache=True)
def vol_function_sabr(params, f, k, t):
    """ Black volatility implied by SABR model. """

    alpha = params[0]
    beta = params[1]
    rho = params[2]
    nu = params[3]

    if alpha < 1e-10:
        alpha = 1e-10

    # Negative strikes or forwards
    if k <= 0:
Example #36
    dx[9, 0] = np.conjugate(dx[0, 0])
    dx[10, 0] = np.conjugate(dx[1, 0])
    dx[11, 0] = np.conjugate(dx[2, 0])

    dx[12, 0] = np.conjugate(dx[3, 0])
    dx[13, 0] = np.conjugate(dx[4, 0])
    dx[14, 0] = np.conjugate(dx[5, 0])

    dx[15, 0] = np.conjugate(dx[6, 0])
    dx[16, 0] = np.conjugate(dx[7, 0])
    dx[17, 0] = np.conjugate(dx[8, 0])

    return dx


@jit(float64(int64, float64, float64, float64, float64))
def Shallow(T, Q1, Q2, Q3, e):
    etol = 1e-1
    dt = 0.01

    a = 1
    b = 1
    g = 2

    #e=0.1

    y = np.zeros((18, 1), dtype='complex')

    w = np.sqrt(1 + a**2)

    ha = Q1 + 0 * 1j
Example #37
        self.BF.ABOOK[year] = 1.0
        self.BF.ARETS[year] = 1.0026
        self.BF.APOPN[year] = 1.0
        self.BF.ACPIU[year] = 1.0
        self.BF.APOPDEP[year] = 1.0
        self.BF.ASOCSEC[year] = 0.9941
        self.BF.ACPIM[year] = 1.0
        self.BF.AUCOMP[year] = 1.0034
        self.BF.APOPSNR[year] = 1.0
        self.BF.AIPD[year] = 1.0
        self._blowup(year)
        self.s006 = self.WT["WT" + str(year)] * 0.01


@vectorize([
    float64(float64, float64, float64, float64, float64, float64, float64,
            float64, float64)
])
def imputed_cmbtp_itemizer(e17500, e00100, e18400, e62100, e00700, p04470,
                           e21040, e18500, e20800):
    """
    Global function that calculates _cmbtp_itemizer values
    (uses vectorize decorator to speed up calculations with NumPy arrays)
    """
    # pylint: disable=too-many-arguments
    medical_limited = max(0., e17500 - max(0., e00100) * 0.075)
    medical_adjustment = min(medical_limited, 0.025 * max(0., e00100))
    state_adjustment = max(0, e18400)
    return (e62100 - medical_adjustment + e00700 + p04470 + e21040 -
            state_adjustment - e00100 - e18500 - e20800)
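Because of the vectorize decorator, the function accepts equal-length NumPy arrays and returns one float64 per row; a usage sketch with made-up values:

import numpy as np

n = 3
zeros = np.zeros(n)
out = imputed_cmbtp_itemizer(
    np.full(n, 1000.0),    # e17500
    np.full(n, 50000.0),   # e00100
    zeros,                 # e18400
    np.full(n, 60000.0),   # e62100
    zeros, zeros, zeros, zeros, zeros)  # e00700, p04470, e21040, e18500, e20800
print(out)  # elementwise result, shape (3,)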
Example #38
File: math.py  Project: draustin/otk
    # Shift corrections are difference between correct first order terms and quadratic approximation ones.
    delta_gxkz = gxkz - fx * gxkz_p
    delta_gykz = gykz - fy * gykz_p

    # Evaluate zeroth order kz for x and y quadratic approximation.
    kz_px = -kxc**2 / (2 * k)
    kz_py = -kyc**2 / (2 * k)

    # Zeroth order correction is what is required to match k_z at (kxc, kyc) after quadratic approximation propagation
    # (with factors fx and fy) and shift correction has been applied.
    delta_kz = kz - fx * kz_px - fy * kz_py - delta_gxkz * kxc - delta_gykz * kyc

    return fx, fy, delta_kz, delta_gxkz, delta_gykz


@numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
def calc_kz_exact(k, kx, ky):
    return (k**2 - kx**2 - ky**2)**0.5


@numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
def calc_kz_paraxial(k, kx, ky):
    return k - (kx**2 + ky**2) / (2 * k)


@numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
def calc_kz_quadratic(k, kx, ky):
    return -(kx**2 + ky**2) / (2 * k)


@numba.vectorize([numba.float64(numba.float64, numba.float64)])
Example #39
    vec : ndarray
        point to rotate [x, y]
    cvec : ndarray
        point to rotate about [x, y]
    Returns
    -------
    ndarray
        numpy vector vec after being rotated by angle about cvec"""
    new_x = (vec[0] - cvec[0]) * math.cos(angle / 180 * math.pi) - (
        vec[1] - cvec[1]) * math.sin(angle / 180 * math.pi) + cvec[0]
    new_y = (vec[0] - cvec[0]) * math.sin(angle / 180 * math.pi) + (
        vec[1] - cvec[1]) * math.cos(angle / 180 * math.pi) + cvec[1]
    return np.array([new_x, new_y])


@njit(float64(float64[:]))
def angle(vec):
    """Determines the angle of the vector

    Parameters
    ----------
    vec : ndarray
        vector [x, y]
    Returns
    -------
    float
        Angle of vector"""
    if vec[0] == 0:
        return 90 * vec[1] / abs(vec[1])
    angl = math.atan(vec[1] / vec[0]) / math.pi * 180
    if angl < 0:
Example #41
def extract_sliding_window(waveforms, width, sampling_rate_ghz, sum_, peak_time):
    """
    This function performs the following operations:

    - Find the largest sum of width consecutive slices
    - Obtain the pulse time within a window defined by a peak finding
    algorithm, using the weighted average of the samples.

    This function is a numpy universal function which defines the operation
    applied on the waveform for every channel and pixel. Therefore in the
    code body of this function:
        - waveforms is a 1D array of size n_samples.
        - width is an integer

    The ret argument is required by numpy to create the numpy array which is
    returned. It can be ignored when calling this function.

    Parameters
    ----------
    waveforms : ndarray
        Waveforms stored in a numpy array.
        Shape: (n_pix, n_samples)
    width : ndarray or int
        Window size of integration window for each pixel.
    sampling_rate_ghz : float
        Sampling rate of the camera, in units of GHz
        Astropy units should have to_value('GHz') applied before being passed
    sum_ : ndarray
        Return argument for ufunc (ignore)
        Returns the sum of the waveform samples
    peak_time : ndarray
        Return argument for ufunc (ignore)
        Returns the peak_time in units "ns"

    Returns
    -------
    charge : ndarray
        Extracted charge.
        Shape: (n_pix)

    """

    # first find the cumulative waveform
    cwf = np.cumsum(waveforms)
    # add zero at the beginning so it is easier to subtract the two arrays later
    cwf = np.concatenate((np.zeros(1), cwf))
    sums = cwf[width:] - cwf[:-width]
    maxpos = np.argmax(sums)  # start of the window with largest sum
    sum_[0] = sums[maxpos]

    time_num = float64(0.0)
    time_den = float64(0.0)
    # now compute the timing as the average of non negative slices
    for isample in prange(maxpos, maxpos + width):
        if waveforms[isample] > 0:
            time_num += waveforms[isample] * isample
            time_den += waveforms[isample]

    peak_time[0] = time_num / time_den if time_den > 0 else maxpos + 0.5 * width
    # Convert to units of ns
    peak_time[0] /= sampling_rate_ghz
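The cumulative-sum trick above yields every width-sample window sum in one pass: after prepending a zero, cwf[width:] - cwf[:-width] is exactly the sum of each window. A small worked sketch:

import numpy as np

waveforms = np.array([1., 2., 5., 3., 1.])
width = 2

cwf = np.concatenate((np.zeros(1), np.cumsum(waveforms)))
sums = cwf[width:] - cwf[:-width]
print(sums)             # [3. 7. 8. 4.] -- the four 2-sample window sums
print(np.argmax(sums))  # 2: the best window starts at sample 2 (5 + 3)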
Example #42
#Both as multiples of C
alpha = 0  #Long-range pyramidal-pyramidal coupling
beta = 0  #Connectivity between inhibitory pop. and excitatory interneuron pop. (short-range)

#Sigmoid function parameters
e0 = 2.5  #Half of the maximum firing rate
v0 = 6  #V1/2
r0, r1, r2 = 0.56, 0.56, 0.56  #Slopes of sigmoid functions

#Initial conditions
ic = np.ones((1, nnodes)) * np.array(
    [0.131, 0.171, 0.343, 0.21, 3.07, 2.96, 25.36, 2.42])[:, None]


@vectorize([float64(float64, float64)], nopython=True)
#Sigmoid function
def s(v, r0):
    return (2 * e0) / (1 + np.exp(r0 * (v0 - v)))


@jit(float64[:, :](float64[:, :], float64), nopython=True)
#Jansen & Rit multicolumn model (intra-columnar outputs)
def f1(y, t):
    x0, x1, x2, x3, y0, y1, y2, y3 = y

    noise = np.random.normal(0, sigma, nnodes)

    x0_dot = y0
    y0_dot = A * a * (s(C2 * x1 - C4 * x2 + C * alpha * x3, r0)) - \
             2 * a * y0 - a**2 * x0
Example #43
from math import exp, sin, cos
from numba import jit, float64, int32
import time
import socket


@jit(nopython=True)
def trapz(F, a, b, n):
    h = (b - a) / n
    sum = 0.5 * (F(a) + F(b))
    for i in range(1, n):
        sum += F(i * h)
    return sum * h


@jit(float64(float64), nopython=True)
def f(x):
    return exp(-x) * x * x


@jit(float64(float64), nopython=True)
def g(x):
    if x < 0.5:
        h = -exp(-x)
    else:
        h = exp(x)
    return h * x * x


@jit(float64(float64), nopython=True)
def implicit(t):
Example #44
    ]))
Electrons = Collection(
    t.arrays([
        "Electron_pt", "Electron_eta", "Electron_phi", "Electron_mass",
        "Electron_jetIdx", "Electron_cutBased", "Electron_miniPFRelIso_all"
    ]))
Muons = Collection(
    t.arrays([
        "Muon_pt", "Muon_eta", "Muon_phi", "Muon_mass", "Muon_jetIdx",
        "Muon_mediumId", "Muon_miniPFRelIso_all"
    ]))
MET = Collection(t.arrays(["MET_pt"]))
print(Jets)


@jit(numba.float64(numba.float64, numba.float64))
def dPhi(phi1, phi2):
    dphi = np.abs(phi2 - phi1)
    if dphi >= np.pi: dphi -= 2.0 * np.pi
    if dphi < -np.pi: dphi += 2.0 * np.pi
    return dphi


@jit(numba.float64(numba.float64, numba.float64, numba.float64, numba.float64))
def deltaR(eta1, phi1, eta2, phi2):
    dphi = dPhi(phi1, phi2)
    deta = eta2 - eta1
    return (deta * deta + dphi * dphi)**0.5


@jit
Example #45
#dists = [0]
#radius = [15,20,25,30,35,40,45,50,60,70,80,90,100,120,140,160,180,200]
#n =      [12,12,12,12,12,14,14,16,16,18,18,20, 22, 24, 26, 28, 30, 32]
#centre = [ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
#inner =  [ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]


outfilename = 'pillars_r'+str(radius[0])+'nm_dose'+str(target_dose)+'_broader.txt'
#outfilename = 'test'



normalization = 1
# http://iopscience.iop.org/article/10.1143/JJAP.35.1929/pdf
@jit(float64(float64), nopython=True)
def calc_prox(r):
    # double-Gaussian plus exponential proximity point spread function
    # [return] = C/nm !!!
    return (1 / normalization) * (1 / (math.pi * (1 + eta_1 + eta_2))) * (
        (1 / alpha**2) * math.exp(-r**2 / alpha**2)
        + (eta_1 / beta**2) * math.exp(-r**2 / beta**2)
        + (eta_2 / (24 * gamma**2)) * math.exp(-math.sqrt(r / gamma)))

# integrate.quad returns a (value, abserr) tuple; keep only the value.
# Caveat: the jitted calc_prox baked in the global normalization == 1 at
# compile time, so rebinding the global here does not change already-compiled
# calls.
normalization = integrate.quad(lambda x: 2 * np.pi * x * calc_prox(x), 0, np.inf)[0]
print('norm: ' + str(normalization))
#normalization = 2.41701729505915



@jit(float64(float64, float64, float64, float64))  # ,nopython=True)
def dist(x0, y0, x, y):
    return math.sqrt((x0 - x) * (x0 - x) + (y0 - y) * (y0 - y))


@jit(float64[:](float64[:],float64[:],float64[:],float64[:],float64[:]),nopython=True)#,parallel=True)
Example #46

# helper functions
def boys(n, T):
    return hyp1f1(n + 0.5, n + 1.5, -T) / (2.0 * n + 1.0)


def gaussian_product_center(a, A, b, B):
    return (a * A + b * B) / (a + b)


@nb.njit(
    nb.float64(
        nb.int64,
        nb.int64,
        nb.int64,
        nb.float64,
        nb.float64,
        nb.float64,
    ))
def E(i, j, t, Qx, a, b):
    ''' Recursive definition of Hermite Gaussian coefficients.
        Returns a float.
        a: orbital exponent on Gaussian 'a' (e.g. alpha in the text)
        b: orbital exponent on Gaussian 'b' (e.g. beta in the text)
        i,j: orbital angular momentum number on Gaussian 'a' and 'b'
        t: number of nodes in the Hermite expansion (depends on type of
           integral, e.g. always zero for overlap integrals)
        Qx: distance between origins of Gaussian 'a' and 'b'
    '''

    p = a + b
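
The snippet cuts off right after `p = a + b`. For reference, the
McMurchie-Davidson recursion the docstring describes has the following
well-known form (an un-jitted sketch; `E_ref` is a hypothetical name, not
necessarily the exact elided body):

from math import exp

def E_ref(i, j, t, Qx, a, b):
    # reference (un-jitted) form of the Hermite expansion coefficients
    p = a + b
    q = a * b / p
    if (t < 0) or (t > (i + j)):
        return 0.0                 # out of bounds for t
    elif i == j == t == 0:
        return exp(-q * Qx * Qx)   # base case: the K_AB Gaussian prefactor
    elif j == 0:
        # decrement index i
        return ((1 / (2 * p)) * E_ref(i - 1, j, t - 1, Qx, a, b)
                - (q * Qx / a) * E_ref(i - 1, j, t, Qx, a, b)
                + (t + 1) * E_ref(i - 1, j, t + 1, Qx, a, b))
    else:
        # decrement index j
        return ((1 / (2 * p)) * E_ref(i, j - 1, t - 1, Qx, a, b)
                + (q * Qx / b) * E_ref(i, j - 1, t, Qx, a, b)
                + (t + 1) * E_ref(i, j - 1, t + 1, Qx, a, b))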
Example #47
                                         birthing_split_goodness=split_goodness,
                                         birthing_split_fraction=np.sum(hits['area'][split_i:]) / peak.area)
                return self.cluster_peak(peak_l) + self.cluster_peak(peak_r)
            else:
                self.log.debug("Proposed split at %d not good enough (%0.3f < %0.3f)" % (
                    split_i, split_goodness, split_threshold))

            # If we get here, no clustering was needed
            peak.interior_split_goodness = split_goodness
            peak.interior_split_fraction = min(np.sum(hits['area'][:max_split_ii]),
                                               np.sum(hits['area'][max_split_ii:])) / peak.area

        return [peak]


@numba.jit(numba.float64(numba.float64[:], numba.float64[:], numba.float64[:]),
           nopython=True)
def _sad_fallback(x, areas, fallback):
    # While there is a one-pass algorithm for the variance, I haven't found
    # one for the sum of absolute deviations; maybe one doesn't exist.
    # First calculate the weighted mean.
    mean = 0
    sum_weights = 0
    for i in range(len(x)):
        mean += x[i] * areas[i]
        sum_weights += areas[i]
    mean /= sum_weights

    # Now calculate the sum abs dev, ensuring each x contributes at least fallback
    sad = 0
    for i in range(len(x)):
        sad += max(fallback[i], abs(x[i] - mean)) * areas[i]
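
A hedged NumPy cross-check of the same accumulation (toy arrays; this matches
the loop above up to whatever normalization the truncated tail applies):

import numpy as np

x = np.array([1.0, 2.0, 4.0])
areas = np.array([1.0, 1.0, 2.0])
fallback = np.zeros_like(x)

mean = np.average(x, weights=areas)                           # 2.75
sad = np.sum(np.maximum(fallback, np.abs(x - mean)) * areas)  # 5.0
print(mean, sad)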
Example #48
            cy = sy // 2 + ys
            rx = min(cx, sx - cx, maxr)
            ry = min(cy, sy - cy, maxr)
            err = 0
            cn = 0
            for x in range(-rx + 1, 0):
                for y in range(-ry + 1, ry):
                    if msk[cx + x, cy + y] == 1 and msk[cx - x, cy - y] == 1:
                        cn += 1
                        err += abs(img[cx + x, cy + y] - img[cx - x, cy - y])
            out[xs + d, ys + d] = err / cn
    return out.argmin()


@_numba.vectorize(
    [_numba.float64(_numba.complex128),
     _numba.float32(_numba.complex64)],
    target="parallel",
)
def abs2(x):
    return x.real * x.real + x.imag * x.imag


@_numba.vectorize(
    [_numba.complex128(_numba.complex128),
     _numba.complex64(_numba.complex64)],
    target="parallel",
)
def abs2c(x):
    return x.real * x.real + x.imag * x.imag + 0j
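
A quick usage check of the two parallel ufuncs (hedged toy input):

import numpy as np

z = np.array([3 + 4j, 1 + 1j], dtype=np.complex128)
print(abs2(z))   # [25.  2.]  -- squared magnitude without the sqrt in np.abs
print(abs2c(z))  # [25.+0.j  2.+0.j]  -- same values, kept complex-typed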
Example #49
import sqlite3
import random

import pytest

from slumba import create_function, sqlite_udf
from numba import int64, float64, optional


@sqlite_udf(float64(float64))
def add_one(x):
    return x + 1.0


@sqlite_udf(optional(float64)(float64))
def add_one_optional(x):
    return x + 1.0 if x is not None else None


@sqlite_udf(int64(int64, float64))
def add_each_other(x, y):
    return x + int(y)
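
Registration of these UDFs goes through slumba's create_function; a hedged
sketch (the argument order is assumed to mirror
sqlite3.Connection.create_function, and `con_demo` is a hypothetical name):

# hedged sketch: register a jitted UDF and call it from SQL
con_demo = sqlite3.connect(':memory:')
create_function(con_demo, 'add_one', 1, add_one)
print(con_demo.execute('SELECT add_one(1.0)').fetchone())  # (2.0,)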


@pytest.fixture
def con():
    con = sqlite3.connect(':memory:')
    con.execute("""
        CREATE TABLE t (
            id INTEGER PRIMARY KEY,
            key VARCHAR(1),
Example #50
def add_cubic_field_from_mo(universe, rmin, rmax, nr, vector=None):
    '''
    Create a cubic field from a given vector (molecular orbital).

    Args:
        universe (:class:`~exatomic.universe.Universe`): Atomic universe
        rmin (float): Starting point for field dimensions
        rmax (float): Ending point for field dimensions
        nr (int): Number of discretization points along each field dimension
        vector: None, list, or int corresponding to vector index to generate (None will generate all fields)

    Returns:
        fields (list): List of cubic fields corresponding to vectors
    '''
    vectors = universe.momatrix.groupby('orbital')
    if isinstance(vector, int):
        vector = [vector]
    elif vector is None:
        vector = [key for key in vectors.groups.keys()]
    elif not isinstance(vector, list):
        raise TypeError()
    x = np.linspace(rmin, rmax, nr)
    y = np.linspace(rmin, rmax, nr)
    z = np.linspace(rmin, rmax, nr)
    dxi = x[1] - x[0]
    dyj = y[1] - y[0]
    dzk = z[1] - z[0]
    dv = dxi * dyj * dzk
#    x, y, z = meshgrid3d(x, y, z)
    # Get symbolic representations of the basis functions
    basis_funcs = _voluminate_gtfs(universe, x, y, z)
    if config['pkg_numba']:
        from numba import vectorize, float64
        nb = vectorize([float64(float64, float64, float64)], nopython=True)
        for i, func in enumerate(basis_funcs):
            func = sy.lambdify(('x', 'y', 'z'), func, 'numpy')
            basis_funcs[i] = nb(func)
    else:
        basis_funcs = [np.vectorize(func) for func in basis_funcs]
    nn = len(basis_funcs)
    n = len(vector)
    # At this point, basis_funcs contains non-normalized ufunc.
    # Now discretize and normalize the basis function values.
    bf_values = np.empty((nn, nr**3), dtype=np.float64)
    for i in range(nn):
        v = basis_funcs[i](x, y, z)
        v /= np.sqrt((v**2 * dv).sum())
        bf_values[i, :] = v
    # Finally, add basis function values to form vectors
    # (normalized molecular orbitals).
    values = np.empty((n, nr**3), dtype=np.float64)
    dxi = [dxi] * n
    dyj = [dyj] * n
    dzk = [dzk] * n
    dxj = [0.0] * n
    dxk = [0.0] * n
    dyi = [0.0] * n
    dyk = [0.0] * n
    dzi = [0.0] * n
    dzj = [0.0] * n
    nx = [nr] * n
    ny = [nr] * n
    nz = [nr] * n
    ox = [rmin] * n
    oy = [rmin] * n
    oz = [rmin] * n
    frame = np.empty((n, ), dtype=np.int64)
    label = np.empty((n, ), dtype=np.int64)
    i = 0
    print('n', n)
    print('nn', nn)
    print('vectors')
    print(type(vectors))
    print(len(vectors))
    for vno, vec in vectors:
        if vno in vector:
            #frame[i] = universe.orbital.ix[vno, 'frame']
            label[i] = vno
            v = 0
            for c, f in zip(vec['coefficient'], vec['basis_function']):
                v += c * bf_values[f]
            v /= np.sqrt((v**2 * dv).sum())
            values[i, :] = v
            i += 1
    data = pd.DataFrame.from_dict({'dxi': dxi, 'dxj': dxj, 'dxk': dxk, 'dyi': dyi,
                                   'dyj': dyj, 'dyk': dyk, 'dzi': dzi, 'dzj': dzj,
                                   'dzk': dzk, 'nx': nx, 'ny': ny, 'nz': nz, 'label': label,
                                   'ox': ox, 'oy': oy, 'oz': oz, 'frame': [0] * n})#frame})
    values = [Series(v) for v in values.tolist()]
    return AtomicField(data, field_values=values)
Example #51
'''
Demonstrate the vectorize API with automatic memory transfer and
manual memory transfer.
'''
from __future__ import print_function
from timeit import default_timer as timer
from numba import vectorize, float64, cuda
import numpy


@vectorize([float64(float64, float64)], target='cuda')
def vector_mul(a, b):
    return  a * b

a = numpy.random.rand(10000000)
b = numpy.random.rand(10000000)

# Let NumbaPro automatically convert host memory to device memory
ts = timer()
for i in range(10):
    result = vector_mul(a, b)
te = timer()

print('auto', te - ts)


# Manual conversion between host and device memory
ts = timer()
for i in range(10):
    # copy host memory to device
    da = cuda.to_device(a)
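The snippet cuts off after the first device copy; a hedged completion of the
manual-transfer loop using standard numba.cuda calls might read:

    # copy the second operand, compute on the device, copy the result back
    db = cuda.to_device(b)
    dresult = vector_mul(da, db)   # device inputs keep the result on device
    result = dresult.copy_to_host()
te = timer()

print('manual', te - ts)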
Example #52
def min_levenstein_distance(l1, l2):
    symbols = list(set(l1+l2))
    symbol2ix = {v: k for k, v in enumerate(symbols)}
    l1_arr = np.fromiter((symbol2ix[s] for s in l1), dtype=np.uint32)
    l2_arr = np.fromiter((symbol2ix[s] for s in l2), dtype=np.uint32)
    return nb_min_levestein_distance(l1_arr, l2_arr)


def levenstein_distance(l1, l2):
    symbols = list(set(l1+l2))
    symbol2ix = {v: k for k, v in enumerate(symbols)}
    l1_arr = np.fromiter((symbol2ix[s] for s in l1), dtype=np.uint32)
    l2_arr = np.fromiter((symbol2ix[s] for s in l2), dtype=np.uint32)
    return nb_levestein_distance(l1_arr, l2_arr)[len(l1_arr), len(l2_arr)]

@nb.njit  # takes whole arrays and builds a DP matrix, so njit, not vectorize
def nb_levestein_distance(l1, l2):
    n = len(l1)
    m = len(l2)
    d = np.empty((n+1, m+1), dtype=np.int32)
    d[:, 0] = np.arange(n+1)
    d[0, :] = np.arange(m+1)
    for i in range(1, n+1):
        for j in range(1, m+1):
            if l1[i-1] == l2[j-1]:
                d[i, j] = d[i-1, j-1]
            else:
                d[i, j] = min(d[i, j-1] + 1,
                              d[i-1, j] + 1,
                              d[i-1, j-1] + 1)
Example #53
    b_x = interpolate_particle(fields_k.B_x, fields_k_1.B_x, i, j,
                               a000, a001, a010, a011, a100, a101, a110, a111)
    b_y = interpolate_particle(fields_k.B_y, fields_k_1.B_y, i, j,
                               a000, a001, a010, a011, a100, a101, a110, a111)
    b_z = interpolate_particle(fields_k.B_z, fields_k_1.B_z, i, j,
                               a000, a001, a010, a011, a100, a101, a110, a111)
                               
    e_vec = np.array((e_x, e_y, e_z))
    b_vec = np.array((b_x, b_y, b_z))
    return e_vec, b_vec


# Functions like in beam_calculate.py in lcode2dPy


@nb.vectorize([nb.float64(nb.float64, nb.float64, nb.float64)], cache=True)
def beam_substepping_step(q_m, p_z, substepping_energy):
    dt = 1.0
    max_dt = np.sqrt(np.sqrt(1 / q_m ** 2 + p_z ** 2) / substepping_energy)
    while dt > max_dt:
        dt /= 2.0
    return dt


@nb.njit
def cross_nb(a, b):
    c = np.zeros_like(a)
    c[0] = a[1] * b[2] - a[2] * b[1]
    c[1] = a[2] * b[0] - a[0] * b[2]
    c[2] = a[0] * b[1] - a[1] * b[0]
    return c
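
A hedged sanity check of cross_nb against NumPy's reference implementation:

import numpy as np

a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])
print(cross_nb(a, b))  # [0. 0. 1.], identical to np.cross(a, b)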
Example #54
            for j in range(des_ngb):
                q = neighbor_dists[i, j]/h
                if q <= 0.5:
                    n_ngb += (1 - 6*q**2 + 6*q**3)
                elif q <= 1.0:
                    n_ngb += 2*(1-q)**3
            n_ngb *= norm
            if n_ngb > des_ngb:
                upper = h
            else:
                lower = h
            error = np.fabs(n_ngb-des_ngb)
        hsml[i] = h
    return hsml
        
@vectorize([float32(float32), float64(float64)])
def Kernel(q):
    # cubic spline SPH kernel (unnormalized) with compact support q <= 1
    if q <= 0.5:
        return 1 - 6*q**2 + 6*q**3
    elif q <= 1.0:
        return 2 * (1-q)**3
    else:
        return 0.0
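
A hedged usage check over the kernel's support (toy values):

import numpy as np

q = np.array([0.0, 0.4, 0.8, 1.2])
print(Kernel(q))  # [1.    0.424 0.016 0.   ]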
        
@jit
def DF(f, ngb):
    # pairwise differences f[ngb[i, j]] - f[i] over the neighbour lists
    df = np.empty(ngb.shape)
    for i in range(ngb.shape[0]):
        for j in range(ngb.shape[1]):
            df[i, j] = f[ngb[i, j]] - f[i]
    return df
    
Example #55
File: gen.py  Project: cpcloud/cysqlite3
        raise_string = 'raise {}'.format(self.visit(node.exc))
        cause = getattr(node, 'cause', None)

        if cause is not None:
            return '{} from {}'.format(raise_string, self.visit(cause))
        return raise_string

    def visit_Subscript(self, node):
        return '{}[{}]'.format(self.visit(node.value), self.visit(node.slice))

    def visit_Index(self, node):
        return self.visit(node.value)

    def visit_Module(self, node):
        return '\n'.join(map(self.visit, node.body))


def sourcify(mod):
    return SourceVisitor().visit(mod)


if __name__ == '__main__':
    from numba import jit, float64, int64

    @jit(float64(int64, int64), nopython=True)
    def g(x, y):
        return x + y * 1.0

    # this shows what the compiled function looks like
    print(SourceVisitor().visit(gen_scalar(g, 'g_unit')))
Example #56
"""
Created on Mon Sep 18 23:47:03 2017

@author: Rui Silva

"""
from numpy import random
from random import shuffle
from random import choice
import matplotlib.pyplot as plt
import scipy.stats as stats
import time
import pandas as pd
import numpy as np
from numba import jit, prange, float64, int32

@jit(float64(float64))
def quality_cdf(u):
    return u

@jit(float64(float64, int32))
def share_proba(alpha, repetitions):
    #return ((repetitions + 5) ** 2) * (alpha ** (repetitions + 5))
    return np.exp(-((repetitions-40)/50)**2 )

@jit(float64(float64))
def time_distrib(l):
    # poisson() without a size argument returns a scalar, matching float64
    return 1 + random.poisson(l)

def publishTime():
    return 3000
Example #57
import numpy as np
import numba as nb

@nb.jit(nb.float64(nb.int32, nb.int32))
def solve(L, R):
    A = np.zeros((R + 6, R + 6), dtype=np.float64)
    v = np.zeros(R + 6, dtype=np.float64)
    for x in range(R + 6):
        A[x, x] = (R - L + 1) * (R - L + 1)
        v[x] = 5 * (R - L + 1) * (R - L + 1)

        Lp = max(x - 5, L)
        v[x] += (R - L + 1) * (Lp - L) * (L + Lp - 1) / 2.

        for a in range(Lp, R + 1):
            for b in range(L + x, R + x + 1):
                if a <= b:
                    v[x] += a
                    A[x, b - a] -= 1.
                else:
                    v[x] += b
                    A[x, a - b] -= 1.
    f = np.linalg.solve(A, v)
    return f[0] - 5

import time
ans = 0
K = 100
t0 = time.time()
# for i in range(10):
#     solve(1, 100)
Example #58
interpTable4 = np.load("ISWL4Z0.npz")
interpTable5 = np.load("ISWL5Z0.npz")
isw20 = interpTable20['isw']
isw3 = interpTable3['isw']
isw4 = interpTable4['isw']
isw5 = interpTable5['isw']
kk0 = interpTable3['kVals']

ISWinterp2 = interpolate.RectBivariateSpline(kk, zz, isw2)
ISWinterp20 = interpolate.interp1d(kk0, isw20)
ISWinterp3 = interpolate.interp1d(kk0, isw3)
ISWinterp4 = interpolate.interp1d(kk0, isw4)
ISWinterp5 = interpolate.interp1d(kk0, isw5)


# Note: the scipy interpolator calls below force numba into object mode,
# so this signature adds little beyond documentation.
@jit(float64(int32, float64, float64))
def interpolation(l, k, z):
    if (l == 2):
        if (z == 0):
            return ISWinterp20(k)
        else:
            return ISWinterp2(k, z)[0][0]

    if (l == 3):
        return ISWinterp3(k)

    if (l == 4):
        return ISWinterp4(k)

    if (l == 5):
        return ISWinterp5(k)
Example #59
        self.e00900p = self._earning_split * self.e00900
        self.e00900s = (1 - self._earning_split) * self.e00900

        self.e02100p = self._earning_split * self.e02100
        self.e02100s = (1 - self._earning_split) * self.e02100

    def _imputed_cmbtp_itemizer(self):
        return imputed_cmbtp_itemizer(self.e17500, self.e00100, self.e18400,
                                      self.e62100, self.e00700,
                                      self.p04470, self.e21040,
                                      self.e18500, self.e20800)


@vectorize([float64(float64, float64, float64,
                    float64, float64,
                    float64, float64,
                    float64, float64)])
def imputed_cmbtp_itemizer(e17500, e00100, e18400,
                           e62100, e00700,
                           e04470, e21040,
                           e18500, e20800):
    """
    Calculates _cmbtp_itemizer values
    Uses vectorize decorator to speed-up calculations with NumPy arrays
    """
    medical_limited = max(0., e17500 - max(0., e00100) * 0.075)
    medical_adjustment = min(medical_limited, 0.025 * max(0., e00100))
    state_adjustment = max(0, e18400)
    return (e62100 - medical_adjustment + e00700 + e04470 + e21040 -
            state_adjustment - e00100 - e18500 - e20800)
Example #60
import unittest
from typing import Callable, List, Union, Dict
import numpy as np
from numba import jit, vectorize, float64
import math
import QuantLib as ql


@vectorize([float64(float64, float64, float64, float64, float64, float64)])
def alpha_root(atm_forward_rate, atm_forward_vol, time_to_maturity, beta, nu,
               rho):
    """
    Solver for alpha parameter as function of beta, nu, rho. Returns the smallest
    positive roots of the alpha equation such that the at-the-money basis point
    volatility is recovered exactly.
    """
    p2 = 0.25 * (beta * rho * nu * time_to_maturity) / (
        (atm_forward_rate**(1 - beta)) * atm_forward_vol)
    p2 -= beta * (2 -
                  beta) * time_to_maturity / (24 *
                                              (atm_forward_rate**(2 - beta)))

    p1 = (1 / atm_forward_vol) * (
        1 + (2 - 3 * rho * rho) * nu * nu * time_to_maturity / 24)

    p0 = -(1 / (atm_forward_rate**beta))

    coeffs = np.array([p2, p1, p0])
    roots = np.roots(coeffs)

    # replace non-positive roots with a large sentinel and take the smallest
    # remaining one (assumes the roots are real)
    return np.where(roots <= 0, 1e5, roots).min()