Example #1
def _vector_sequence_similarity_dot(x, y, locality=0.5):
    nx = len(x)
    ny = len(y)
    z = np.dot(x, y.T)
    m2 = 0.
    for i in prange(nx):
        for j in prange(ny):
            seye = 1. / (np.abs(i - j) + 1)
            zij = z[i, j] * (locality * (seye - 1) + 1)
            z[i, j] = zij
        m2 += z[i, :].max()
    m1 = 0.
    for j in prange(ny):
        m1 += z[:, j].max()
    return 0.5 * (m1 + m2) / (nx + ny)
Example #2
def loops_NumbaJit_parallel(csm, e, h, r0, rm, kj):
    """ Workaround for the prange error in jit. See documentation comment of
    'loops_NumbaJit_parallel_FirstWritingOfSteer'.
    For info on the numba decorator, see 'vectorizedOptimized_NumbaJit_Parallel'.
    """
    nFreqs = csm.shape[0]
    nGridPoints = r0.shape[0]
    nMics = csm.shape[1]
    beamformOutput = np.zeros((nFreqs, nGridPoints), np.float64)
    
    for cntFreqs in range(nFreqs):
        kjj = kj[cntFreqs].imag
        for cntGrid in prange(nGridPoints):
            r01 = r0[cntGrid]
            rs = r01 ** 2
            
            temp1 = 0.0
            for cntMics in range(nMics):
                temp2 = 0.0
                rm1 = rm[cntGrid, cntMics]
                temp3 = np.float32(kjj * (rm1 - r01))
                steerVec = (np.cos(temp3) - 1j * np.sin(temp3)) * rm1
                
                for cntMics2 in range(cntMics):
                    rm1 = rm[cntGrid, cntMics2]
                    temp3 = np.float32(kjj * (rm1 - r01))
                    steerVec1 = (np.cos(temp3) - 1j * np.sin(temp3)) * rm1  # Steering vec is calculated redundantly--> very slow
                    temp2 += csm[cntFreqs, cntMics2, cntMics] * steerVec1
                temp1 += 2 * (temp2 * steerVec.conjugate()).real
                temp1 += (csm[cntFreqs, cntMics, cntMics] * steerVec.conjugate() * steerVec).real
            beamformOutput[cntFreqs, cntGrid] = temp1 / rs
    return beamformOutput
Example #3
def lu_parallel_2(x):
    upper = np.asfortranarray(np.zeros(x.shape, dtype=np.float64))
    n = len(x)
    lower = np.ascontiguousarray(np.eye(n))

    for i in range(n):

        for k in prange(i, n):
            total = lower[i, :] @ upper[:, k]
            upper[i, k] = x[i, k] - total

        for k in prange(i + 1, n):
            total = lower[k, :] @ upper[:, i]
            lower[k, i] = (x[k, i] - total) / upper[i, i]

    return lower, upper
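A quick sanity check for the Doolittle-style factorization above: numba.prange falls back to a plain range when the function is not compiled, so the routine can be exercised directly in pure Python. A minimal sketch, assuming a well-conditioned input so the pivots upper[i, i] stay nonzero:

import numpy as np
from numba import prange  # behaves like range outside of a jitted function

x = np.random.rand(5, 5) + 5.0 * np.eye(5)  # diagonally dominant: safe pivots
lower, upper = lu_parallel_2(x)
assert np.allclose(lower @ upper, x)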
Example #4
    def _eval_metric(clickthroughs, results_lookup, results_lookup_idx, metric):
        """Apply metric to clickthroughs

        Parameters
        ----------
        clickthroughs : np.ndarray
            return value of self._simplify_df
        results_lookup : np.ndarray
            return value of self._build_results_lookup
        results_lookup_idx : np.ndarray
            return value of self._build_results_lookup_indexes
        metric : callable
            Must be decorated with numba.njit. Will be called with two
            arguments. First a 2-d ndarray with the first dimension
            representing the character length of the prefix search and the
            second containing the page_id of the top-k results in rank order.
            The second argument is the clicked page_id. The metric must
            return a single float.

        Returns
        -------
        np.ndarray
            result of applying metric to each row of clickthroughs
        """
        out = np.empty(clickthroughs.shape[0], dtype=np.float32)
        for i in numba.prange(clickthroughs.shape[0]):
            cat_id, clickpage = clickthroughs[i]
            # list of cat_ids for prefix searches on cat_id
            # from shortest to longest
            results_list_idx = results_lookup_idx[cat_id]
            # the search results for all prefix searches of cat_id
            # up to self.max_prefix_len
            searchterm_results = results_lookup[results_list_idx]
            out[i] = metric(searchterm_results, clickpage)
        return out
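The metric argument must be an njit-compiled callable with the two-argument signature described above. A minimal sketch of a compatible metric (a hypothetical reciprocal-rank scorer, not part of the original class) might look like:

import numba

@numba.njit
def reciprocal_rank(searchterm_results, clickpage):
    # Score only the longest prefix search, purely as an illustration.
    results = searchterm_results[-1]
    for rank in range(results.shape[0]):
        if results[rank] == clickpage:
            return 1.0 / (rank + 1.0)
    return 0.0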
Example #5
def gini_coefficient(y):
    r"""
    Implements the Gini inequality index

    Parameters
    -----------
    y : array_like(float)
        Array of income/wealth for each individual. Ordered or unordered is fine

    Returns
    -------
    Gini index: float
        The Gini index describing the inequality of the array of income/wealth

    References
    ----------

    https://en.wikipedia.org/wiki/Gini_coefficient
    """
    n = len(y)
    i_sum = np.zeros(n)
    for i in prange(n):
        for j in range(n):
            i_sum[i] += abs(y[i] - y[j])
    return np.sum(i_sum) / (2 * n * np.sum(y))
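For the prange loop to actually run in parallel, the function has to be compiled with numba's parallel mode; a minimal usage sketch, assuming the definition above:

import numpy as np
from numba import njit

gini_parallel = njit(parallel=True)(gini_coefficient)
incomes = np.random.exponential(scale=1.0, size=10_000)
print(gini_parallel(incomes))  # close to 0.5 for exponentially distributed income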
Example #6
def loops_NumbaJit_parallelSlow(csm, r0, rm, kj):
    nFreqs = csm.shape[0]
    nGridPoints = len(r0)
    nMics = csm.shape[1]
    beamformOutput = np.zeros((nFreqs, nGridPoints), np.float64)
    
    for cntFreqs in range(nFreqs):
        kjj = kj[cntFreqs].imag
        for cntGrid in prange(nGridPoints):
            r01 = r0[cntGrid]
            rs = r01 ** 2
            
            temp1 = 0.0
            for cntMics in range(nMics):
                temp2 = 0.0
                rm1 = rm[cntGrid, cntMics]
                temp3 = np.float32(kjj * (rm1 - r01))
                steerVec = (np.cos(temp3) - 1j * np.sin(temp3)) * rm1
                
                for cntMics2 in range(cntMics):
                    rm1 = rm[cntGrid, cntMics2]
                    temp3 = np.float32(kjj * (rm1 - r01))
                    steerVec1 = (np.cos(temp3) - 1j * np.sin(temp3)) * rm1
                    temp2 += csm[cntFreqs, cntMics2, cntMics] * steerVec1
                temp1 += 2 * (temp2 * steerVec.conjugate()).real
                temp1 += (csm[cntFreqs, cntMics, cntMics] * steerVec.conjugate() * steerVec).real
            
            beamformOutput[cntFreqs, cntGrid] = (temp1 / rs).real
    return beamformOutput
Example #7
def neighbor_average_waveform(waveforms, neighbors, lwt):
    """
    Obtain the average waveform built from the neighbors of each pixel

    Parameters
    ----------
    waveforms : ndarray
        Waveforms stored in a numpy array.
        Shape: (n_chan, n_pix, n_samples)
    neighbors : ndarray
        2D array where each row is [pixel index, one neighbor of that pixel].
        Changes per telescope.
        Can be obtained from
        `ctapipe.instrument.CameraGeometry.neighbor_matrix_where`.
    lwt: int
        Weight of the local pixel (0: peak from neighbors only,
        1: local pixel counts as much as any neighbor)

    Returns
    -------
    average_wf : ndarray
        Average of neighbor waveforms for each pixel.
        Shape: (n_chan, n_pix, n_samples)

    """
    n_neighbors = neighbors.shape[0]
    sum_ = waveforms * lwt
    n = np.zeros(waveforms.shape)
    for i in prange(n_neighbors):
        pixel = neighbors[i, 0]
        neighbor = neighbors[i, 1]
        for channel in range(waveforms.shape[0]):
            sum_[channel, pixel] += waveforms[channel, neighbor]
            n[channel, pixel] += 1
    return sum_ / n
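The neighbors argument is a flat edge list with one [pixel, neighbor] pair per row. A hedged toy call for a three-pixel line camera (pixel 1 adjacent to pixels 0 and 2), with lwt=0 so only neighbors contribute:

import numpy as np

waveforms = np.random.rand(1, 3, 10)  # (n_chan, n_pix, n_samples)
neighbors = np.array([[0, 1],
                      [1, 0],
                      [1, 2],
                      [2, 1]])
average_wf = neighbor_average_waveform(waveforms, neighbors, 0)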
Example #8
def calc_fitness(population, exposure_indices, target, v_alpha, h_alpha, v_beta, h_beta):
    fitness = np.zeros(population.shape[1],dtype=np.float32)
    exposure = np.zeros(target.shape,dtype=np.float32)
    field = np.zeros(target.shape, dtype=np.float32)
    for p in range(population.shape[1]):
        field *= 0
        set_doses_field(field, exposure_indices, population[:, p])
        exposure = calc_exposure(field, v_alpha, h_alpha, v_beta, h_beta)
        #fitness[p] = np.sum(np.abs(np.subtract(target,exposure)))#/exposure_indices.shape[0]
        buf = 0
        for i in prange(target.shape[0]):
            for j in range(target.shape[1]):
                if target[i,j] > 0:
                    buf += np.abs(exposure[i,j]-target[i,j])**2
        fitness[p] = fitness[p] + buf
        # #fitness[p] += np.sum(population[:,p]>np.var(population[:,p]))#/exposure_indices.shape[0]
        # buf = 0
        # for i in range(field.shape[0]):
        #     for j in range(field.shape[1]):
        #         if field[i,j] > 0:
        #             neighbors = np.array([field[i+1,j],field[i-1,j],field[i,j+1],field[i,j-1],field[i+1,j+1],field[i+1,j-1],field[i-1,j+1],field[i-1,j-1]],dtype=np.float32)
        #             notzero = np.sum(neighbors>0)+1
        #             buf += np.abs(field[i,j]- (field[i,j] +field[i+1,j]+field[i,j+1]+field[i-1,j]+field[i,j-1]+field[i+1,j+1]+field[i+1,j-1]+field[i-1,j+1]+field[i-1,j-1])/notzero)**2
        # fitness[p] = fitness[p] + buf

    fitness /= exposure_indices.shape[0]
    #fitness /= fitness.mean()
    return fitness
Example #9
def get_tem_potential_numba(time, R0, vnorm, vdir, center, rcut, inte1, 
        rr, dr, fr_val, conj_c2r, l, m, jmx, ind_lm, ind_lmm, V_time):
    """
        Numba version of the computation of the external potential in time
        for tem calculations
    """
    for it in nb.prange(time.shape[0]):
        R_sub = R0 + vnorm*vdir*(time[it] - time[0]) - center
        norm = np.sqrt(np.dot(R_sub, R_sub))

        if norm > rcut:
            I1 = inte1/(norm**(l+1))
            I2 = 0.0
        else:
            rsub_max = (np.abs(rr - norm)).argmin() # find_nearest_index(rr, norm)

            I1 = np.sum(fr_val[0:rsub_max+1]*
                    rr[0:rsub_max+1]**(l+2)*rr[0:rsub_max+1])
            I2 = np.sum(fr_val[rsub_max+1:]*
                    rr[rsub_max+1:]/(rr[rsub_max+1:]**(l-1)))


            I1 = I1*dr/(norm**(l+1))
            I2 = I2*(norm**l)*dr
        clm_tem = csphar_numba(R_sub, l)
        clm = (4*np.pi/(2*l+1))*clm_tem[ind_lm]*(I1 + I2)
        clmm = (4*np.pi/(2*l+1))*clm_tem[ind_lmm]*(I1 + I2)
        rlm = c2r_lm(conj_c2r, jmx, clm, clmm, m)
        V_time[it] = rlm + 0.0j
Example #10
def kernel_run(array, alpha, result):
    for i in prange(array.shape[0]):
        sqr_norm = 0.
        for d in range(array.shape[1]):
            sqr_norm += array[i, d]**2 * alpha[d]

        result[i] = math.sqrt(sqr_norm)
Example #11
def __vector_sequence_similarity_euclid(x, y, z, nx, ny, locality=0.5):
    nx = len(x)
    ny = len(y)
    m1 = 0.
    m2 = 0.
    for i in prange(nx):
        xi = x[i]
        for j in prange(ny):
            yj = y[j]
            zij = (1. - (((xi - yj) ** 2).sum() ** 0.5))
            seye = 1. / (np.abs(i - j) + 1)
            zij *= locality * (seye - 1) + 1
            z[i, j] = zij
        m2 += z[i, :].max()
    for j in prange(ny):
        m1 += z[:, j].max()
    return (m1 + m2) / (nx + ny)
Example #12
def prange_reduction2():
    """
    >>> prange_reduction2()
    49999995000000.0
    """
    sum = 0.0
    for i in numba.prange(10000000):
        sum += i
    return sum
Example #13
def simple_prange_lastprivate():
    """
    >>> simple_prange_lastprivate()
    10
    """
    var = 20
    for i in numba.prange(1):
        var = 10
    return var
Example #14
def calc_pi(NUM):
    counter = 0
    for i in prange(NUM):
        x = random.random()
        y = random.random()
        if x*x+y*y < 1.0:
            counter += 1
    pi = 4.0*counter/NUM
    return pi
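A sketch of how this Monte Carlo estimator is typically compiled and called, assuming random and prange are imported in the defining module:

from numba import njit

calc_pi_parallel = njit(parallel=True)(calc_pi)
print(calc_pi_parallel(10_000_000))  # roughly 3.1415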
Example #15
def convolve_with_vector(field,exposure,v,h):
    buf = np.zeros(field.shape,dtype=np.float32)

    for j in prange(field.shape[1]):
        for i in range(field.shape[0]):
            if field[i,j] > 0:
                for k in range(v.shape[0]):
                    fi = i+k-int((v.shape[0]-1)/2)
                    if fi >= 0 and fi < field.shape[0]:
                        buf[fi,j] += field[i,j]*v[k]

    for i in prange(field.shape[0]):
        for j in range(field.shape[1]):
            if buf[i,j] < 0:
                for k in range(h.shape[0]):
                    fj = j+k-int((h.shape[0])/2)
                    if fj >= 0 and fj < field.shape[1]:
                        exposure[i,fj] += buf[i,j]*h[k]
Example #16
def simple_prange_reduction():
    """
    >>> simple_prange_reduction()
    15
    """
    var = 10
    for i in numba.prange(1):
        var += 5
    return var
Example #17
def loops_Njit_Parallel_Prange(csm, SpecAllChn):
    nFreqs = csm.shape[0]
    nMics = csm.shape[1]
    for cntFreq in range(nFreqs):
        for cntRow in prange(nMics):
            temp = np.conj(SpecAllChn[cntFreq, cntRow])
            for cntColumn in range(nMics):
                csm[cntFreq, cntRow, cntColumn] += temp * SpecAllChn[cntFreq, cntColumn]
    return csm
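This builds the cross-spectral matrix row by row from the conjugated channel spectra. A hedged usage sketch with made-up dimensions (4 frequency bins, 8 channels); csm must be preallocated as a complex array:

import numpy as np
from numba import njit, prange

build_csm = njit(parallel=True)(loops_Njit_Parallel_Prange)
spectra = np.random.randn(4, 8) + 1j * np.random.randn(4, 8)
csm = np.zeros((4, 8, 8), dtype=np.complex128)
build_csm(csm, spectra)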
Example #18
def _downsample_2d_std_var(src, mask, use_mask, method, fill_value, mode_rank, out):
    src_w, src_h, out_w, out_h = _get_dimensions(src, out)

    if src_w == out_w and src_h == out_h:
        return src

    if out_w > src_w or out_h > src_h:
        raise ValueError("invalid target size")

    scale_x = src_w / out_w
    scale_y = src_h / out_h

    for out_y in prange(out_h):
        src_yf0 = scale_y * out_y
        src_yf1 = src_yf0 + scale_y
        src_y0 = int(src_yf0)
        src_y1 = int(src_yf1)
        wy0 = 1.0 - (src_yf0 - src_y0)
        wy1 = src_yf1 - src_y1
        if wy1 < _EPS:
            wy1 = 1.0
            if src_y1 > src_y0:
                src_y1 -= 1
        for out_x in range(out_w):
            src_xf0 = scale_x * out_x
            src_xf1 = src_xf0 + scale_x
            src_x0 = int(src_xf0)
            src_x1 = int(src_xf1)
            wx0 = 1.0 - (src_xf0 - src_x0)
            wx1 = src_xf1 - src_x1
            if wx1 < _EPS:
                wx1 = 1.0
                if src_x1 > src_x0:
                    src_x1 -= 1
            v_sum = 0.0
            w_sum = 0.0
            wv_sum = 0.0
            wvv_sum = 0.0
            for src_y in range(src_y0, src_y1 + 1):
                wy = wy0 if (src_y == src_y0) else wy1 if (src_y == src_y1) else 1.0
                for src_x in range(src_x0, src_x1 + 1):
                    wx = wx0 if (src_x == src_x0) else wx1 if (src_x == src_x1) else 1.0
                    v = src[src_y, src_x]
                    if np.isfinite(v) and not (use_mask and mask[src_y, src_x]):
                        w = wx * wy
                        v_sum += v
                        w_sum += w
                        wv_sum += w * v
                        wvv_sum += w * v * v
            if w_sum < _EPS:
                out[out_y, out_x] = fill_value
            else:
                out[out_y, out_x] = (wvv_sum * w_sum - wv_sum * wv_sum) / w_sum / w_sum
    if method == DS_STD:
        out = np.sqrt(out)
    return out
Example #19
def lnlike_normal_v(o, m, e, lcids):
    m = atleast_2d(m)
    npv = m.shape[0]
    npt = o.size
    lnl = zeros(npv)
    for i in prange(npv):
        for j in range(npt):
            k = lcids[j]
            lnl[i] += -log(e[i,k]) - 0.5*log(2*pi) - 0.5*((o[j]-m[i,j])/e[i,k])**2
    return lnl
Example #20
def prange_reduction_and_privates():
    """
    >>> prange_reduction_and_privates()
    100.0
    """
    sum = 10.0
    for i in numba.prange(10):
        j = i * 2
        sum += j
    return sum
Example #21
def calculate_beta_sobolev(tau_sobolevs, beta_sobolevs):
    for i in prange(len(tau_sobolevs)):
        if tau_sobolevs[i] > 1e3:
            beta_sobolevs[i] = tau_sobolevs[i]**-1
        elif tau_sobolevs[i] < 1e-4:
            beta_sobolevs[i] = 1 - 0.5 * tau_sobolevs[i]
        else:
            beta_sobolevs[i] = (1 - np.exp(-tau_sobolevs[i])) / (
                tau_sobolevs[i])
    return beta_sobolevs
Example #22
def ea_iter_v(t, t0, p, e, w):
    Ma = mean_anomaly(t, t0, p, e, w)
    ec = e*sin(Ma)/(1.0 - e*cos(Ma))
    for j in prange(len(t)):
        for k in range(15):
            ect   = ec[j]
            ec[j] = e*sin(Ma[j]+ec[j])
            if (abs(ect-ec[j]) < 1e-4):
                break
    Ea  = Ma + ec
    return Ea
Example #23
def check_limits(population):
    """
    Check that all individuals of the population are bigger than 0, meaning that no negative exposure is allowed
    :param population: Population matrix
    :return: corrected population
    """
    for i in prange(population.shape[1]):
        for j in range(population.shape[0]):
            if population[j, i] < 0:
                population[j, i] = 0
    return population
Example #24
def _fj(j, l, m, a, b):
    """From Handbook of Computational Quantum Chemistry by David B. Cook
    in chapter 7.7.1 -- Essentially a FOILing of the pre-exponential
    cartesian power dependence in one dimension."""
    tot, i, f = 0., max(0, j - m), min(j, l) + 1
    for k in prange(i, f):
        tot += (choose(l, k) *
                choose(m, int(j - k)) *
                a ** (l - k) *
                b ** (m + k - j))
    return tot
Example #25
def _nin(l, m, pa, pb, p, N):
    """From Handbook of Computational Quantum Chemistry by David B. Cook
    in chapter 7.7.1 -- Sums the result of _fj over the total angular momentum
    in one dimension."""
    ltot = l + m
    if not ltot: return N
    tot = 0.
    for j in prange(int(ltot // 2 + 1)):
        tot += (_fj(2 * j, l, m, pa, pb) *
                dfac21(j) / (2 * p) ** j)
    return tot * N
Example #26
    def simple_prange_shared():
        """
        >>> simple_prange_shared()
        20
        """
        result = np.empty(1, dtype=np.int64)
        shared = 20

        for i in numba.prange(1):
            result[0] = shared
        return result[0]
Example #27
def simple_prange_private():
    """
    >>> simple_prange_private()
    10
    """
    result = np.empty(1, dtype=np.int64)
    var = 20
    for i in numba.prange(1):
        var = 10
        result[0] = var
    return result[0]
Example #28
def kde(X):
    b = 0.5
    points = np.array([-1.0, 2.0, 5.0])
    N = points.shape[0]
    n = X.shape[0]
    exps = 0
    for i in prange(n):
        p = X[i]
        d = (-(p-points)**2)/(2*b**2)
        m = np.min(d)
        exps += m-np.log(b*N)+np.log(np.sum(np.exp(d-m)))
    return exps
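The shift by m inside the loop is the usual log-sum-exp stabilization (any constant shift is valid; this snippet shifts by the minimum of d). A hedged sketch of compiling and calling it:

import numpy as np
from numba import njit, prange

kde_parallel = njit(parallel=True)(kde)
print(kde_parallel(np.random.randn(1000)))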
Example #29
def mutate(arr,sigma,mutation_rate):
    for i in prange(arr.shape[0]):
        if np.random.random() < mutation_rate:
            #mutation = np.random.normal()*sigma
            #if mutation > sigma*1.0:
            #    mutation = sigma
            #if mutation < -sigma*1.0:
            #    mutation = -sigma
            mutation = (np.random.random()-0.5) * sigma
            # mutation = arr[i]*2*(np.random.random()-0.5) * 0.05
            arr[i] = arr[i] + mutation
    return arr
Example #30
    def prange_shared_privates_reductions(shared):
        """
        >>> prange_shared_privates_reductions(2.0)
        100.0
        """
        sum = 10.0

        for i in numba.prange(10):
            j = i * shared
            sum += j
        shared = 3.0
        return sum
Example #31
def make1Drec(wt, lenwt1d, band, scale2d, nx, ny, nz, mode='haar'):
    # 1d wavelet decomposition

    if mode == 'haar':
        print "Using Haar recomp"
        F = np.array([0.5, 0.5])
        p = 1
        Lo_R = np.sqrt(2) * F / np.sum(F)
        Hi_R = Lo_R[::-1].copy()
        first = 2 - p % 2
        Hi_R[first::2] = -Hi_R[first::2]

    if mode == '9/7':
        print "Using 9/7 recomp"
        Df = np.array([0.0267487574110000,-0.0168641184430000,-0.0782232665290000,0.266864118443000,\
                           0.602949018236000,0.266864118443000,-0.0782232665290000,-0.0168641184430000,\
                           0.0267487574110000])
        Rf = np.array([-0.0456358815570000,-0.0287717631140000,0.295635881557000,0.557543526229000,\
                           0.295635881557000,-0.0287717631140000,-0.0456358815570000])

        lr = len(Rf)
        ld = len(Df)
        lmax = max(lr, ld)
        if lmax % 2:
            lmax += 1
        Rf = np.hstack(
            [np.zeros((lmax - lr) // 2), Rf,
             np.zeros((lmax - lr + 1) // 2)])
        Df = np.hstack(
            [np.zeros((lmax - ld) // 2), Df,
             np.zeros((lmax - ld + 1) // 2)])

        p = 1
        first = 2 - p % 2
        Lo_R1 = np.sqrt(2) * Df / np.sum(Df)
        Hi_R1 = Lo_R1[::-1].copy()
        Hi_R1[first::2] = -Hi_R1[first::2]
        #Hi_D1=Hi_R1[::-1].copy()
        #Lo_D1=Lo_R1[::-1].copy()

        Lo_R2 = np.sqrt(2) * Rf / np.sum(Rf)
        #Hi_R2 = Lo_R2[::-1].copy()
        #Hi_R2[first::2] = -Hi_R2[first::2]
        #Hi_D2=Hi_R2[::-1].copy()
        #Lo_D2=Lo_R2[::-1].copy()
        Lo_R = Lo_R2
        Hi_R = Hi_R1

    (h1, g1) = (Lo_R, Hi_R)  #(h0,g0)=(Lo_D1,Hi_D2)
    tmp = np.zeros((nz * scale2d, nx, ny))
    for bd in prange(scale2d):
        for corx in prange(nx):
            for cory in prange(ny):
                tmpwt = wt[lenwt1d * bd:lenwt1d * (bd + 1), corx, cory].copy()
                sig = tmpwt[:int(band[0])].copy()  #JGMOD
                start = int(band[0])  #JGMOD
                for sc in np.arange(np.size(band) - 2):
                    last = start + int(band[sc + 1])  #JGMOD
                    detail = tmpwt[start:last].copy()
                    lsig = 2 * sig.size
                    s = band[sc + 2]
                    appInt = np.zeros(lsig - 1)
                    appInt[::2] = sig.copy()
                    appInt = fast_convolution(appInt, h1, 1)
                    first = int(np.floor(float(np.size(appInt) - s) /
                                         2.))  #JGMOD
                    last = int(
                        np.size(appInt) -
                        np.ceil(float(np.size(appInt) - s) / 2.))  #JGMOD
                    appInt = appInt[first:last]
                    detailInt = np.zeros(lsig - 1)
                    detailInt[::2] = detail.copy()
                    detailInt = fast_convolution(detailInt, g1, 1)
                    detailInt = detailInt[first:last]
                    sig = appInt + detailInt
                    start = last
                tmp[bd * nz:(bd + 1) * nz, corx, cory] = sig.copy()
    return tmp
Example #32
def _gadf(X_cos, X_sin, n_samples, image_size):
    X_gadf = np.empty((n_samples, image_size, image_size))
    for i in prange(n_samples):
        X_gadf[i] = np.outer(X_sin[i], X_cos[i]) - np.outer(X_cos[i], X_sin[i])
    return X_gadf
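The cosine/sine inputs are assumed to be precomputed from series rescaled to [-1, 1]; a hedged preparation sketch with made-up dimensions:

import numpy as np

X_cos = np.random.uniform(-1.0, 1.0, size=(8, 24))    # 8 series of 24 points
X_sin = np.sqrt(np.clip(1.0 - X_cos ** 2, 0.0, 1.0))  # sine from the cosine identity
X_gadf = _gadf(X_cos, X_sin, 8, 24)                   # result shape (8, 24, 24)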
Example #33
def _process_proximity_line(source_line, x_coords, y_coords, pan_near_x,
                            pan_near_y, is_forward, line_id, width,
                            max_distance, line_proximity, values,
                            distance_metric):

    # Process proximity for a line of pixels in an image
    #
    # source_line: 1d ndarray, input data
    # pan_near_x:  1d ndarray
    # pan_near_y:  1d ndarray
    # is_forward: boolean, will we loop forward through pixel?
    # line_id: np.int64, index of the source_line in the image
    # width: np.int64, image width. It is the number of pixels in the
    #                source_line
    # max_distance: np.float64, maximum distance considered.
    # line_proximity: 1d numpy array of type np.float64,
    #                         calculated proximity from source_line
    # values: 1d numpy array of type np.uint8,
    #                 A list of target pixel values to measure the distance from.
    #                 If this option is not provided proximity will be computed
    #                 from non-zero pixel values.
    #                 Currently pixel values are internally processed as integers
    # Return: 1d numpy array of type np.float64.
    #          Corresponding proximity of source_line.

    start = width - 1
    end = -1
    step = -1
    if is_forward:
        start = 0
        end = width
        step = 1

    n_values = len(values)
    for pixel in prange(start, end, step):
        is_target = False
        # Is the current pixel a target pixel?
        if n_values == 0:
            is_target = source_line[pixel] != 0
        else:
            for i in prange(n_values):
                if source_line[pixel] == values[i]:
                    is_target = True

        if is_target:
            line_proximity[pixel] = 0.0
            pan_near_x[pixel] = pixel
            pan_near_y[pixel] = line_id
            continue

        # Are we near(er) to the closest target to the above (below) pixel?
        near_distance_square = max_distance**2 * 2.0
        if pan_near_x[pixel] != -1:
            # distance_square
            dist_sqr = _distance(x_coords[pan_near_x[pixel]], x_coords[pixel],
                                 y_coords[pan_near_y[pixel]],
                                 y_coords[line_id], distance_metric)
            if dist_sqr < near_distance_square:
                near_distance_square = dist_sqr
            else:
                pan_near_x[pixel] = -1
                pan_near_y[pixel] = -1

        # Are we near(er) to the closest target to the left (right) pixel?
        last = pixel - step
        if pixel != start and pan_near_x[last] != -1:
            dist_sqr = _distance(x_coords[pan_near_x[last]], x_coords[pixel],
                                 y_coords[pan_near_y[last]], y_coords[line_id],
                                 distance_metric)
            if dist_sqr < near_distance_square:
                near_distance_square = dist_sqr
                pan_near_x[pixel] = pan_near_x[last]
                pan_near_y[pixel] = pan_near_y[last]

        #  Are we near(er) to the closest target to the
        #  topright (bottom left) pixel?
        tr = pixel + step
        if tr != end and pan_near_x[tr] != -1:
            dist_sqr = _distance(x_coords[pan_near_x[tr]], x_coords[pixel],
                                 y_coords[pan_near_y[tr]], y_coords[line_id],
                                 distance_metric)
            if dist_sqr < near_distance_square:
                near_distance_square = dist_sqr
                pan_near_x[pixel] = pan_near_x[tr]
                pan_near_y[pixel] = pan_near_y[tr]

        # Update our proximity value.
        if pan_near_x[pixel] != -1 and not np.isnan(source_line[pixel])\
                and max_distance * max_distance >= near_distance_square\
                and (line_proximity[pixel] < 0 or
                     near_distance_square <
                     line_proximity[pixel] * line_proximity[pixel]):
            line_proximity[pixel] = sqrt(near_distance_square)
    return
Example #34
def rbf(X, w):
    result = np.zeros((X.shape[0], ), dtype=np.float32)
    for idx in prange(X.shape[0]):
        result[idx] = -np.linalg.norm(X[idx, :] - w)**2 / (2 * gamma**2)
    return np.exp(result)
Example #35
def construct_lower(L, U, nrow, ncol):
    for i in prange(ncol):
        for j in range(i, nrow):
            L[j, i] /= U[i, i]
            L[i, i] = 1
Example #36
def tensor_conv1d(
    out,
    out_shape,
    out_strides,
    out_size,
    input,
    input_shape,
    input_strides,
    weight,
    weight_shape,
    weight_strides,
    reverse,
):
    """
    1D Convolution implementation.

    Given input tensor of

       `batch, in_channels, width`

    and weight tensor

       `out_channels, in_channels, k_width`

    Computes padded output of

       `batch, out_channels, width`

    `reverse` decides if weight is anchored left (False) or right.
    (See diagrams)

    Args:
        out (array): storage for `out` tensor.
        out_shape (array): shape for `out` tensor.
        out_strides (array): strides for `out` tensor.
        out_size (int): size of the `out` tensor.
        input (array): storage for `input` tensor.
        input_shape (array): shape for `input` tensor.
        input_strides (array): strides for `input` tensor.
        weight (array): storage for `weight` tensor.
        weight_shape (array): shape for `weight` tensor.
        weight_strides (array): strides for `weight` tensor.
        reverse (bool): anchor weight at left or right
    """
    batch_, out_channels, out_width = out_shape
    batch, in_channels, width = input_shape
    out_channels_, in_channels_, kw = weight_shape

    assert (
        batch == batch_
        and in_channels == in_channels_
        and out_channels == out_channels_
    )

    for p in prange(out_size):
        out_index = np.zeros(3, np.int32)
        pos1 = np.zeros(3, np.int32)
        pos2 = np.zeros(3, np.int32)
        count(p, out_shape, out_index)
        cur_batch, cur_out_channels, cur_n = out_index
        # input matrix - out_tensor[cur_batch, cur_out_channels: cur_out_channels + in_channels, cur_n: cur_n + kw]
        # shape of input matrix: (1, in_channels, kw)
        # weight matrix - (1, in_channels, kw)
        v = 0
        for i in range(in_channels):
            for j in range(kw):
                if not reverse:
                    pos1[0] = cur_out_channels
                    pos1[1] = i
                    pos1[2] = j
                    pos2[0] = cur_batch
                    pos2[1] = i
                    pos2[2] = cur_n + j
                    if cur_n + j >= width:
                        v += 0
                    else:
                        v += weight[index_to_position(pos1, weight_strides)] * input[index_to_position(
                            pos2, input_strides)]
                else:
                    if cur_n - j < 0:
                        v += 0
                    else:
                        pos1[0] = cur_out_channels
                        pos1[1] = i
                        pos1[2] = j
                        pos2[0] = cur_batch
                        pos2[1] = i
                        pos2[2] = cur_n - j
                        v += weight[index_to_position(pos1, weight_strides)] * input[index_to_position(
                            pos2, input_strides)]
        out[index_to_position(out_index, out_strides)] = v
Example #37
def _astype_numba(arr, result):
    for i in nb.prange(len(arr)):
        # conversion occurs implicitly, and numba only supports conversion
        # between arrays of numeric types.
        result[i] = arr[i]
Example #38
def find_node_split(context, sample_indices):
    """For each feature, find the best bin to split on at a given node.

    Returns the best split info among all features, and the histograms of
    all the features. The histograms are computed by scanning the whole
    data.

    Parameters
    ----------
    context : SplittingContext
        The splitting context
    sample_indices : array of int
        The indices of the samples at the node to split.

    Returns
    -------
    best_split_info : SplitInfo
        The info about the best possible split among all features.
    histograms : array of HISTOGRAM_DTYPE, shape=(n_features, max_bins)
        The histograms of each feature. A histogram is an array of
        HISTOGRAM_DTYPE of size ``max_bins`` (only
        ``n_bins_per_features[feature]`` entries are relevant).
    """

    ctx = context  # shorter name to avoid various line breaks
    n_samples = sample_indices.shape[0]

    # Need to declare local variables, else they're not updated
    # (see numba issue 3459)
    ordered_gradients = ctx.ordered_gradients
    ordered_hessians = ctx.ordered_hessians

    # Populate ordered_gradients and ordered_hessians. (Already done for root)
    # Ordering the gradients and hessians helps to improve cache hit.
    # This is a parallelized version of the following vanilla code:
    # for i in range(n_samples):
    #     ctx.ordered_gradients[i] = ctx.gradients[samples_indices[i]]
    if sample_indices.shape[0] != ctx.gradients.shape[0]:
        starts, ends, n_threads = get_threads_chunks(n_samples)
        if ctx.constant_hessian:
            for thread_idx in prange(n_threads):
                for i in range(starts[thread_idx], ends[thread_idx]):
                    ordered_gradients[i] = ctx.gradients[sample_indices[i]]
        else:
            for thread_idx in prange(n_threads):
                for i in range(starts[thread_idx], ends[thread_idx]):
                    ordered_gradients[i] = ctx.gradients[sample_indices[i]]
                    ordered_hessians[i] = ctx.hessians[sample_indices[i]]

    ctx.sum_gradients = ctx.ordered_gradients[:n_samples].sum()
    if ctx.constant_hessian:
        ctx.sum_hessians = ctx.constant_hessian_value * float32(n_samples)
    else:
        ctx.sum_hessians = ctx.ordered_hessians[:n_samples].sum()

    # Pre-allocate the results datastructure to be able to use prange:
    # numba jitclass do not seem to properly support default values for kwargs.
    split_infos = [
        SplitInfo(-1., 0, 0, 0., 0., 0., 0., 0, 0)
        for i in range(context.n_features)
    ]
    histograms = np.empty(shape=(np.int64(context.n_features),
                                 np.int64(context.max_bins)),
                          dtype=HISTOGRAM_DTYPE)
    for feature_idx in prange(context.n_features):
        split_info, histogram = _find_histogram_split(context, feature_idx,
                                                      sample_indices)
        split_infos[feature_idx] = split_info
        histograms[feature_idx, :] = histogram

    split_info = _find_best_feature_to_split_helper(split_infos)
    return split_info, histograms
Example #39
def find_node_split_subtraction(context, sample_indices, parent_histograms,
                                sibling_histograms):
    """For each feature, find the best bin to split on at a given node.

    Returns the best split info among all features, and the histograms of
    all the features.

    This does the same job as ``find_node_split()`` but uses the histograms
    of the parent and sibling of the node to split. This allows using the
    identity: ``histogram(parent) = histogram(node) - histogram(sibling)``,
    which is significantly faster than computing the histograms from data.

    Returns the best SplitInfo among all features, along with all the feature
    histograms that can later be used to compute the sibling or children
    histograms by subtraction.

    Parameters
    ----------
    context : SplittingContext
        The splitting context
    sample_indices : array of int
        The indices of the samples at the node to split.
    parent_histograms : array of HISTOGRAM_DTYPE of shape(n_features, max_bins)
        The histograms of the parent
    sibling_histograms : array of HISTOGRAM_DTYPE of \
        shape(n_features, max_bins)
        The histograms of the sibling

    Returns
    -------
    best_split_info : SplitInfo
        The info about the best possible split among all features.
    histograms : array of HISTOGRAM_DTYPE, shape=(n_features, max_bins)
        The histograms of each feature. A histogram is an array of
        HISTOGRAM_DTYPE of size ``max_bins`` (only
        ``n_bins_per_features[feature]`` entries are relevant).
    """

    # We can pick any feature (here the first) in the histograms to
    # compute the gradients: they must be the same across all features
    # anyway, we have tests ensuring this. Maybe a more robust way would
    # be to compute an average but it's probably not worth it.
    context.sum_gradients = (parent_histograms[0]['sum_gradients'].sum() -
                             sibling_histograms[0]['sum_gradients'].sum())

    n_samples = sample_indices.shape[0]
    if context.constant_hessian:
        context.sum_hessians = \
            context.constant_hessian_value * float32(n_samples)
    else:
        context.sum_hessians = (parent_histograms[0]['sum_hessians'].sum() -
                                sibling_histograms[0]['sum_hessians'].sum())

    # Pre-allocate the results datastructure to be able to use prange
    split_infos = [
        SplitInfo(-1., 0, 0, 0., 0., 0., 0., 0, 0)
        for i in range(context.n_features)
    ]
    histograms = np.empty(shape=(np.int64(context.n_features),
                                 np.int64(context.max_bins)),
                          dtype=HISTOGRAM_DTYPE)
    for feature_idx in prange(context.n_features):
        split_info, histogram = _find_histogram_split_subtraction(
            context, feature_idx, parent_histograms, sibling_histograms,
            n_samples)
        split_infos[feature_idx] = split_info
        histograms[feature_idx, :] = histogram

    split_info = _find_best_feature_to_split_helper(split_infos)
    return split_info, histograms
Example #40
def test_impl(t_obj, X):
    for i in prange(t_obj.T):
        X[i] = i
    return X.sum()
Example #41
def sdc_take_array_impl(data, indices):
    res_size = len(indices)
    res_arr = numpy.empty(res_size, dtype=data_dtype)
    for i in numba.prange(res_size):
        res_arr[i] = data[indices[i]]
    return res_arr
Example #42
def etimated_cov_mat(vert, eta, eigenvalue_nk, alphalapl, u_nkl_norm):
    expressionAcc = np.zeros((vert, vert))
    for m in prange(0, vert):
        for n in range(0, vert):
            expressionAcc[m, n] += (eta ** 2) * (eigenvalue_nk ** (-alphalapl)) * u_nkl_norm[m] * u_nkl_norm[n]
    return expressionAcc
Example #43
def test_impl(data):
    N = data.shape[0]
    sums = np.zeros(N)
    for i in prange(N):
        sums[i] = np.sum(data[np.int32(0):np.int32(1)])
    return sums
Example #44
def make1Dtf(tmp, scale1d, scale2d, nx, ny, nz, size, lenwt1d, mode='haar'):
    # 1d wavelet decomposition
    scale = scale1d

    if mode == 'haar':
        print "Using Haar decomp"
        F = np.array([0.5, 0.5])
        p = 1
        Lo_R = np.sqrt(2) * F / np.sum(F)
        Hi_R = Lo_R[::-1].copy()
        first = 2 - p % 2
        Hi_R[first::2] = -Hi_R[first::2]
        Hi_D = Hi_R[::-1].copy()
        Lo_D = Lo_R[::-1].copy()

    if mode == '9/7':
        print "Using 9/7 decomp"
        Df = np.array([0.0267487574110000,-0.0168641184430000,-0.0782232665290000,0.266864118443000,\
                           0.602949018236000,0.266864118443000,-0.0782232665290000,-0.0168641184430000,\
                           0.0267487574110000])
        Rf = np.array([-0.0456358815570000,-0.0287717631140000,0.295635881557000,0.557543526229000,\
                           0.295635881557000,-0.0287717631140000,-0.0456358815570000])

        lr = len(Rf)
        ld = len(Df)
        lmax = max(lr, ld)
        if lmax % 2:
            lmax += 1
        Rf = np.hstack(
            [np.zeros((lmax - lr) // 2), Rf,
             np.zeros((lmax - lr + 1) // 2)])
        Df = np.hstack(
            [np.zeros((lmax - ld) // 2), Df,
             np.zeros((lmax - ld + 1) // 2)])

        p = 1
        first = 2 - p % 2
        Lo_R1 = np.sqrt(2) * Df / np.sum(Df)
        Hi_R1 = Lo_R1[::-1].copy()
        Hi_R1[first::2] = -Hi_R1[first::2]
        Hi_D1 = Hi_R1[::-1].copy()
        Lo_D1 = Lo_R1[::-1].copy()

        Lo_R2 = np.sqrt(2) * Rf / np.sum(Rf)
        Hi_R2 = Lo_R2[::-1].copy()
        Hi_R2[first::2] = -Hi_R2[first::2]
        Hi_D2 = Hi_R2[::-1].copy()
        Lo_D2 = Lo_R2[::-1].copy()
        Lo_D = Lo_D1
        Hi_D = Hi_D2

    (h0, g0) = (Lo_D, Hi_D)

    lf = h0.size
    band = np.zeros(scale + 1)
    band[-1] = size
    end = size
    start = 1

    wt2d1d = np.zeros((scale2d * lenwt1d, nx, ny), dtype=np.float64)
    #print wt2d1d.shape
    wt = np.array([], dtype=np.float64)
    for bd in prange(scale2d):
        for corx in prange(nx):
            for cory in prange(ny):
                x = tmp[bd * nz:(bd + 1) * nz, corx, cory].copy()
                #pdb.set_trace()
                wt = np.array([], dtype=np.float64)
                #print "HELLO"
                for sc in np.arange(scale - 1):
                    #print "Hello in scale",sc
                    lsig = x.size
                    end = lsig + lf - 1
                    lenExt = lf - 1
                    #xExt=np.array([1,2,3],dtype=np.float64)
                    #                    xExt = np.lib.pad(x, (lenExt,lenExt), 'symmetric')
                    #xExt=np.concatenate((x[lenExt-1::-1],x,x[-lenExt+1:-lenExt-1:-1]),axis=0)
                    xExt = np.concatenate(
                        (x[0:lenExt][::-1], x, x[::-1][0:lenExt]), axis=0)
                    #print x
                    #print xExt
                    #pdb.set_trace()
                    app = fast_convolution(xExt, h0,
                                           0)  #np.convolve(xExt,h0,'valid')
                    x = app[start:end:2].copy()
                    detail = fast_convolution(xExt, g0,
                                              0)  #np.convolve(xExt,g0,'valid')
                    #print app
                    #print detail
                    #pdb.set_trace()
                    #detail=np.array([1,2,3,4,5])
                    wt = np.hstack((detail[start:end:2], wt))
                    band[-2 - sc] = len(detail[start:end:2])
                #print wt
                #pdb.set_trace()
                wt = np.hstack((x, wt))
                band[0] = len(x)
                #print "wt=",wt.shape
                #pdb.set_trace()
                index1 = lenwt1d * bd
                index2 = lenwt1d * (bd + 1)
                a = wt.copy()
                wt2d1d[index1:index2, corx, cory] = a
    #print index1,index2,corx,cory,wt2d1d[index1:index2,corx,cory]
    #print "Before return in make1Dtf"

    return (wt2d1d, band)
Example #45
    def base_calc_semblance(
            calc_nmo_func,
            seismogram,
            times,
            offsets,
            velocity,
            sample_rate,  # pylint: disable=too-many-arguments
            win_size,
            t_min,
            t_max):
        """ Calculate semblance for specified velocity in the preset time window from `t_min` to `t_max`.

        Parameters
        ----------
        calc_nmo_func : njitted callable
            Callable that calculates normal moveout corrected seismogram for specified time and velocity values
            and range of offsets.
        seismogram : np.ndarray
            Data for calculating semblance.
        times : array-like
            An array containing the recording time for each trace value.
        offsets : array-like
            The distance from the source to the receiver for each trace.
        velocity : array-like
            Velocity value for semblance computation.
        sample_rate : int
            Step in milliseconds between signal amplitude measurements during shooting.
        win_size : int
            Window size for smoothing the semblance.
            Measured in samples.
        t_min : int
            Time value to start compute semblance from.
            Measured in samples.
        t_max : int
            The last time value for semblance computation.
            Measured in samples.

        Returns
        -------
        slice_semblance : 1d array
            Semblance values for a specified `velocity` in time range from `t_min` to `t_max`.
        """
        t_win_size_min = max(0, t_min - win_size)
        t_win_size_max = min(len(times) - 1, t_max + win_size)

        nmo = np.empty(
            (t_win_size_max - t_win_size_min + 1, seismogram.shape[1]))
        for i in prange(t_win_size_min, t_win_size_max):
            nmo[i - t_win_size_min] = calc_nmo_func(seismogram, times[i],
                                                    offsets, velocity,
                                                    sample_rate)

        numerator = np.sum(nmo, axis=1)**2
        denominator = np.sum(nmo**2, axis=1)
        slice_semblance = np.zeros(t_max - t_min)
        for t in prange(t_min, t_max):
            t_rel = t - t_win_size_min
            ix_from = max(0, t_rel - win_size)
            ix_to = min(len(nmo) - 1, t_rel + win_size)
            slice_semblance[t - t_min] = (
                np.sum(numerator[ix_from:ix_to]) /
                (len(offsets) * np.sum(denominator[ix_from:ix_to]) + 1e-6))
        return slice_semblance
Example #46
def tensor_conv2d(
    out,
    out_shape,
    out_strides,
    out_size,
    input,
    input_shape,
    input_strides,
    weight,
    weight_shape,
    weight_strides,
    reverse,
):
    """
    2D Convolution implementation.

    Given input tensor of

       `batch, in_channels, height, width`

    and weight tensor

       `out_channels, in_channels, k_height, k_width`

    Computes padded output of

       `batch, out_channels, height, width`

    `reverse` decides if weight is anchored top-left (False) or bottom-right.
    (See diagrams)


    Args:
        out (array): storage for `out` tensor.
        out_shape (array): shape for `out` tensor.
        out_strides (array): strides for `out` tensor.
        out_size (int): size of the `out` tensor.
        input (array): storage for `input` tensor.
        input_shape (array): shape for `input` tensor.
        input_strides (array): strides for `input` tensor.
        weight (array): storage for `weight` tensor.
        weight_shape (array): shape for `weight` tensor.
        weight_strides (array): strides for `weight` tensor.
        reverse (bool): anchor weight at top-left or bottom-right
    """
    batch_, out_channels, _, _ = out_shape
    batch, in_channels, height, width = input_shape
    out_channels_, in_channels_, kh, kw = weight_shape

    assert (
        batch == batch_
        and in_channels == in_channels_
        and out_channels == out_channels_
    )

    for p in prange(out_size):
        out_index = np.zeros(4, np.int32)
        pos1 = np.zeros(4, np.int32)
        pos2 = np.zeros(4, np.int32)
        count(p, out_shape, out_index)
        cur_batch, cur_out_channels, cur_h, cur_w = out_index
        v = 0

        for i in range(in_channels):
            for j in range(kh):
                for k in range(kw):
                    if not reverse:
                        pos1[0] = cur_out_channels
                        pos1[1] = i
                        pos1[2] = j
                        pos1[3] = k
                        pos2[0] = cur_batch
                        pos2[1] = i
                        pos2[2] = cur_h + j
                        pos2[3] = cur_w + k
                        if cur_h + j >= height or cur_w + k >= width:
                            v += 0
                        else:
                            v += weight[index_to_position(pos1, weight_strides)] * input[index_to_position(
                                pos2, input_strides)]
                    else:
                        pos1[0] = cur_out_channels
                        pos1[1] = i
                        pos1[2] = j
                        pos1[3] = k
                        pos2[0] = cur_batch
                        pos2[1] = i
                        pos2[2] = cur_h - j
                        pos2[3] = cur_w - k
                        if cur_h - j < 0 or cur_w - k < 0:
                            v += 0
                        else:
                            v += weight[index_to_position(pos1, weight_strides)] * input[index_to_position(
                                pos2, input_strides)]
        out[index_to_position(out_index, out_strides)] = v
Example #47
def split_indices(context, split_info, sample_indices):
    """Split samples into left and right arrays.

    Parameters
    ----------
    context : SplittingContext
        The splitting context
    split_info : SplitInfo
        The SplitInfo of the node to split
    sample_indices : array of int
        The indices of the samples at the node to split. This is a view on
        context.partition, and it is modified inplace by placing the indices
        of the left child at the beginning, and the indices of the right child
        at the end.

    Returns
    -------
    left_indices : array of int
        The indices of the samples in the left child. This is a view on
        context.partition.
    right_indices : array of int
        The indices of the samples in the right child. This is a view on
        context.partition.
    """
    # This is a multi-threaded implementation inspired by lightgbm.
    # Here is a quick break down. Let's suppose we want to split a node with
    # 24 samples named from a to x. context.partition looks like this (the *
    # are indices in other leaves that we don't care about):
    # partition = [*************abcdefghijklmnopqrstuvwx****************]
    #                           ^                       ^
    #                     node_position     node_position + node.n_samples

    # Ultimately, we want to reorder the samples inside the boundaries of the
    # leaf (which becomes a node) to now represent the samples in its left and
    # right child. For example:
    # partition = [*************abefilmnopqrtuxcdghjksvw*****************]
    #                           ^              ^
    #                   left_child_pos     right_child_pos
    # Note that left_child_pos always takes the value of node_position, and
    # right_child_pos = left_child_pos + left_child.n_samples. The order of
    # the samples inside a leaf is irrelevant.

    # 1. samples_indices is a view on this region a..x. We conceptually
    #    divide it into n_threads regions. Each thread will be responsible for
    #    its own region. Here is an example with 4 threads:
    #    samples_indices = [abcdef|ghijkl|mnopqr|stuvwx]
    # 2. Each thread processes 6 = 24 // 4 entries and maps them into
    #    left_indices_buffer or right_indices_buffer. For example, we could
    #    have the following mapping ('.' denotes an undefined entry):
    #    - left_indices_buffer =  [abef..|il....|mnopqr|tux...]
    #    - right_indices_buffer = [cd....|ghjk..|......|svw...]
    # 3. We keep track of the start positions of the regions (the '|') in
    #    ``offset_in_buffers`` as well as the size of each region. We also keep
    #    track of the number of samples put into the left/right child by each
    #    thread. Concretely:
    #    - left_counts =  [4, 2, 6, 3]
    #    - right_counts = [2, 4, 0, 3]
    # 4. Finally, we put left/right_indices_buffer back into the
    #    samples_indices, without any undefined entries and the partition looks
    #    as expected
    #    partition = [*************abefilmnopqrtuxcdghjksvw*****************]

    # Note: We here show left/right_indices_buffer as being the same size as
    # sample_indices for simplicity, but in reality they are of the same size
    # as partition.

    X_binned = context.X_binned.T[split_info.feature_idx]

    n_threads = numba.config.NUMBA_DEFAULT_NUM_THREADS
    n_samples = sample_indices.shape[0]

    # Note: we could probably allocate all the arrays of size n_threads in the
    # splitting context as well, but gains are probably going to be minimal
    sizes = np.full(n_threads, n_samples // n_threads, dtype=np.int32)
    if n_samples % n_threads > 0:
        # array[:0] will cause a bug in numba 0.41 so we need the if. Remove
        # once issue numba 3554 is fixed.
        sizes[:n_samples % n_threads] += 1
    offset_in_buffers = np.zeros(n_threads, dtype=np.int32)
    offset_in_buffers[1:] = np.cumsum(sizes[:-1])

    left_counts = np.empty(n_threads, dtype=np.int32)
    right_counts = np.empty(n_threads, dtype=np.int32)

    # Need to declare local variables, else they're not updated :/
    # (see numba issue 3459)
    left_indices_buffer = context.left_indices_buffer
    right_indices_buffer = context.right_indices_buffer

    # map indices from sample_indices to left/right_indices_buffer
    for thread_idx in prange(n_threads):
        left_count = 0
        right_count = 0

        start = offset_in_buffers[thread_idx]
        stop = start + sizes[thread_idx]
        for i in range(start, stop):
            sample_idx = sample_indices[i]
            if X_binned[sample_idx] <= split_info.bin_idx:
                left_indices_buffer[start + left_count] = sample_idx
                left_count += 1
            else:
                right_indices_buffer[start + right_count] = sample_idx
                right_count += 1

        left_counts[thread_idx] = left_count
        right_counts[thread_idx] = right_count

    # position of right child = just after the left child
    right_child_position = left_counts.sum()

    # offset of each thread in samples_indices for left and right child, i.e.
    # where each thread will start to write.
    left_offset = np.zeros(n_threads, dtype=np.int32)
    left_offset[1:] = np.cumsum(left_counts[:-1])
    right_offset = np.full(n_threads, right_child_position, dtype=np.int32)
    right_offset[1:] += np.cumsum(right_counts[:-1])

    # map indices in left/right_indices_buffer back into sample_indices. This
    # also updates context.partition since sample_indices is a view.
    for thread_idx in prange(n_threads):

        for i in range(left_counts[thread_idx]):
            sample_indices[left_offset[thread_idx] + i] = \
                left_indices_buffer[offset_in_buffers[thread_idx] + i]
        for i in range(right_counts[thread_idx]):
            sample_indices[right_offset[thread_idx] + i] = \
                right_indices_buffer[offset_in_buffers[thread_idx] + i]

    return (sample_indices[:right_child_position],
            sample_indices[right_child_position:])
Example #48
def fastfield(coordinates, r_p, k, phase, ab, result, bohren, cartesian):
    '''
    Returns the field scattered by the particle at each coordinate

    Arguments
    ----------
    coordinates : numpy.ndarray of dtype numpy.complex128
        [3, npts] coordinate system for scattered field calculation
    r_p : numpy.ndarray
        [3] position of scatterer
    k : float
        Wavenumber of the light in medium of refractive index n_m
    phase : np.complex128
        Complex exponential phase to attach to Lorenz-Mie scattering
        function. See equation XXX
    ab : numpy.ndarray of dtype numpy.complex128
        [2, norders] Mie scattering coefficients
    result : numpy.ndarray of dtype numpy.complex128
        [3, npts] buffer for final scattered field
    bohren : bool
        If set, use sign convention from Bohren and Huffman.
        Otherwise, use opposite sign convention.
    cartesian : bool
        If set, return field projected onto Cartesian coordinates.
        Otherwise, return polar projection.
    '''
    length = coordinates.shape[1]

    norders = ab.shape[0]  # number of partial waves in sum

    # GEOMETRY
    # 1. particle displacement [pixel]
    # Note: The sign convention used here is appropriate
    # for illumination propagating in the -z direction.
    # This means that a particle forming an image in the
    # focal plane (z = 0) is located at positive z.
    # Accounting for this by flipping the axial coordinate
    # is equivalent to using a mirrored (left-handed)
    # coordinate system.

    for idx in prange(length):
        kx = k * (coordinates[0, idx] - r_p[0])
        ky = k * (coordinates[1, idx] - r_p[1])
        kz = k * (coordinates[2, idx] - r_p[2])
        # 2. geometric factors
        kz *= -1.  # z convention
        krho = math.sqrt(kx**2 + ky**2)
        kr = math.sqrt(krho**2 + kz**2)

        theta = math.atan2(krho, kz)
        phi = math.atan2(ky, kx)
        sintheta = math.sin(theta)
        costheta = math.cos(theta)
        sinphi = math.sin(phi)
        cosphi = math.cos(phi)
        sinkr = math.sin(kr)
        coskr = math.cos(kr)

        # SPECIAL FUNCTIONS
        # starting points for recursive function evaluation ...
        # 1. Riccati-Bessel radial functions, page 478.
        # Particles above the focal plane create diverging waves
        # described by Eq. (4.13) for $h_n^{(1)}(kr)$. These have z > 0.
        # Those below the focal plane appear to be converging from the
        # perspective of the camera. They are described by Eq. (4.14)
        # for $h_n^{(2)}(kr)$, and have z < 0. We can select the
        # appropriate case by applying the correct sign of the imaginary
        # part of the starting functions...
        if kz > 0:
            factor = 1. * 1.j
        elif kz < 0:
            factor = -1. * 1.j
        else:
            factor = 0. * 1.j
        if not bohren:
            factor = -1. * factor

        xi_nm2 = coskr + factor * sinkr  # \xi_{-1}(kr)
        xi_nm1 = sinkr - factor * coskr  # \xi_0(kr)

        # 2. Angular functions (4.47), page 95
        pi_nm1 = 0.  # \pi_0(\cos\theta)
        pi_n = 1.  # \pi_1(\cos\theta)

        # 3. Vector spherical harmonics: [r,theta,phi]
        mo1nr = 0.j
        mo1nt = 0.j
        mo1np = 0.j
        ne1nr = 0.j
        ne1nt = 0.j
        ne1np = 0.j

        # storage for scattered field
        esr = 0.j
        est = 0.j
        esp = 0.j

        # COMPUTE field by summing partial waves
        for n in range(1, norders):
            fn = np.float64(n)
            # upward recurrences ...
            # 4. Legendre factor (4.47)
            # Method described by Wiscombe (1980)

            swisc = pi_n * costheta
            twisc = swisc - pi_nm1
            tau_n = pi_nm1 - fn * twisc  # -\tau_n(\cos\theta)

            # ... Riccati-Bessel function, page 478
            xi_n = (2. * fn - 1.) * (xi_nm1 / kr) - xi_nm2  # \xi_n(kr)

            # ... Deirmendjian's derivative
            dn = (fn * xi_n) / kr - xi_nm1

            # vector spherical harmonics (4.50)
            # Note: mo1nr stays 0 because M_{o1n} has no radial component.
            mo1nt = pi_n * xi_n  # ... divided by cosphi/kr
            mo1np = tau_n * xi_n  # ... divided by sinphi/kr

            # ... divided by cosphi sintheta/kr^2
            ne1nr = fn * (fn + 1.) * pi_n * xi_n
            ne1nt = tau_n * dn  # ... divided by cosphi/kr
            ne1np = pi_n * dn  # ... divided by sinphi/kr

            # i^n, computed without complex exponentiation
            mod = n % 4
            if mod == 1:
                fac = 1.j
            elif mod == 2:
                fac = -1. + 0.j
            elif mod == 3:
                fac = -1.j
            else:
                fac = 1. + 0.j

            # prefactor, page 93
            en = fac * (2. * fn + 1.) / fn / (fn + 1.)

            # the scattered field in spherical coordinates (4.45)
            esr += (1.j * en * ab[n, 0]) * ne1nr
            est += (1.j * en * ab[n, 0]) * ne1nt
            esp += (1.j * en * ab[n, 0]) * ne1np
            esr -= (en * ab[n, 1]) * mo1nr
            est -= (en * ab[n, 1]) * mo1nt
            esp -= (en * ab[n, 1]) * mo1np

            # upward recurrences ...
            # ... angular functions (4.47)
            # Method described by Wiscombe (1980)
            pi_nm1 = pi_n
            pi_n = swisc + ((fn + 1.) / fn) * twisc

            # ... Riccati-Bessel function
            xi_nm2 = xi_nm1
            xi_nm1 = xi_n

        # n: multipole sum

        # geometric factors were divided out of the vector
        # spherical harmonics for accuracy and efficiency ...
        # ... put them back at the end.
        radialfactor = 1. / kr
        esr *= cosphi * sintheta * radialfactor**2
        est *= cosphi * radialfactor
        esp *= sinphi * radialfactor

        # By default, the scattered wave is returned in spherical
        # coordinates.  Project components onto Cartesian coordinates.
        # Assumes that the incident wave propagates along z and
        # is linearly polarized along x

        if cartesian:
            ecx = esr * sintheta * cosphi
            ecx += est * costheta * cosphi
            ecx -= esp * sinphi

            ecy = esr * sintheta * sinphi
            ecy += est * costheta * sinphi
            ecy += esp * cosphi
            ecz = (esr * costheta - est * sintheta)
            result[0, idx] += ecx * phase
            result[1, idx] += ecy * phase
            result[2, idx] += ecz * phase
        else:
            result[0, idx] += esr * phase
            result[1, idx] += est * phase
            result[2, idx] += esp * phase
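For orientation, a hedged usage sketch: the enclosing function's name and decorator are cut off above, so the name `scattered_field`, the parameter values, and the grid construction below are illustrative assumptions rather than the original API.
import numpy as np

# Illustrative inputs (assumptions, not from the original example)
k = 2. * np.pi / 0.5                    # wavenumber in the medium
phase = 1. + 0.j                        # overall phase factor (placeholder)
ab = np.zeros((20, 2), np.complex128)   # Mie coefficients from a separate solver

# [3, npts] coordinates for a 201x201 pixel grid in the focal plane (z = 0)
xv, yv = np.meshgrid(np.arange(201.), np.arange(201.))
coordinates = np.stack((xv.ravel(), yv.ravel(), np.zeros(xv.size)))

r_p = np.array([100., 100., 300.])      # particle position [pixels]
result = np.zeros((3, coordinates.shape[1]), dtype=np.complex128)
scattered_field(coordinates, r_p, k, phase, ab, result,
                cartesian=True, bohren=True)
# result[0..2] now hold the x, y, z components of the scattered field per pixel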
Example #49
0
def inner(u, v):
    sumUV = 0
    for i in nb.prange(len(u)):
        sumUV += u[i] * v[i]
    return sumUV
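As written, `inner` is a plain Python loop; `prange` only takes effect once the function is compiled with `parallel=True`, at which point numba recognizes the `sumUV += ...` pattern as a reduction and combines per-thread partial sums. A minimal, self-contained sketch:
import numba as nb
import numpy as np

@nb.njit(parallel=True)
def inner(u, v):
    sumUV = 0.
    for i in nb.prange(len(u)):
        sumUV += u[i] * v[i]  # recognized by numba as a parallel reduction
    return sumUV

u = np.random.rand(10_000)
v = np.random.rand(10_000)
assert np.isclose(inner(u, v), u @ v)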
Example #50
0
def monte_carlo_slip_tendency(pole, pole_unc, stress_tensor, stress_unc, axis, axis_unc, pf, mu,
                              mu_unc, pf_l=0.5, pf_u=2.5):
    """
    Computes fault slip tendency for a specific fault plane defined by its pole.
    Parameters
    ----------
    pole : numpy.ndarray
        Pole to fault plane (1x3)
    pole_unc : numpy.ndarray
        Pole uncertainty (1x3)
    stress_tensor : numpy.ndarray
        3x3 array with principal stresses
    stress_unc : numpy.ndarray
        3x3 array with stress uncertainty
    axis : numpy.ndarray
        1x3 vector with sigma-1 orientation
    axis_unc : numpy.ndarray
        uncertainty for sigma-1 orientation
    pf : float
        Best-guess pore fluid pressure at depth
    mu : float
        Coefficient of static friction for fault
    mu_unc : float
        Uncertainty for mu
    pf_l : float
        Lower offset on pf for the sampled pore-pressure range
    pf_u : float
        Upper offset on pf for the sampled pore-pressure range

    Returns
    -------
    out_data : numpy.ndarray
        nx3 array [fluid pressure, mu, slip tendency]
    """

    # n_sims = 10000
    # pf_range = 25.
    # lower_pf = -0.2
    # upper_pf = 5.9
    # lower_pf = 0.
    # upper_pf = 5.
    upper_pf = pf_u
    lower_pf = pf_l
    # initialize uncertainty bounds
    princ_stress_vec = np.array([stress_tensor[0, 0], stress_tensor[1, 1], stress_tensor[2, 2]])
    princ_stress_unc = np.array([stress_unc[0, 0], stress_unc[1, 1], stress_unc[2, 2]])

    # main simulation loop
    out_data = np.empty((nsims, 3))
    for i in numba.prange(nsims):
        pole_rand = np.random.randn(3)
        pole1 = (pole_unc * pole_rand) + pole
        pole1 = pole1 / np.linalg.norm(pole1)

        stress_rand = np.random.randn(3)
        stress_rand = (princ_stress_unc * stress_rand) + princ_stress_vec

        stress1 = stress_rand * np.identity(3)

        axis_rand = np.random.randn(3)
        axis1 = (axis_unc * axis_rand) + axis
        axis1 = axis1 / np.linalg.norm(axis1)
        # pf1 = np.random.random() * (pf + pf_range)
        hydro1 = pf - lower_pf
        hydro2 = pf + upper_pf
        pf1 = (hydro2 - hydro1) * np.random.random() + hydro1
        mu1 = (np.random.randn() * mu_unc) + mu

        plane_stress = rotate_plane_stress(axis1, pole1, stress1)
        pole_N_centered = np.array([1., 0., 0.])
        pole_N_centered = pole_N_centered / np.linalg.norm(pole_N_centered)
        # Leave this in for SHAME
        sigma_n = plane_stress[0, 0]
        sigma_tyx = plane_stress[1, 0]
        sigma_tzx = plane_stress[2, 0]
        sigma_t = np.sqrt((sigma_tyx ** 2) + (sigma_tzx ** 2))
        # sigma_n_vec = plane_stress @ pole_N_centered
        # sigma_n = np.sqrt(sigma_n_vec.dot(sigma_n_vec))
        # sigma_t_mat = np.sqrt(np.abs(plane_stress @ plane_stress) - (sigma_n ** 2))
        # sigma_t = np.nansum(sigma_t_mat[:])
        # slip_tendency = sigma_t / sigma_n
        sigma_n_eff = sigma_n - pf1
        slip_tendency_eff = sigma_t / sigma_n_eff
        out_data[i, 0] = pf1
        out_data[i, 1] = mu1
        out_data[i, 2] = slip_tendency_eff
    return out_data
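Note that `nsims` and `rotate_plane_stress` are not defined in this example; presumably they live at module level in the original source. One caveat worth spelling out: numba treats captured globals as compile-time constants, so rebinding `nsims` after the first call does not change the compiled function. A small self-contained sketch of that behavior (names here are illustrative):
import numba
import numpy as np

nsims = 10000  # module-level constant, frozen into the compiled function

@numba.njit(parallel=True)
def draw():
    out = np.empty(nsims)
    for i in numba.prange(nsims):
        out[i] = np.random.randn()
    return out

draw()          # compiles against nsims == 10000
nsims = 20000   # rebinding the global has no effect on the compiled code
assert draw().shape == (10000,)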
Example #51
0
def transform(X, parameters):

    num_examples, input_length = X.shape

    dilations, num_features_per_dilation, biases = parameters

    # equivalent to:
    # >>> from itertools import combinations
    # >>> indices = np.array([_ for _ in combinations(np.arange(9), 3)], dtype = np.int32)
    indices = np.array(
        (0, 1, 2, 0, 1, 3, 0, 1, 4, 0, 1, 5, 0, 1, 6, 0, 1, 7, 0, 1, 8, 0, 2,
         3, 0, 2, 4, 0, 2, 5, 0, 2, 6, 0, 2, 7, 0, 2, 8, 0, 3, 4, 0, 3, 5, 0,
         3, 6, 0, 3, 7, 0, 3, 8, 0, 4, 5, 0, 4, 6, 0, 4, 7, 0, 4, 8, 0, 5, 6,
         0, 5, 7, 0, 5, 8, 0, 6, 7, 0, 6, 8, 0, 7, 8, 1, 2, 3, 1, 2, 4, 1, 2,
         5, 1, 2, 6, 1, 2, 7, 1, 2, 8, 1, 3, 4, 1, 3, 5, 1, 3, 6, 1, 3, 7, 1,
         3, 8, 1, 4, 5, 1, 4, 6, 1, 4, 7, 1, 4, 8, 1, 5, 6, 1, 5, 7, 1, 5, 8,
         1, 6, 7, 1, 6, 8, 1, 7, 8, 2, 3, 4, 2, 3, 5, 2, 3, 6, 2, 3, 7, 2, 3,
         8, 2, 4, 5, 2, 4, 6, 2, 4, 7, 2, 4, 8, 2, 5, 6, 2, 5, 7, 2, 5, 8, 2,
         6, 7, 2, 6, 8, 2, 7, 8, 3, 4, 5, 3, 4, 6, 3, 4, 7, 3, 4, 8, 3, 5, 6,
         3, 5, 7, 3, 5, 8, 3, 6, 7, 3, 6, 8, 3, 7, 8, 4, 5, 6, 4, 5, 7, 4, 5,
         8, 4, 6, 7, 4, 6, 8, 4, 7, 8, 5, 6, 7, 5, 6, 8, 5, 7, 8, 6, 7, 8),
        dtype=np.int32).reshape(84, 3)

    num_kernels = len(indices)
    num_dilations = len(dilations)

    num_features = num_kernels * np.sum(num_features_per_dilation)

    features = np.zeros((num_examples, num_features), dtype=np.float32)

    for example_index in prange(num_examples):

        _X = X[example_index]

        A = -_X  # A = alpha * X = -X
        G = _X + _X + _X  # G = gamma * X = 3X

        feature_index_start = 0

        for dilation_index in range(num_dilations):

            _padding0 = dilation_index % 2

            dilation = dilations[dilation_index]
            padding = ((9 - 1) * dilation) // 2

            num_features_this_dilation = num_features_per_dilation[
                dilation_index]

            C_alpha = np.zeros(input_length, dtype=np.float32)
            C_alpha[:] = A

            C_gamma = np.zeros((9, input_length), dtype=np.float32)
            C_gamma[9 // 2] = G

            start = dilation
            end = input_length - padding

            for gamma_index in range(9 // 2):

                C_alpha[-end:] = C_alpha[-end:] + A[:end]
                C_gamma[gamma_index, -end:] = G[:end]

                end += dilation

            for gamma_index in range(9 // 2 + 1, 9):

                C_alpha[:-start] = C_alpha[:-start] + A[start:]
                C_gamma[gamma_index, :-start] = G[start:]

                start += dilation

            for kernel_index in range(num_kernels):

                feature_index_end = feature_index_start + num_features_this_dilation

                _padding1 = (_padding0 + kernel_index) % 2

                index_0, index_1, index_2 = indices[kernel_index]

                C = C_alpha + C_gamma[index_0] + C_gamma[index_1] + C_gamma[
                    index_2]

                if _padding1 == 0:
                    for feature_count in range(num_features_this_dilation):
                        features[example_index,
                                 feature_index_start + feature_count] = _PPV(
                                     C, biases[feature_index_start +
                                               feature_count]).mean()
                else:
                    for feature_count in range(num_features_this_dilation):
                        features[example_index,
                                 feature_index_start + feature_count] = _PPV(
                                     C[padding:-padding],
                                     biases[feature_index_start +
                                            feature_count]).mean()

                feature_index_start = feature_index_end

    return features
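`_PPV` is referenced but not shown. In the MiniRocket reference implementation it is the "proportion of positive values" indicator: 1 where the convolution output exceeds the bias, 0 otherwise, so that `.mean()` over its output yields the PPV feature. A plausible definition (the exact vectorize signature is an assumption):
from numba import vectorize

@vectorize("float32(float32, float32)", nopython=True)
def _PPV(a, b):
    # 1 where the convolution output a exceeds the bias b, else 0;
    # taking .mean() of the broadcast result gives the PPV feature.
    if a > b:
        return 1
    else:
        return 0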
Example #52
0
def _proximity(img, x_coords, y_coords, target_values, distance_metric):

    height, width = img.shape

    max_distance = x_coords[width - 1] + y_coords[height - 1]

    pan_near_x = np.zeros(width, dtype=np.int64)
    pan_near_y = np.zeros(width, dtype=np.int64)

    # output of the function
    img_proximity = np.zeros(shape=(height, width), dtype=np.float64)

    for i in prange(width):
        pan_near_x[i] = -1
        pan_near_y[i] = -1

    scan_line = np.zeros(width, dtype=img.dtype)

    # Loop from top to bottom of the image. Lines must be processed in
    # order (range, not prange): pan_near_x/pan_near_y carry nearest-target
    # state from one line to the next, and scan_line is a shared buffer.
    for line in range(height):
        # Read for target values.
        for i in prange(width):
            scan_line[i] = img[line][i]

        line_proximity = np.zeros(width, dtype=np.float64)
        for i in prange(width):
            line_proximity[i] = -1.0

        # left to right
        _process_proximity_line(scan_line, x_coords, y_coords, pan_near_x,
                                pan_near_y, True, line, width, max_distance,
                                line_proximity, target_values, distance_metric)

        # right to left
        _process_proximity_line(scan_line, x_coords, y_coords, pan_near_x,
                                pan_near_y, False, line, width, max_distance,
                                line_proximity, target_values, distance_metric)

        for i in prange(width):
            img_proximity[line][i] = line_proximity[i]

    # Loop from bottom to top of the image.
    for i in prange(width):
        pan_near_x[i] = -1
        pan_near_y[i] = -1

    for line in range(height - 1, -1, -1):  # sequential: state carries across lines
        # Read first pass proximity.
        for i in prange(width):
            line_proximity[i] = img_proximity[line][i]

        # Read pixel target_values.
        for i in prange(width):
            scan_line[i] = img[line][i]

        # Right to left
        _process_proximity_line(scan_line, x_coords, y_coords, pan_near_x,
                                pan_near_y, False, line, width, max_distance,
                                line_proximity, target_values, distance_metric)

        # Left to right
        _process_proximity_line(scan_line, x_coords, y_coords, pan_near_x,
                                pan_near_y, True, line, width, max_distance,
                                line_proximity, target_values, distance_metric)

        # final post processing of distances
        for i in prange(width):
            if line_proximity[i] < 0 or np.isnan(scan_line[i]):
                # this corresponds to the NaN value of the input raster.
                line_proximity[i] = np.nan

        for i in prange(width):
            img_proximity[line][i] = line_proximity[i]
    return img_proximity
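A hedged usage sketch for the driver above: `x_coords` and `y_coords` carry the per-column and per-row pixel coordinates, and `_process_proximity_line` (not shown) performs the per-line nearest-target scan, so this is not self-contained. Assuming unit pixel spacing, targets marked by the value 1, and whatever integer code `distance_metric` expects:
import numpy as np

img = np.zeros((64, 64), dtype=np.float64)
img[10, 12] = 1.   # two target cells
img[50, 40] = 1.

x_coords = np.arange(img.shape[1], dtype=np.float64)  # per-column coordinates
y_coords = np.arange(img.shape[0], dtype=np.float64)  # per-row coordinates
target_values = np.array([1.], dtype=np.float64)

prox = _proximity(img, x_coords, y_coords, target_values, 0)
# prox[i, j] approximates the distance from pixel (i, j) to the nearest target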
Example #53
0
def _outer_dot(v, X, n_samples, window_size, n_windows):
    X_new = np.empty((n_samples, window_size, window_size, n_windows))
    for i in prange(n_samples):
        for j in prange(window_size):
            X_new[i, j] = np.dot(np.outer(v[i, :, j], v[i, :, j]), X[i])
    return X_new
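Only the outermost prange in a nest is parallelized by numba; the inner `prange(window_size)` runs as an ordinary serial loop inside each parallel sample. The shapes implied by the indexing (stated as assumptions): `v` is (n_samples, window_size, window_size) and `X` is (n_samples, window_size, n_windows). A quick shape check, assuming the function is compiled with numba.njit(parallel=True):
import numpy as np

n_samples, window_size, n_windows = 4, 5, 7
v = np.random.randn(n_samples, window_size, window_size)
X = np.random.randn(n_samples, window_size, n_windows)

X_new = _outer_dot(v, X, n_samples, window_size, n_windows)
assert X_new.shape == (n_samples, window_size, window_size, n_windows)
# X_new[i, j] = outer(v[i, :, j], v[i, :, j]) @ X[i]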
Example #54
0
def calc_trans(data_i_s_):
    for i in nb.prange(num_pop_assigned):
        for t in range(1, sim_time):
            data_i_s_[i, t] = transit(data_i_s_[i, t-1], data_rnd[i, t])
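This example leans on module-level globals (`num_pop_assigned`, `sim_time`, `data_rnd`) and a jitted `transit` helper, none of which are shown. Below is a self-contained variant under assumed definitions; rows are independent trajectories, so parallelizing over `i` is safe while the time loop stays sequential:
import numba
import numpy as np

num_pop_assigned, sim_time = 1000, 365
data_rnd = np.random.rand(num_pop_assigned, sim_time)

@numba.njit
def transit(state, rnd):
    # hypothetical two-state step: flip state with probability 0.1
    return 1 - state if rnd < 0.1 else state

@numba.njit(parallel=True)
def calc_trans(data_i_s_):
    for i in numba.prange(num_pop_assigned):  # independent rows in parallel
        for t in range(1, sim_time):          # sequential time dependence
            data_i_s_[i, t] = transit(data_i_s_[i, t - 1], data_rnd[i, t])

data_i_s = np.zeros((num_pop_assigned, sim_time), dtype=np.int64)
calc_trans(data_i_s)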
Example #55
0
def define_principal_stresses(sv1, depth, shmin1, shmax1, hminaz, hmaxaz, sv_unc=10, shmin_unc=10, shmax_unc=10,
                              v_tilt_unc=10, h_az_unc=10, is_3d=False):
    """
    Generates cauchy stress tensor from principal stress directions, assumes vertical stress in one direction
    rotates sigma1 (maximum principal stress) direction to plane normal

    Parameters
    ----------
    sv : float
        vertical stress at depth
    depth : float
        Depth of analysis (sv = sv_grad * depth)
    shmin : float
        minimum horizontal stress at depth
    shmax : float
        maximum horizontal stress at depth
    hminaz : float
        minimum horizontal stress direction
    hmaxaz : float
        maximum horizontal stress direction
    sv_unc : float
        Vertical stress uncertainty at depth
    shmax_unc : float
        maximum horizontal stress uncertainty
    shmin_unc : float
        Minimum horizontal stress uncertainty
    v_tilt_unc : float
        tilt uncertainty for vertical stress
    h_az_unc : float
        Horizontal stress orientation uncertainty
    is_3d : bool
        Is this model 3d? returns gradients if so

    Returns
    -------
    princ_stress_tensor :  numpy.ndarray
        3x3 stress tensor aligned to principal stress orientations
    princ_stress_unc : numpy.ndarray
    axis : numpy.ndarray
        Sigma-1 stress unit axis (which is rotated to align with plane for normal / shear stress analysis)
    axis_unc : numpy.ndarray
        uncertainty for sigma-1 unit axis
    axis_out : numpy.ndarray

    """
    nsim = nsims
    h_az_unc = math.radians(h_az_unc)
    v_tilt_unc = math.radians(v_tilt_unc)
    if round(abs(hmaxaz - hminaz), 0) != 90.:
        raise ValueError('hmin and hmax are not orthogonal')
    # TODO: Truly fix azimuth issue with stress. Currently add 90 degrees to max stress direction.
    # hmaxaz = math.radians(hmaxaz) + (math.pi/2)
    hmaxaz = math.radians(hmaxaz)

    if is_3d:
        sv1 = sv1 / depth
        shmax1 = shmax1 / depth
        shmin1 = shmin1 / depth
        # Deprecated usage: stresses should be provided in terms of gradients;
        # otherwise this depth conversion may break things.
    axis_out = np.zeros((nsim, 3))
    stress_out = np.zeros((nsim, 3))
    for i in numba.prange(nsim):
        sv = sv_unc * np.random.randn() + sv1
        shmax = shmax_unc * np.random.randn() + shmax1
        shmin = shmin_unc * np.random.randn() + shmin1

        if sv1 > shmax1 > shmin1:
            sigma1 = sv
            sigma2 = shmax
            sigma3 = shmin
            az_rad = h_az_unc * np.random.randn() + hmaxaz
            az_dip_rad = v_tilt_unc * np.random.randn() + math.pi / 2.
            rotated_axis = np.asarray([math.sin(az_rad) * math.cos(az_dip_rad), math.cos(az_rad) * math.cos(az_dip_rad),
                                       math.sin(az_dip_rad)])

        else:
            sigma1 = shmax
            az_rad = h_az_unc * np.random.randn() + hmaxaz
            az_dip_rad = v_tilt_unc * np.random.randn() + 0.  # average dip is 0
            if shmax1 > sv1 > shmin1:
                sigma2 = sv
                sigma3 = shmin
                rotated_axis = np.asarray(
                    [math.sin(az_rad) * math.cos(az_dip_rad), math.cos(az_rad) * math.cos(az_dip_rad),
                     math.sin(az_dip_rad)])
            elif shmax1 > shmin1 > sv1:
                sigma2 = shmin
                sigma3 = sv
                rotated_axis = np.asarray(
                    [math.sin(az_rad) * math.cos(az_dip_rad), math.cos(az_rad) * math.cos(az_dip_rad),
                     math.sin(az_dip_rad)])
            else:
                raise ValueError('Unable to resolve principal stress orientations')
        axis_out[i, :] = rotated_axis
        stress_out[i, :] = np.asarray([sigma1, sigma2, sigma3])
    sigma1_mean = np.mean(stress_out[:, 0])
    sigma2_mean = np.mean(stress_out[:, 1])
    sigma3_mean = np.mean(stress_out[:, 2])
    sigma1_std = np.std(stress_out[:, 0])
    sigma2_std = np.std(stress_out[:, 1])
    sigma3_std = np.std(stress_out[:, 2])
    rotated_axis = np.array([np.mean(axis_out[:, 0]), np.mean(axis_out[:, 1]), np.mean(axis_out[:, 2])])
    rotated_axis = rotated_axis / np.linalg.norm(rotated_axis)
    axis_std = np.array([np.std(axis_out[:, 0]), np.std(axis_out[:, 1]), np.std(axis_out[:, 2])])
    princ_stress_tensor = np.array([[sigma1_mean, 0., 0.], [0., sigma2_mean, 0.], [0., 0., sigma3_mean]])
    princ_stress_tensor_unc = np.array([[sigma1_std, 0., 0.], [0., sigma2_std, 0.], [0., 0., sigma3_std]])
    # rotated_axis = np.array(rotated_axis)
    return princ_stress_tensor, rotated_axis, princ_stress_tensor_unc, axis_std, axis_out
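A hedged usage sketch (values are illustrative; as in the earlier example, `nsims` must already be bound at module level before this function is compiled): a normal-faulting regime with Sv > SHmax > Shmin and azimuths 90 degrees apart, as the orthogonality check requires.
stress, s1_axis, stress_unc, axis_std, axis_samples = define_principal_stresses(
    sv1=60., depth=2000., shmin1=35., shmax1=45., hminaz=70., hmaxaz=160.)
# stress is the 3x3 mean principal stress tensor; s1_axis the mean sigma-1
# unit vector; the *_unc / *_std outputs summarize the Monte Carlo spread.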
Example #56
0
def _prescraamp(
    T_A,
    T_B,
    m,
    T_A_subseq_isfinite,
    T_B_subseq_isfinite,
    p,
    indices,
    s,
    excl_zone=None,
):
    """
    A Numba JIT-compiled implementation of the non-normalized (i.e., without
    z-normalization) preSCRIMP algorithm.

    Parameters
    ----------
    T_A : numpy.ndarray
        The time series or sequence for which to compute the matrix profile

    T_B : numpy.ndarray
        The time series or sequence that will be used to annotate T_A. For every
        subsequence in T_A, its nearest neighbor in T_B will be recorded.

    m : int
        Window size

    T_A_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_A` contains a
        `np.nan`/`np.inf` value (False)

    T_B_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_B` contains a
        `np.nan`/`np.inf` value (False)

    p : float
        The p-norm to apply for computing the Minkowski distance.

    indices : numpy.ndarray
        The subsequence indices for which to compute approximate matrix
        profile values

    s : int
        The sampling interval that defaults to
        `int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))`

    excl_zone : int
        The half width for the exclusion zone relative to the current
        subsequence index

    Returns
    -------
    P : numpy.ndarray
        The (approximate, non-normalized) matrix profile

    I : numpy.ndarray
        The matrix profile indices
    Notes
    -----
    `DOI: 10.1109/ICDM.2018.00099 \
    <https://www.cs.ucr.edu/~eamonn/SCRIMP_ICDM_camera_ready_updated.pdf>`__

    See Algorithm 2
    """
    n_threads = numba.config.NUMBA_NUM_THREADS
    l = T_A.shape[0] - m + 1
    P_NORM = np.full((n_threads, l), np.inf, dtype=np.float64)
    I = np.full((n_threads, l), -1, dtype=np.int64)

    idx_ranges = core._get_ranges(len(indices), n_threads, truncate=False)
    for thread_idx in prange(n_threads):
        _compute_PI(
            T_A,
            T_B,
            m,
            T_A_subseq_isfinite,
            T_B_subseq_isfinite,
            p,
            indices,
            idx_ranges[thread_idx, 0],
            idx_ranges[thread_idx, 1],
            thread_idx,
            s,
            P_NORM,
            I,
            excl_zone,
        )

    for thread_idx in range(1, n_threads):
        for i in range(l):
            if P_NORM[thread_idx, i] < P_NORM[0, i]:
                P_NORM[0, i] = P_NORM[thread_idx, i]
                I[0, i] = I[thread_idx, i]

    return np.power(P_NORM[0], 1.0 / p), I[0]
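The structure above — one private output row per thread, disjoint work ranges, then a serial elementwise-minimum merge — is a general recipe for avoiding race conditions in prange loops. A stripped-down, self-contained sketch of the same pattern:
import numba
import numpy as np

@numba.njit(parallel=True)
def columnwise_min(dists):
    # dists : (n_jobs, l); compute the columnwise minimum with one private
    # buffer per thread, then reduce across threads serially.
    n_threads = numba.config.NUMBA_NUM_THREADS
    n_jobs, l = dists.shape
    P = np.full((n_threads, l), np.inf)
    for thread_idx in numba.prange(n_threads):
        for job in range(thread_idx, n_jobs, n_threads):  # disjoint job split
            for i in range(l):
                if dists[job, i] < P[thread_idx, i]:
                    P[thread_idx, i] = dists[job, i]
    for thread_idx in range(1, n_threads):  # serial reduction, as above
        for i in range(l):
            if P[thread_idx, i] < P[0, i]:
                P[0, i] = P[thread_idx, i]
    return P[0]

d = np.random.rand(37, 11)
assert np.allclose(columnwise_min(d), d.min(axis=0))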
Example #57
0
def _aamp(T_A, T_B, m, T_A_subseq_isfinite, T_B_subseq_isfinite, p, diags,
          ignore_trivial):
    """
    A Numba JIT-compiled version of AAMP for parallel computation of the matrix
    profile and matrix profile indices.

    Parameters
    ----------
    T_A : numpy.ndarray
        The time series or sequence for which to compute the matrix profile

    T_B : numpy.ndarray
        The time series or sequence that will be used to annotate T_A. For every
        subsequence in T_A, its nearest neighbor in T_B will be recorded.

    m : int
        Window size

    T_A_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_A` contains a
        `np.nan`/`np.inf` value (False)

    T_B_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_B` contains a
        `np.nan`/`np.inf` value (False)

    p : float
        The p-norm to apply for computing the Minkowski distance.

    diags : numpy.ndarray
        The diagonals to process and compute

    ignore_trivial : bool
        Set to `True` if this is a self-join. Otherwise, for AB-join, set this to
        `False`. Default is `True`.

    Returns
    -------
    P : numpy.ndarray
        Matrix profile

    I : numpy.ndarray
        Matrix profile indices

    Notes
    -----
    `DOI: 10.1109/ICDM.2018.00099 \
    <https://www.cs.ucr.edu/~eamonn/SCRIMP_ICDM_camera_ready_updated.pdf>`__

    See Algorithm 1
    """
    n_A = T_A.shape[0]
    n_B = T_B.shape[0]
    l = n_A - m + 1
    n_threads = numba.config.NUMBA_NUM_THREADS
    P = np.full((n_threads, l, 3), np.inf, dtype=np.float64)
    I = np.full((n_threads, l, 3), -1, dtype=np.int64)

    ndist_counts = core._count_diagonal_ndist(diags, m, n_A, n_B)
    diags_ranges = core._get_array_ranges(ndist_counts, n_threads, False)

    for thread_idx in prange(n_threads):
        # Compute and update P, I within a single thread while avoiding race conditions
        _compute_diagonal(
            T_A,
            T_B,
            m,
            T_A_subseq_isfinite,
            T_B_subseq_isfinite,
            p,
            diags,
            diags_ranges[thread_idx, 0],
            diags_ranges[thread_idx, 1],
            thread_idx,
            P,
            I,
            ignore_trivial,
        )

    # Reduction of results from all threads
    for thread_idx in range(1, n_threads):
        for i in prange(l):
            if P[0, i, 0] > P[thread_idx, i, 0]:
                P[0, i, 0] = P[thread_idx, i, 0]
                I[0, i, 0] = I[thread_idx, i, 0]
            # left matrix profile and left matrix profile indices
            if P[0, i, 1] > P[thread_idx, i, 1]:
                P[0, i, 1] = P[thread_idx, i, 1]
                I[0, i, 1] = I[thread_idx, i, 1]
            # right matrix profile and right matrix profile indices
            if P[0, i, 2] > P[thread_idx, i, 2]:
                P[0, i, 2] = P[thread_idx, i, 2]
                I[0, i, 2] = I[thread_idx, i, 2]

    return np.power(P[0, :, :], 1.0 / p), I[0, :, :]
Example #58
0
def dotest(n):
    x = np.random.rand(n)
    y = np.random.rand(n)
    z = np.empty_like(x)
    for i in numba.prange(x.shape[0]):
        z[i] = math.sqrt(x[i] * x[i] + y[i] * y[i])
    return z  # return the result so the parallel work is observable
Example #59
0
def dists(col_map, pxl):
    dists = np.empty(col_map.shape[0], dtype=np.double)
    for i in prange(col_map.shape[0]):
        dists[i] = col_dist(col_map[i], pxl)
    return dists
Example #60
0
def f(n):
    A = np.empty(n)
    for i in prange(n):
        S = np.arange(i)
        A[i] = S.sum()
    return A
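The snippet above shows the pattern only in outline, so here is a hedged, self-contained version with the decorator and imports it presupposes. Per-iteration allocations like `S = np.arange(i)` are private to each parallel iteration, and each iteration writes a distinct `A[i]`, so there is no race; the result can be checked against the closed form 0 + 1 + ... + (i-1) = i*(i-1)/2:
import numba
import numpy as np

@numba.njit(parallel=True)
def f(n):
    A = np.empty(n)
    for i in numba.prange(n):
        S = np.arange(i)  # allocated privately per parallel iteration
        A[i] = S.sum()    # distinct slot per iteration: no race
    return A

expected = np.array([i * (i - 1) // 2 for i in range(10)], dtype=np.float64)
assert np.allclose(f(10), expected)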