Example #1
def blstd(data, mask):
    """ Calculate std over baselines (ignoring zeros).
    Expects masked array as input
    """

    nint, nbl, nchan, npol = data.shape
    # getting "data type not understood" if this typed as float32
    blstd = np.zeros((nint, nchan, npol), dtype=complex64)

    for i in range(nint):
        for j in range(nchan):
            for k in range(npol):
                ss = complex64(0)
                weight = int64(0)
                for l in range(nbl):
                    ss += data[i, l, j, k]
                    if not mask[i, l, j, k]:
                        weight += 1
                if weight > 0:
                    mean = ss / weight
                    ss = complex64(0)
                    for l in range(nbl):
                        if not mask[i, l, j, k]:
                            ss += np.abs((data[i, l, j, k] - mean)**2)
                    blstd[i, j, k] = np.sqrt(ss / weight)
                else:
                    blstd[i, j, k] = complex64(0)

    return blstd.real
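
A minimal usage sketch for the function above, assuming it is compiled with numba.jit(nopython=True) and that complex64/int64 are the numba types imported at module level; the array shapes and the flag-as-zero convention are illustrative:

import numpy as np
from numba import jit, complex64, int64

blstd_jit = jit(nopython=True)(blstd)

# Fake visibilities with shape (nint, nbl, nchan, npol); flagged samples are zeroed
data = (np.random.randn(4, 10, 8, 2)
        + 1j * np.random.randn(4, 10, 8, 2)).astype(np.complex64)
data[0, 3] = 0j                # flag one baseline in the first integration
mask = data == 0j              # True where flagged

sigma = blstd_jit(data, mask)  # real-valued, shape (nint, nchan, npol)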
Example #2
def _meantsub_jit(data):
    """ Calculate mean in time (ignoring zeros) and subtract in place

    Could ultimately parallelize by computing only on subset of data.
    """

    nint, nbl, nchan, npol = data.shape

    for i in range(nbl):
        for j in range(nchan):
            for k in range(npol):
                ss = complex64(0)
                weight = int64(0)
                for l in range(nint):
                    ss += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight += 1
                if weight > 0:
                    mean = ss/weight
                else:
                    mean = complex64(0)

                if mean:
                    for l in range(nint):
                        if data[l, i, j, k] != 0j:
                            data[l, i, j, k] -= mean
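
For reference, a vectorized NumPy equivalent of the same zero-ignoring mean subtraction (a sketch for checking results; unlike the loop above it returns a new array rather than working in place):

import numpy as np

def meantsub_numpy(data):
    """Subtract the per-(bl, chan, pol) time mean, ignoring zeroed samples."""
    nonzero = data != 0j
    counts = nonzero.sum(axis=0)
    mean = np.where(counts > 0, data.sum(axis=0) / np.maximum(counts, 1), 0)
    return np.where(nonzero, data - mean, data)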
Example #3
def _meantsub_jit(data):
    """ Calculate mean in time (ignoring zeros) and subtract in place

    Could ultimately parallelize by computing only on subset of data.
    """

    nint, nbl, nchan, npol = data.shape

    for i in range(nbl):
        for j in range(nchan):
            for k in range(npol):
                ss = complex64(0)
                weight = int64(0)
                for l in range(nint):
                    ss += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight += 1
                if weight > 0:
                    mean = ss / weight
                else:
                    mean = complex64(0)

                if mean:
                    for l in range(nint):
                        if data[l, i, j, k] != 0j:
                            data[l, i, j, k] -= mean
Example #4
def _sinewave(num, den):
    """Generate a complex sine wave of frequency sample_rate*num/den.
    Length is chosen such that a continuous sine wave
    can be made by repeating the returned signal."""

    # The code below fails for num=0, so handle that as a special case
    if num == 0:
        return nb.complex64([1.0])

    # "% den" is not absolutely necessary here, but wrapping the phase
    # using integers may avoid loss of floating point precision.
    phase = \
        (np.arange(0, num*den, num, dtype = np.int64) % den) \
        .astype(np.float32) * nb.float32(2.0 * np.pi / den)
    return np.cos(phase) + np.sin(phase) * nb.complex64(1j)
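
A quick check of the continuity property promised by the docstring (a sketch; nb is numba and np is numpy, as in the function above):

import numpy as np

tone = _sinewave(3, 16)            # frequency sample_rate * 3/16, length 16
two_blocks = np.tile(tone, 2)      # repeating the block keeps the phase continuous
ref = np.exp(2j * np.pi * 3 / 16 * np.arange(2 * len(tone)))
print(np.allclose(two_blocks, ref, atol=1e-5))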
Example #5
def _meantsub_gu(data):
    b""" Subtract time mean while ignoring zeros.
    Vectorizes over time axis.
    Assumes time axis is last so use np.swapaxis(0,3) when passing visibility array in
    **CURRENTLY NOT WORKING WITH FLAGS**
    """

    ss = complex64(0)
    weight = int64(0)
    for i in range(data.shape[0]):
        ss += data[i]
        if data[i] != complex64(0):
            weight += 1
    mean = ss / weight
    for i in range(data.shape[0]):
        data[i] -= mean
Example #6
def _meantsub_gu(data):
    b""" Subtract time mean while ignoring zeros.
    Vectorizes over time axis.
    Assumes time axis is last so use np.swapaxis(0,3) when passing visibility array in
    **CURRENTLY NOT WORKING WITH FLAGS**
    """

    ss = complex64(0)
    weight = int64(0)
    for i in range(data.shape[0]):
        ss += data[i]
        if data[i] != complex64(0):
            weight += 1
    mean = ss/weight
    for i in range(data.shape[0]):
        data[i] -= mean
Example #7
def _2ptsub_jit(data):
    """ Calculate 2-pt time trend and evaluate to subtract at each time.
    """

    nint, nbl, nchan, npol = data.shape

    for i in range(nbl):
        for j in range(nchan):
            for k in range(npol):
                # first half mean
                ss1 = complex64(0)
                weight1 = int64(0)
                for l in range(0, nint // 2):
                    ss1 += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight1 += 1
                if weight1 > 0:
                    mean1 = ss1 / weight1
                else:
                    mean1 = complex64(0)

                # second half mean
                ss2 = complex64(0)
                weight2 = int64(0)
                for l in range(nint // 2, nint):
                    ss2 += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight2 += 1
                if weight2 > 0:
                    mean2 = ss2 / weight2
                else:
                    mean2 = complex64(0)

                # calc mean per int
                if mean1 and mean2:
                    slope = (mean2 - mean1) / (nint // 2)
                    mean0 = (mean2 + mean1) / 2
                    for l in range(nint):
                        if data[l, i, j, k] != 0j:
                            data[l, i, j, k] -= slope * (l - nint // 2) + mean0
                else:  # or just blank data
                    for l in range(nint):
                        data[l, i, j, k] = 0j
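
The linear model subtracted above passes through mean1 at the centre of the first half (l = nint // 4) and mean2 at the centre of the second half (l = 3 * nint // 4); a quick arithmetic check with illustrative values:

nint = 8
mean1, mean2 = 1 + 0j, 3 + 0j
slope = (mean2 - mean1) / (nint // 2)
mean0 = (mean2 + mean1) / 2
print(slope * (nint // 4 - nint // 2) + mean0)      # (1+0j) == mean1
print(slope * (3 * nint // 4 - nint // 2) + mean0)  # (3+0j) == mean2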
Example #8
    def execute(self, input):
        firlen = self.firlen
        interpolation = self.interpolation
        decimation = self.decimation

        lo_phase = self.lo_phase
        fir_phase = self.fir_phase
        fir_i = self.fir_i

        #output = np.zeros_like(input)
        output = np.zeros(1000, dtype=np.complex64)  # TODO: size of the array
        outn = 0  # number of output samples produced
        for sample_in in input:
            # Mix input with local oscillator
            mixed = sample_in * self.lo_table[lo_phase]

            #lo_phase = (lo_phase + 1) % len(self.lo_table)
            lo_phase += 1
            if lo_phase >= len(self.lo_table):
                lo_phase = 0

            # Store to buffer.
            # Fake circular buffering by storing the samples twice.
            self.firbuf[fir_i         ] = \
            self.firbuf[fir_i + firlen] = mixed

            #fir_i = (fir_i + 1) % firlen
            fir_i += 1
            if fir_i >= firlen:
                fir_i = 0

            fir_phase += interpolation
            while fir_phase >= decimation:
                fir_phase -= decimation
                assert fir_phase >= 0
                assert fir_phase < interpolation

                t = self.taps[fir_phase::interpolation]
                fb = self.firbuf[fir_i:fir_i + firlen]

                # This loop seems to be faster than calling np.dot
                o = nb.complex64(0.0)
                for j in range(firlen):
                    o += t[j] * fb[j]

                output[outn] = o
                outn += 1
                assert outn <= len(output)

        self.lo_phase = lo_phase
        self.fir_phase = fir_phase
        self.fir_i = fir_i
        return output[0:outn]
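
The fir_phase accumulator is what sets the rational resampling ratio: every input advances it by interpolation and every output consumes decimation, so a block of n inputs yields roughly n * interpolation / decimation outputs. A standalone sketch of just that bookkeeping (values are illustrative):

def count_outputs(n_in, interpolation, decimation, fir_phase=0):
    outn = 0
    for _ in range(n_in):
        fir_phase += interpolation
        while fir_phase >= decimation:
            fir_phase -= decimation
            outn += 1
    return outn, fir_phase

print(count_outputs(1000, 3, 8))   # -> (375, 0): 1000 * 3 / 8 output samples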
Example #9
def _2ptinterp_jit(data):
    """ Calculate 2-pt time trend and evaluate to subtract at each time.
    """

    nint, nbl, nchan, npol = data.shape

    for i in range(nbl):
        for j in range(nchan):
            for k in range(npol):
                # first half mean
                ss1 = complex64(0)
                weight1 = int64(0)
                for l in range(0, nint // 2):
                    ss1 += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight1 += 1
                if weight1 > 0:
                    mean1 = ss1 / weight1
                else:
                    mean1 = complex64(0)

                # second half mean
                ss2 = complex64(0)
                weight2 = int64(0)
                for l in range(nint // 2, nint):
                    ss2 += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight2 += 1
                if weight2 > 0:
                    mean2 = ss2 / weight2
                else:
                    mean2 = complex64(0)

                ff = interpolate.interp1d([nint // 4, 3 * nint // 4],
                                          [mean1, mean2],
                                          fill_value='extrapolate')
                for l in range(nint):
                    if data[l, i, j, k] != 0j:
                        data[l, i, j, k] -= ff(l)
Example #10
def _cssub1_jit(data, dataavginterp):
    """ Use interpolated data to subtract while ignoring zeros
    """

    nint, nbl, nchan, npol = data.shape

    for i in range(nbl):
        for j in range(nchan):
            for k in range(npol):
                for l in range(nint):
                    if data[l, i, j, k] == 0j:
                        dataavginterp[l, i, j, k] = complex64(0)
                    data[l, i, j, k] -= dataavginterp[l, i, j, k]
Example #11
def meantsub_cuda(data):
    """ Calculate mean in time (ignoring zeros) and subtract in place """

    x, y, z = cuda.grid(3)
    nint, nbl, nchan, npol = data.shape
    if x < nbl and y < nchan and z < npol:
        sum = complex64(0)
        weight = 0
        for i in range(nint):
            sum = sum + data[i, x, y, z]
            if data[i, x, y, z] != 0j:
                weight = weight + 1
        if weight > 0:
            mean = sum / weight
            for i in range(nint):
                if data[i, x, y, z] != 0j:
                    data[i, x, y, z] = data[i, x, y, z] - mean
Example #12
def meantsub_cuda(data):
    """ Calculate mean in time (ignoring zeros) and subtract in place """

    x, y, z = cuda.grid(3)
    nint, nbl, nchan, npol = data.shape
    if x < nbl and y < nchan and z < npol:
        sum = complex64(0)
        weight = 0
        for i in range(nint):
            sum = sum + data[i, x, y, z]
            if data[i, x, y, z] != 0j:
                weight = weight + 1
        if weight > 0:
            mean = sum / weight
            for i in range(nint):
                if data[i, x, y, z] != 0j:
                    data[i, x, y, z] = data[i, x, y, z] - mean
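
A minimal sketch of how such a kernel might be compiled and launched, assuming a CUDA-capable GPU and that cuda and complex64 are imported from numba; the block and grid sizes below are illustrative:

import math
import numpy as np
from numba import cuda

meantsub_kernel = cuda.jit(meantsub_cuda)

data = (np.random.randn(16, 32, 64, 2)
        + 1j * np.random.randn(16, 32, 64, 2)).astype(np.complex64)
d_data = cuda.to_device(data)

threadsperblock = (8, 8, 2)
blockspergrid = tuple(math.ceil(s / t)
                      for s, t in zip(data.shape[1:], threadsperblock))
meantsub_kernel[blockspergrid, threadsperblock](d_data)
result = d_data.copy_to_host()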
Example #13
def _cssub0_jit(data, dataavg):
    """ Use scipy calculate 4-pt mean as input to spline estimate.
    zeroed data is treated as flagged
    """

    nint, nbl, nchan, npol = data.shape
    piece = nint // 4

    for i in range(nbl):
        for j in range(nchan):
            for k in range(npol):
                # mean in each piece
                for pp in range(4):
                    ss = complex64(0)
                    weight = int64(0)
                    for l in range(pp * piece, (pp + 1) * piece):
                        ss += data[l, i, j, k]
                        if data[l, i, j, k] != 0j:
                            weight += 1
                    if weight > 0:
                        dataavg[pp, i, j, k] = ss / weight
                    else:
                        dataavg[pp, i, j, k] = complex64(0)  # TODO: instead use nearest?
Example #14
import warnings

import numpy as np
from numba import vectorize, complex64
from numpy import fft
from scipy.fftpack import next_fast_len
from scipy.ndimage import gaussian_filter1d

try:
    from mkl_fft import fft2, ifft2
except ModuleNotFoundError:
    warnings.warn(
        "mkl_fft not installed.  Install it with conda: conda install mkl_fft",
        ImportWarning)


@vectorize([complex64(complex64, complex64)], nopython=True, target='parallel')
def apply_dotnorm(Y, cfRefImg):
    return Y / (np.complex64(1e-5) + np.abs(Y)) * cfRefImg


@vectorize([
    'complex64(int16, float32, float32)',
    'complex64(float32, float32, float32)'
],
           nopython=True,
           target='parallel',
           cache=True)
def addmultiply(x, mul, add):
    return np.complex64(np.float32(x) * mul + add)
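
An elementwise-usage sketch for the two ufuncs above (array shapes and values are illustrative; both broadcast like ordinary NumPy ufuncs):

import numpy as np

frames = np.random.randint(-100, 100, size=(4, 128, 128)).astype(np.int16)
cfRefImg = (np.random.randn(128, 128)
            + 1j * np.random.randn(128, 128)).astype(np.complex64)

tapered = addmultiply(frames, np.float32(0.5), np.float32(1.0))   # complex64 output
Y = np.fft.fft2(tapered).astype(np.complex64)
corr = apply_dotnorm(Y, cfRefImg)     # unit-magnitude spectra times the reference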

Example #15
import numba as nb
import numpy as np
from numba import jit, prange


@nb.vectorize([nb.complex64(nb.float32), nb.complex128(nb.float64)])
def complex_exponential(x):
    return np.cos(x) + 1.j * np.sin(x)


@nb.vectorize([nb.float32(nb.complex64), nb.float64(nb.complex128)])
def abs2(x):
    return x.real**2 + x.imag**2


@jit(nopython=True, nogil=True, parallel=True)
def interpolate_radial_functions(array, disc_indices, positions, v, r, dvdr,
                                 sampling):
    n = r.shape[0]
    dt = np.log(r[-1] / r[0]) / (n - 1)
    for i in range(positions.shape[0]):
        for j in prange(disc_indices.shape[0]):
            k = int(round(positions[i, 0] / sampling[0]) + disc_indices[j, 0])
            l = int(round(positions[i, 1] / sampling[1]) + disc_indices[j, 1])

            if ((k < array.shape[0]) & (l < array.shape[1]) & (k >= 0) &
                (l >= 0)):
                r_interp = np.sqrt((k * sampling[0] - positions[i, 0])**2 +
                                   (l * sampling[1] - positions[i, 1])**2)

                idx = int(np.floor(np.log(r_interp / r[0] + 1e-7) / dt))
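
A quick sketch exercising the two vectorized helpers defined above (values are illustrative):

import numpy as np

angles = np.linspace(0, 2 * np.pi, 8, dtype=np.float32)
wave = complex_exponential(angles)                 # complex64, equals np.exp(1j * angles)
print(np.allclose(wave, np.exp(1j * angles), atol=1e-5))
print(np.allclose(abs2(wave), 1.0, atol=1e-5))     # |exp(i*theta)|^2 == 1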
Example #16
    @classmethod
    def from_hdf5(cls, hdf5: h5py.File) -> 'PrimaryBeam':
        model_format = models.get_hdf5_attr(hdf5.attrs, 'model_format',
                                            str) or ''
        if model_format == 'aperture_plane':
            return PrimaryBeamAperturePlane.from_hdf5(hdf5)
        else:
            raise models.ModelFormatError(
                f'Unknown model_format {model_format!r} for {cls.model_type}')

    def to_hdf5(self, hdf5: h5py.File) -> None:
        raise NotImplementedError()  # pragma: nocover


@numba.vectorize([numba.complex64(numba.float32)])
def _expjm2pi(x):
    """Equivalent to ``exp(-2j * np.pi * x)`` where `x` is real.

    x is reduced to a small value before multiplication, which
    improves precision at a small cost in performance.
    """
    y = np.float32(-2 * np.pi) * (x - np.rint(x))
    return complex(np.cos(y), np.sin(y))


def _asarray(x: ArrayLike, dtype: Optional[DTypeLike] = None) -> np.ndarray:
    """Convert an array-like to an array.

    Unlike np.ndarray, this will reject astropy Quantities with dimensions
    and convert dimensionless quantities correctly even if they have scale.
Example #17
from numba import vectorize, complex64, boolean, jit
import time
import numpy as np
from stats import cpu_stats, gpu_stats
from time import sleep

@vectorize([boolean(complex64)])
def f(z):
    return (z.real*z.real + z.imag*z.imag) < 4.0

@vectorize([complex64(complex64, complex64)])
def g(z,c):
    return z*z + c 

@jit
def mandelbrot_numpy(c, maxiter):
    output = np.zeros(c.shape, np.int64)
    z = np.zeros(c.shape, np.complex64)
    for it in range(maxiter):
        notdone = f(z)
        output[notdone] = it
        z[notdone] = g(z[notdone],c[notdone]) 
    output[output == maxiter-1] = 0
    return output

def mandelbrot_set(width,height,xmin,xmax,ymin,ymax,maxiter):
    r1 = np.linspace(xmin, xmax, width, dtype=np.float32)
    r2 = np.linspace(ymin, ymax, height, dtype=np.float32)
    c = r1 + r2[:,None]*1j
    n3 = mandelbrot_numpy(c,maxiter)
    return (r1,r2,n3.T)
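
Typical usage of the helpers above (a sketch; the plotting step is optional):

import matplotlib.pyplot as plt

r1, r2, image = mandelbrot_set(600, 400, -2.0, 0.5, -1.25, 1.25, 80)
plt.imshow(image.T, origin='lower', extent=(-2.0, 0.5, -1.25, 1.25), cmap='hot')
plt.show()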
Example #18
            out[xs + d, ys + d] = err / cn
    return out.argmin()


@_numba.vectorize(
    [_numba.float64(_numba.complex128),
     _numba.float32(_numba.complex64)],
    target="parallel",
)
def abs2(x):
    return x.real * x.real + x.imag * x.imag


@_numba.vectorize(
    [_numba.complex128(_numba.complex128),
     _numba.complex64(_numba.complex64)],
    target="parallel",
)
def abs2c(x):
    return x.real * x.real + x.imag * x.imag + 0j


def fill(data, invalid=None):
    """
    fill invalid values by closest valid value. invalid: mask of invalid values, default: np.isnan(data)
    """
    if invalid is None:
        invalid = _np.isnan(data)
    ind = _snd.distance_transform_edt(invalid,
                                      return_distances=False,
                                      return_indices=True)
Example #19
    def expj(x):
        return math.cos(x) + 1j * math.sin(x)

    def abs_sqd(x):
        return np.real(x * np.conj(x))

    def sum_abs_sqd(x):
        return np.sum(np.real(x * np.conj(x)))

    def sum_abs_sqd_1d(x):
        return np.sum(np.real(x * np.conj(x)), axis=0)

else:

    @numba.vectorize([numba.complex64(numba.float64)],
                     nopython=True,
                     cache=True)
    def expj(x):
        return math.cos(x) + 1j * math.sin(x)

    @numba.vectorize(
        cache=True
    )  # ([numba.float64(numba.complex128),numba.float32(numba.complex64)])
    def abs_sqd(x):
        return x.real**2 + x.imag**2

    @numba.jit(cache=True)
    def sum_abs_sqd_1d(x):
        r = 0
        for i in range(len(x)):
Example #20
def smoothing_real_space(quantity,smoothing_scale):
    array_size = len(np.arange(0,cluster_grid,4))
    smoothing_array = np.zeros([array_size,array_size,array_size],dtype=np.float32)
    
    for ix,i in enumerate(np.arange(0,cluster_grid,4)):
        for iy,j in enumerate(np.arange(0,cluster_grid,4)):
            for iz,k in enumerate(np.arange(0,cluster_grid,4)):
                i = int(i)
                j = int(j)
                k = int(k)
           
                smoothing_array[ix,iy,iz] = np.mean(quantity[i:i+smoothing_scale,j:j+smoothing_scale,k:k+smoothing_scale])
    
    return smoothing_array

@jit(complex64(complex64,float32,int32,int32))
def smoothing_k_space(fft_array,smoothing_scale,cluster_length,cluster_grid):
    smoothing_array = copy.deepcopy(fft_array)
    cluster_length = 40
    # cluster_grid = 201

    for ix in range(int(fft_array.shape[0]/2)):
        for iy in range(int(fft_array.shape[1]/2)):
            for iz in range(int(fft_array.shape[2]/2)):

                k = np.sqrt( (ix/cluster_length)**2 + (iy/cluster_length)**2 + (iz/cluster_length)**2 )

                smoothing_array[ix,iy,iz] = fft_array[ix,iy,iz]*np.exp( (-(k**2)*smoothing_scale**2)/2)

        for iy in range(int(cluster_grid/2),cluster_grid-1):
            for iz in range(int(cluster_grid/2),cluster_grid-1):
Example #21
__all__ = ['DadaReader', 'lookup_warn', '__version__', '__all__']


def lookup_warn(table, key, default=None):
    try:
        return table[key]
    except KeyError:
        if default is not None:
            print "#Warning: No key '%s'; using default value of %s" \
                  % (key, default)
            return default
        else:
            print "#Warning: No key '%s'" % key
            return None

@jit(complex64(float32), target='cpu')
def data2cplx(data):
    """ Convert data into complex form

    Helper function to allow for JIT compilation
    """
    #shape: (200, 2, 600, 2176)
    data = data[:, 0, :, :] - 1j * data[:, 1, :, :]
    return data

@jit(complex64(complex64), target='cpu')
def reorderTranspose(mat, n_int, n_chans, n_station, n_pol):
    """ Do reorder and transpose operation """
    # Reorder so that pol products change fastest
    mat = mat.reshape(n_int, n_chans, n_station, n_pol, n_station, n_pol)
    mat = mat.transpose([0, 2, 4, 1, 3, 5])
Example #22
    return k - (kx**2 + ky**2) / (2 * k)


@numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
def calc_kz_quadratic(k, kx, ky):
    return -(kx**2 + ky**2) / (2 * k)


@numba.vectorize([numba.float64(numba.float64, numba.float64)])
def calc_kz_quadratic_1d(k, q):
    """kz not included."""
    return -q**2 / (2 * k)


@numba.vectorize([
    numba.complex64(numba.float64, numba.float64, numba.float64, numba.float64)
])
def calc_propagator_exact(k, kx, ky, l):
    return mathx.expj(calc_kz_exact(k, kx, ky) * l)


@numba.vectorize([
    numba.complex64(numba.float64, numba.float64, numba.float64, numba.float64)
])
def calc_propagator_paraxial(k, kx, ky, l):
    return mathx.expj(calc_kz_paraxial(k, kx, ky) * l)


@numba.vectorize([
    numba.complex64(numba.float64, numba.float64, numba.float64, numba.float64)
])
Example #23
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 11 20:50:04 2015

@author: raghavsinghal
"""
import numpy as np
import matplotlib.pyplot as plt
from numba import jit, complex64, float64, int64
from math import pi


@jit(complex64(float64, complex64[:, :], float64, float64, float64, float64))
def f(t, u, a, b, g, e):
    dx = np.zeros((18, 1), dtype='complex')

    ua = u[0, 0]
    ub = u[1, 0]
    ug = u[2, 0]

    va = u[3, 0]
    vb = u[4, 0]
    vg = u[5, 0]

    ha = u[6, 0]
    hb = u[7, 0]
    hg = u[8, 0]

    u_a = u[9, 0]
    u_b = u[10, 0]
    u_g = u[11, 0]
Example #24
    @numba.njit
    def norm_squared(x):
        return dot(x, x)


    @numba.njit
    def norm(x):
        return norm_squared(x)**0.5


    @numba.njit
    def normalize(x):
        return np.asarray(x)/norm(x)

    @numba.vectorize([numba.complex64(numba.float64, numba.float64, numba.float64)], cache=True)
    def calc_ideal_lens_phase_paraxial(x, y, k_on_f):
        return cmath.exp(-0.5j*(x**2 + y**2)*k_on_f)


    @numba.vectorize([numba.complex64(numba.float64, numba.float64, numba.float64, numba.float64)], cache=True)
    def calc_ideal_lens_phase(x, y, k, f):
        return cmath.exp(-1j*k*((x**2 + y**2 + f**2)**0.5 - f))


    @numba.vectorize([numba.complex64(numba.float64, numba.float64, numba.float64, numba.float64)], cache=True)
    def calc_ideal_square_lens_array_phase_paraxial(x, y, pitch, k_on_f):
        half_pitch = pitch/2
        u = (x%pitch) - half_pitch
        v = (y%pitch) - half_pitch
        return cmath.exp(-0.5j*(u**2 + v**2)*k_on_f)
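
A sketch of evaluating the paraxial lens phase on a grid (all values illustrative; k_on_f is the wavenumber divided by the focal length):

import numpy as np

wavelength = 633e-9                      # metres
focal_length = 0.1                       # metres
k = 2 * np.pi / wavelength

x = np.linspace(-1e-3, 1e-3, 256)
X, Y = np.meshgrid(x, x)
lens = calc_ideal_lens_phase_paraxial(X, Y, k / focal_length)   # complex64 phase factor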
Example #25
"""Variants of standard maths functions written for speed."""

import numpy as np
import numba


@numba.vectorize(
    [numba.complex64(numba.float32),
     numba.complex128(numba.float64)])
def expj2pi(x):
    """Equivalent to ``expj(2 * np.pi * x)`` where `x` is real.

    x is reduced to a small value before multiplication, which
    improves precision at a small cost in performance.
    """
    y = 2 * np.pi * (x - np.rint(x))
    return complex(np.cos(y), np.sin(y))


def nansum(x, *args, **kwargs):
    """Like np.nansum, provided `x` is a floating-point type."""
    return np.sum(x, *args, where=~np.isnan(x), **kwargs)
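
A small check of the precision claim in the docstring (illustrative; the exact answer for a fractional part of 0.25 is exp(j*pi/2) = 1j):

import numpy as np

x = np.float64(2**26 + 0.25)       # exactly representable; fractional part is 0.25
naive = np.exp(2j * np.pi * x)     # phase error grows with the magnitude of x
reduced = expj2pi(x)               # rint-based reduction keeps the argument small
print(abs(naive - 1j), abs(reduced - 1j))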
Example #26
                                                 num_args,
                                                 mat_dim_list,
                                                 num_reps,
                                                 dtype=np.csingle)

#%%
'''
NUMBA TESTS
'''
#%% Function declarations
import cmath
from numba import vectorize, complex128, complex64


@vectorize(
    [complex64(complex64, complex64),
     complex128(complex128, complex128)],
    target='parallel')
def nb_add(x, y):
    return x + y


@vectorize(
    [complex64(complex64, complex64),
     complex128(complex128, complex128)],
    target='parallel')
def nb_subtract(x, y):
    return x - y


@vectorize(