Example #1
def shift_and_pad(a, left=True):
    if left:
        a_shifted = a[..., 1:]
        pad_array = dsa.zeros_like(a[..., -2:-1])
        return concatenate([a_shifted, pad_array], axis=-1)
    else:
        a_shifted = a[..., :-1]
        pad_array = dsa.zeros_like(a[..., 0:1])
        return concatenate([pad_array, a_shifted], axis=-1)
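A brief usage sketch (not from the source project, and assuming the module-level "import dask.array as dsa" and "from dask.array import concatenate" this snippet relies on):

import dask.array as dsa
from dask.array import concatenate

a = dsa.arange(6, chunks=3)
print(shift_and_pad(a, left=True).compute())   # [1 2 3 4 5 0]
print(shift_and_pad(a, left=False).compute())  # [0 0 1 2 3 4]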
Example #2
    def __init__(self, parameters: Parameter = None):

        self._type = 'acoustic'
        self._ndim = 2
        self._nx, self._nz = parameters['number-of-cells'][0], parameters[
            'number-of-cells'][1]

        self._sxx = da.zeros((self._nx, self._nz), dtype=DTYPE)
        self._vx = da.zeros_like(self._sxx)
        self._vz = da.zeros_like(self._sxx)
Example #3
def fill_correlations(vis, pol):
    """
    Expands single correlation produced by wsclean_predict to the
    full set of correlations.

    Parameters
    ----------
    vis : :class:`dask.array.Array`
        dask array of visibilities of shape :code:`(row, chan, 1)`
    pol : :class:`xarray.Dataset`
        MS Polarisation dataset.

    Returns
    -------
    vis : :class:`dask.array.Array`
        dask array of visibilities of shape :code:`(row, chan, corr)`
    """

    corrs = pol.NUM_CORR.data[0]

    assert vis.ndim == 3

    if corrs == 1:
        return vis
    elif corrs == 2:
        vis = da.concatenate([vis, vis], axis=2)
        return vis.rechunk({2: corrs})
    elif corrs == 4:
        zeros = da.zeros_like(vis)
        vis = da.concatenate([vis, zeros, zeros, vis], axis=2)
        return vis.rechunk({2: corrs})
    else:
        raise ValueError("MS Correlations %d not in (1, 2, 4)" % corrs)
Example #4
def test_add_datavars(ms, tmp_path_factory, prechunking, postchunking):
    store = tmp_path_factory.mktemp("zarr_store")
    ref_datasets = xds_from_ms(ms)

    for i, ds in enumerate(ref_datasets):
        chunks = ds.chunks
        row = sum(chunks["row"])
        chan = sum(chunks["chan"])
        corr = sum(chunks["corr"])

        ref_datasets[i] = ds.assign_coords(
            row=np.arange(row),
            chan=np.arange(chan),
            corr=np.arange(corr),
            dummy=np.arange(10)  # Orphan coordinate.
        )

    chunked_datasets = [ds.chunk(prechunking) for ds in ref_datasets]
    dask.compute(xds_to_zarr(chunked_datasets, store))

    rechunked_datasets = [ds.chunk(postchunking)
                          for ds in xds_from_zarr(store)]
    augmented_datasets = [ds.assign({"DUMMY": (("row", "chan", "corr"),
                                    da.zeros_like(ds.DATA.data))})
                          for ds in rechunked_datasets]
    dask.compute(xds_to_zarr(augmented_datasets, store, rechunk=True))

    augmented_datasets = xds_from_zarr(store)

    assert all([ds.DUMMY.chunks == cds.DATA.chunks
                for ds, cds in zip(augmented_datasets, chunked_datasets)])
Example #5
def test_index_with_int_dask_array_nanchunks(chunks):
    # Slice by array with nan-sized chunks
    a = da.from_array(cupy.arange(-2, 3), chunks=chunks)
    assert_eq(a[a.nonzero()], cupy.array([-2, -1, 1, 2]))
    # Edge case: the nan-sized chunks resolve to size 0
    a = da.zeros_like(cupy.array(()), shape=5, chunks=chunks)
    assert_eq(a[a.nonzero()], cupy.array([]))
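For reference, a NumPy-backed sketch of the same call (not part of the CuPy test above): zeros_like accepts explicit shape= and chunks= overrides.

import numpy as np
import dask.array as da

z = da.zeros_like(np.array([], dtype=float), shape=5, chunks=2)
print(z.chunks, z.compute())  # ((2, 2, 1),) [0. 0. 0. 0. 0.]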
Example #6
def test_dataset_add_column(ms, dtype):
    datasets = read_datasets(ms, [], [], [])
    assert len(datasets) == 1
    ds = datasets[0]

    # Create the dask array
    bitflag = da.zeros_like(ds.DATA.data, dtype=dtype)
    # Assign keyword attribute
    col_kw = {
        "BITFLAG": {
            'FLAGSETS': 'legacy,cubical',
            'FLAGSET_legacy': 1,
            'FLAGSET_cubical': 2
        }
    }
    # Assign variable onto the dataset
    nds = ds.assign(BITFLAG=(("row", "chan", "corr"), bitflag))
    writes = write_datasets(ms,
                            nds, ["BITFLAG"],
                            descriptor='ratt_ms',
                            column_keywords=col_kw)

    dask.compute(writes)

    del datasets, ds, writes, nds
    assert_liveness(0, 0)

    with pt.table(ms, readonly=False, ack=False, lockoptions='auto') as T:
        bf = T.getcol("BITFLAG")
        assert T.getcoldesc("BITFLAG")['keywords'] == col_kw['BITFLAG']
        assert bf.dtype == dtype
Example #7
 def test_wrong_shape_bad_pixel_array(self):
     data = np.ones((5, 10, 20, 30))
     dask_array = da.from_array(data, chunks=(5, 5, 5, 5))
     bad_pixel_array = da.zeros_like(dask_array)
     with pytest.raises(ValueError):
         dt._remove_bad_pixels(dask_array, bad_pixel_array[1, :, :, :])
     with pytest.raises(ValueError):
         dt._remove_bad_pixels(dask_array, bad_pixel_array[1, 1, :-2, :])
Example #8
def deconv_huber(data, fr, fr_npy, L):
    aux = da.zeros_like(data)
    print(n_iter)
    for it in range(n_iter):
        im = wiener(data, aux, fr, fr_npy, L)
        aux = min_gy(im)

    return im, aux
Example #9
def test_average_raises():
    d_a = da.arange(11, chunks=2)

    with pytest.raises(TypeError):
        da.average(d_a, weights=[1, 2, 3])

    with pytest.warns(RuntimeWarning):
        da.average(d_a, weights=da.zeros_like(d_a)).compute()
Example #11
def dmean(dsts):
    """Apply a mean reduction along of all arrays"""
    if len(dsts) == 1:
        return dsts[0]

    out = da.zeros_like(dsts[0])
    for d in dsts:
        out += d

    return out / len(dsts)
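A quick usage sketch under the same import convention (import dask.array as da): averaging three constant arrays yields their mean.

import dask.array as da

arrays = [da.full((4, 4), float(i), chunks=2) for i in range(3)]  # values 0.0, 1.0, 2.0
print(dmean(arrays).compute()[0, 0])  # 1.0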
Example #12
def scheduling():

    global quad

    # Code parameters
    lamb = 0.0005

    fr_npy = ir2fr(psf, sky.shape)
    fr = da.from_array(fr_npy, chunks=psf.shape)
    quad = wiener(dirty, da.zeros_like(sky), fr, fr_npy, lamb)
Example #13
    def apply_strategies(self, flag_windows, vis_windows):
        original = flag_windows.copy()

        # Run flagger strategies
        for strategy in self.strategies:
            try:
                task = strategy['task']
            except KeyError:
                raise ValueError("strategy has no 'task': %s" % strategy)

            if task == "sum_threshold":
                new_flags = sum_threshold_flagger(vis_windows, flag_windows,
                                                  **strategy['kwargs'])
                # sum threshold builds upon any flags that came before
                flag_windows = da.logical_or(new_flags, flag_windows)
            elif task == "uvcontsub_flagger":
                new_flags = uvcontsub_flagger(vis_windows, flag_windows,
                                              **strategy['kwargs'])
                # this task discards previous flags by default during its
                # second iteration. The original flags from the MS should be
                # or'd back in afterwards. Flags from steps prior to this one
                # serve only as an "initial guess"
                flag_windows = new_flags
            elif task == "flag_autos":
                new_flags = flag_autos(flag_windows, self.ubl)
                flag_windows = da.logical_or(new_flags, flag_windows)
            elif task == "combine_with_input_flags":
                # or's in the original flags from the measurement set
                # (if the -if option has not been specified,
                # this option does nothing)
                flag_windows = da.logical_or(flag_windows, original)
            elif task == "unflag":
                flag_windows = da.zeros_like(flag_windows)
            elif task == "flag_nans_zeros":
                flag_windows = flag_nans_and_zeros(vis_windows, flag_windows)
            elif task == "apply_static_mask":
                new_flags = apply_static_mask(flag_windows,
                                              self.ubl,
                                              self.ant_pos,
                                              self.masked_channels,
                                              self.chan_freq,
                                              self.chan_width,
                                              **strategy['kwargs'])
                # override option will override any flags computed previously
                # this may not be desirable so use with care or in combination
                # with combine_with_input_flags option!
                if strategy['kwargs']["accumulation_mode"].strip() == "or":
                    flag_windows = da.logical_or(new_flags, flag_windows)
                else:
                    flag_windows = new_flags

            else:
                raise ValueError("Task '%s' does not name a valid task", task)

        return flag_windows
Example #14
def deconv_huber(data, fr, fr_npy, L, n):
    aux = da.zeros_like(data)
    print(n_iter)
    for it in range(n_iter):
        im = wiener(data, aux, fr, fr_npy, L)
        im = im.rechunk((data.shape[0] // n, data.shape[1] // n))
        aux = min_gy(im)
        aux = aux.rechunk(data.shape)

    im = im.rechunk(data.shape)

    return im, aux
Example #15
def scheduling():

    global quad
    
    # Code parameters
    lamb = 0.0005
    quad_freq = []
    
    # Loop to compute all the frequencies of the cube.
    fr_npy = ir2fr(psf, sky.shape[1:])
    fr = da.from_array(fr_npy, chunks = psf.shape)
    quad = wiener(dirty, da.zeros_like(sky), fr, fr_npy, lamb)
Example #16
def _make_dask_dummy(ds):
    """
    Helper function to make a zeros_like dataset that is backed by dask arrays
    no matter what sorts of arrays are found in the dataset.
    """
    dummy = xr.Dataset()
    for var, arr in ds.items():
        dummy_arr = xr.DataArray(da.zeros_like(arr),
                                 dims=arr.dims,
                                 coords=arr.coords)
        dummy[var] = dummy_arr

    return dummy
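A small usage sketch (assuming the module imports numpy as np, xarray as xr and dask.array as da): even a NumPy-backed dataset comes back zeroed and dask-backed.

import numpy as np
import xarray as xr
import dask.array as da

ds = xr.Dataset({"t": (("x",), np.arange(4.0))})
dummy = _make_dask_dummy(ds)
print(isinstance(dummy["t"].data, da.Array), float(dummy["t"].sum()))  # True 0.0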
Example #17
def cm26_convert_boundary_flux(da, da_full, top=True):
    dummy = xr.DataArray(zeros_like(da_full.data),
                         coords=da_full.coords,
                         dims=da_full.dims)
    if top:
        da.coords['st_ocean'] = da_full['st_ocean'][0]
        dummy_cut = dummy.isel(st_ocean=slice(1, None))
        out = xr.concat([da, dummy_cut], dim='st_ocean')
    else:
        da.coords['st_ocean'] = da_full['st_ocean'][-1]
        dummy_cut = dummy.isel(st_ocean=slice(0, -1))
        out = xr.concat([dummy_cut, da], dim='st_ocean')
    return out
Example #18
def _remove_bad_pixels(dask_array, bad_pixel_array):
    """Replace values in bad pixels with mean of neighbors.

    Parameters
    ----------
    dask_array : Dask array
        Must be at least two dimensions
    bad_pixel_array : array-like
        Must either have the same shape as dask_array,
        or the same shape as the two last dimensions of dask_array.

    Returns
    -------
    data_output : Dask array

    Examples
    --------
    >>> import pyxem.utils.dask_tools as dt
    >>> s = pxm.dummy_data.dummy_data.get_dead_pixel_signal(lazy=True)
    >>> dead_pixels = dt._find_dead_pixels(s.data)
    >>> data_output = dt._remove_bad_pixels(s.data, dead_pixels)

    """
    if len(dask_array.shape) < 2:
        raise ValueError("dask_array {0} must be at least 2 dimensions".format(
            dask_array.shape))
    if bad_pixel_array.shape == dask_array.shape:
        pass
    elif bad_pixel_array.shape == dask_array.shape[-2:]:
        temp_array = da.zeros_like(dask_array)
        bad_pixel_array = da.add(temp_array, bad_pixel_array)
    else:
        raise ValueError(
            "bad_pixel_array {0} must either be 2-D with the same shape "
            "as the two last dimensions of dask_array {1}, or have "
            "the same shape as dask_array {2}".format(bad_pixel_array.shape,
                                                      dask_array.shape[-2:],
                                                      dask_array.shape))
    dif0 = da.roll(dask_array, shift=1, axis=-2)
    dif1 = da.roll(dask_array, shift=-1, axis=-2)
    dif2 = da.roll(dask_array, shift=1, axis=-1)
    dif3 = da.roll(dask_array, shift=-1, axis=-1)

    dif = (dif0 + dif1 + dif2 + dif3) / 4
    dif = dif * bad_pixel_array

    data_output = da.multiply(dask_array, da.logical_not(bad_pixel_array))
    data_output = data_output + dif

    return data_output
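The core trick above is a roll-and-average of the four nearest neighbours, masked onto the bad pixels. A standalone sketch of that pattern (not using pyxem; data values chosen arbitrarily):

import numpy as np
import dask.array as da

data = np.arange(16.0).reshape(4, 4)
data[1, 1] = 999.0                      # a hot pixel to repair
img = da.from_array(data, chunks=2)
bad = da.where(img == 999.0, 1.0, 0.0)  # 1.0 marks bad pixels
neighbour_mean = (da.roll(img, 1, axis=-2) + da.roll(img, -1, axis=-2) +
                  da.roll(img, 1, axis=-1) + da.roll(img, -1, axis=-1)) / 4
fixed = img * da.logical_not(bad) + neighbour_mean * bad
print(fixed.compute()[1, 1])            # 5.0, the mean of the four neighbours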
Example #19
def scheduling():

    global quad

    # Code parameters
    lamb = 0.0005

    # Loop to compute all the frequencies of the cube.
    fr_npy = ir2fr(np.tile(psf_npy[np.newaxis], (n, 1, 1)),
                   shape=sky.shape[1:])
    fr = da.from_array(
        fr_npy,
        chunks=((1, ) + fr_npy.shape[1:]))  # Watch the chunking of "fr" here

    quad = wiener(dirty, da.zeros_like(sky), fr, fr_npy, lamb)
Example #20
def scheduling():

    global quad
    
    # Code parameters
    lamb = 0.0005
    quad_freq = []
    
    # Loop to compute all the frequencies of the cube.
    for i in range(dirty.shape[0]):
        fr_npy = ir2fr(psf[i], sky[i].shape)
        fr = da.from_array(fr_npy, chunks = psf[i].shape)
        quad_freq.append(wiener(dirty[i], da.zeros_like(sky[i]), fr, fr_npy, lamb))

    quad = da.stack(quad_freq, axis=0)
Example #21
def deconv_huber(data, fr, lamb):
    aux = da.zeros_like(data)
    print(n_iter)
    for it in range(n_iter):
        im = delayed(wiener)(data, aux, fr, lamb)
        aux = delayed(min_gy)(im)
        '''
        aux = []
        for pix in im:
            aux_pix = delayed(min_gy)(pix)
            aux.append(aux_pix)
        '''
#   total = delayed(inc)(im)
#    total.visualize()
    
    return im, aux
Example #22
def scheduling():

    global n_iter
    global huber
    global hub
    global quad

    # Code parameters
    n_iter = 50
    huber = {'threshold': 0.01, 'inf': 1}
    lamb = 0.0005

    fr_npy = ir2fr(psf, sky.shape)
    fr = da.from_array(fr_npy, psf.shape)
    quad = wiener(dirty, da.zeros_like(sky), fr, fr_npy, lamb)
    hub, aux_hub = deconv_huber(dirty, fr, fr_npy, lamb)
Example #23
def create_new_diagnostic_cube(name,
                               units,
                               coordinate_template,
                               attributes=None,
                               data=None,
                               dtype=np.float32):
    """
    Creates a template for a new diagnostic cube with suitable metadata.

    Args:
        name (str):
            Standard or long name for output cube
        units (str or cf_units.Unit):
            Units for output cube
        coordinate_template (iris.cube.Cube):
            Cube from which to copy dimensional and auxiliary coordinates
        attributes (dict or None):
            Dictionary of attribute names and values
        data (numpy.ndarray or None):
            Data array.  If not set, cube is filled with zeros using a lazy
            data object, as this will be overwritten later by the caller
            routine.
        dtype (numpy.dtype):
            Datatype for dummy cube data if "data" argument is None.

    Returns:
        iris.cube.Cube:
            Cube with correct metadata to accommodate new diagnostic field
    """
    if data is None:
        data = da.zeros_like(coordinate_template.core_data(), dtype=dtype)

    aux_coords_and_dims, dim_coords_and_dims = [[
        (coord, coordinate_template.coord_dims(coord))
        for coord in getattr(coordinate_template, coord_type)
    ] for coord_type in ('aux_coords', 'dim_coords')]

    cube = iris.cube.Cube(data,
                          units=units,
                          attributes=attributes,
                          dim_coords_and_dims=dim_coords_and_dims,
                          aux_coords_and_dims=aux_coords_and_dims)
    cube.rename(name)

    return cube
Example #24
def test_ufunc_where(dtype, left_is_da, right_is_da, where_kind):
    left = np.arange(12).reshape((3, 4))
    right = np.arange(4)
    out = np.zeros_like(left, dtype=dtype)
    d_out = da.zeros_like(left, dtype=dtype)

    if where_kind in (True, False):
        d_where = where = where_kind
    else:
        d_where = where = np.array([False, True, True, False])
        if where_kind == "dask":
            d_where = da.from_array(where, chunks=2)

    d_left = da.from_array(left, chunks=2) if left_is_da else left
    d_right = da.from_array(right, chunks=2) if right_is_da else right

    expected = np.add(left, right, where=where, out=out, dtype=dtype)
    result = da.add(d_left, d_right, where=d_where, out=d_out, dtype=dtype)
    assert result is d_out
    assert_eq(expected, result)
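A minimal sketch of the pattern this test exercises (inputs are illustrative): da.zeros_like provides the out= buffer, and where= selects which elements the ufunc actually writes; the rest keep the zeros.

import numpy as np
import dask.array as da

x = da.from_array(np.arange(6).reshape(2, 3), chunks=2)
out = da.zeros_like(x)
res = da.add(x, 10, where=np.array([True, False, True]), out=out)
print(res.compute())  # [[10  0 12]
                      #  [13  0 15]]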
Example #25
def zeros_like(arr):
    """
    Smooth over the differences between zeros_like implementations for
    different array types

    Parameters
    ----------
    arr : array-like

    Returns
    -------
    A zeroed array of the same array type as *arr*
    """
    if isinstance(arr, np.ndarray):
        return np.zeros_like(arr)
    elif isinstance(arr, xr.DataArray):
        return xr.zeros_like(arr)
    elif isinstance(arr, zarr.Array):
        return zarr.zeros_like(arr)
    elif isinstance(arr, da.Array):
        return da.zeros_like(arr)
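A quick usage sketch (assuming the np/xr/zarr/da imports this helper expects): the return type matches the input type.

import numpy as np
import dask.array as da

print(type(zeros_like(np.ones(3))))            # <class 'numpy.ndarray'>
print(type(zeros_like(da.ones(3, chunks=3))))  # <class 'dask.array.core.Array'>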
Example #26
def shift_and_pad(a):
    a_shifted = a[..., 1:]
    pad_array = dsa.zeros_like(a[..., -2:-1])
    return concatenate([a_shifted, pad_array], axis=-1)
Example #27
def lengths_and_angles_to_box_vectors(a_length, b_length, c_length, alpha, beta, gamma):
    """Convert from the lengths/angles of the unit cell to the box
    vectors (Bravais vectors). The angles should be in degrees.

    Mimics mdtraj.core.unitcell.lengths_and_angles_to_box_vectors()

    Parameters
    ----------
    a_length : scalar or ndarray
        length of Bravais unit vector **a**
    b_length : scalar or ndarray
        length of Bravais unit vector **b**
    c_length : scalar or ndarray
        length of Bravais unit vector **c**
    alpha : scalar or ndarray
        angle between vectors **b** and **c**, in degrees.
    beta : scalar or ndarray
        angle between vectors **c** and **a**, in degrees.
    gamma : scalar or ndarray
        angle between vectors **a** and **b**, in degrees.

    Returns
    -------
    a : dask.array
        If the inputs are scalar, the vectors will be one dimensional (length 3).
        If the inputs are one dimensional, shape=(n_frames, ), then the output
        will be (n_frames, 3)
    b : dask.array
        If the inputs are scalar, the vectors will be one dimensional (length 3).
        If the inputs are one dimensional, shape=(n_frames, ), then the output
        will be (n_frames, 3)
    c : dask.array
        If the inputs are scalar, the vectors will be one dimensional (length 3).
        If the inputs are one dimensional, shape=(n_frames, ), then the output
        will be (n_frames, 3)

    This code is adapted from gyroid, which is licensed under the BSD
    http://pythonhosted.org/gyroid/_modules/gyroid/unitcell.html
    """
    # Fix for da that requires angles and lengths to be arrays
    lengths = [a_length, b_length, c_length]
    for i, e in enumerate(lengths):
        # Use python logic shortcutting to not compute dask Arrays
        if not isinstance(e, da.core.Array) and np.isscalar(e):
            lengths[i] = np.array([e])
    a_length, b_length, c_length = tuple(lengths)

    angles = [alpha, beta, gamma]
    for i, e in enumerate(angles):
        if not isinstance(e, da.core.Array) and np.isscalar(e):
            angles[i] = np.array([e])
    alpha, beta, gamma = tuple(angles)

    if da.all(alpha < 2 * np.pi) and (
        da.all(beta < 2 * np.pi) and da.all(gamma < 2 * np.pi)
    ):
        warnings.warn(
            "All your angles were less than 2*pi."
            " Did you accidentally give me radians?"
        )

    alpha = alpha * np.pi / 180
    beta = beta * np.pi / 180
    gamma = gamma * np.pi / 180

    a = da.stack([a_length, da.zeros_like(a_length), da.zeros_like(a_length)])
    b = da.stack(
        [b_length * da.cos(gamma), b_length * da.sin(gamma), da.zeros_like(b_length)]
    )
    cx = c_length * da.cos(beta)
    cy = c_length * (da.cos(alpha) - da.cos(beta) * da.cos(gamma)) / da.sin(gamma)
    cz = da.sqrt(c_length * c_length - cx * cx - cy * cy)
    c = da.stack([cx, cy, cz])
    if not a.shape == b.shape == c.shape:
        raise TypeError("Shape is messed up.")

    # Make sure that all vector components that are _almost_ 0 are set exactly
    # to 0
    tol = 1e-6
    a[da.logical_and(a > -tol, a < tol)] = 0.0
    b[da.logical_and(b > -tol, b < tol)] = 0.0
    c[da.logical_and(c > -tol, c < tol)] = 0.0

    return a.T, b.T, c.T
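A hedged usage sketch for the scalar case, a 1 x 1 x 1 cell with all angles at 90 degrees (output values are approximate, up to the 1e-6 zero tolerance):

a, b, c = lengths_and_angles_to_box_vectors(1.0, 1.0, 1.0, 90.0, 90.0, 90.0)
print(a.compute())  # [[1. 0. 0.]]
print(b.compute())  # [[0. 1. 0.]]
print(c.compute())  # [[0. 0. 1.]]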
Example #28
    def volume_curvature(self,
                         darray_il,
                         darray_xl,
                         dip_factor=10,
                         kernel=(3, 3, 3),
                         preview=None):
        """
        Description
        -----------
        Compute volume curvature attributes from 3D seismic dips
        
        Parameters
        ----------
        darray_il : Array-like, Inline dip - acceptable inputs include 
            Numpy, HDF5, or Dask Arrays
        darray_xl : Array-like, Crossline dip - acceptable inputs include 
            Numpy, HDF5, or Dask Arrays
        
        Keyword Arguments
        -----------------
        dip_factor : Number, scalar for dip values
        kernel : tuple (len 3), operator size
        preview : str, enables or disables preview mode and specifies direction
            Acceptable inputs are (None, 'inline', 'xline', 'z')
            Optimizes chunk size in different orientations to facilitate rapid
            screening of algorithm output
        
        Returns
        -------
        H, K, Kmax, Kmin, KMPos, KMNeg : Dask Array, {H : 'Mean Curvature', 
                                                      K : 'Gaussian Curvature',
                                                      Kmax : 'Max Curvature',
                                                      Kmin : 'Min Curvature',
                                                      KMPos : Most Positive Curvature,
                                                      KMNeg : Most Negative Curvature}
        """

        np.seterr(all='ignore')

        # Generate Dask Array as necessary
        darray_il, chunks_init = self.create_array(darray_il,
                                                   kernel,
                                                   preview=preview)
        darray_xl, chunks_init = self.create_array(darray_xl,
                                                   kernel,
                                                   preview=preview)

        u = -darray_il / dip_factor
        v = -darray_xl / dip_factor
        w = da.ones_like(u, chunks=u.chunks)

        # Compute Gradients
        ux = sp().first_derivative(u, axis=0)
        uy = sp().first_derivative(u, axis=1)
        uz = sp().first_derivative(u, axis=2)
        vx = sp().first_derivative(v, axis=0)
        vy = sp().first_derivative(v, axis=1)
        vz = sp().first_derivative(v, axis=2)

        # Smooth Gradients
        ux = ux.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        uy = uy.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        uz = uz.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        vx = vx.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        vy = vy.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        vz = vz.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)

        u = util.trim_dask_array(u, kernel)
        v = util.trim_dask_array(v, kernel)
        w = util.trim_dask_array(w, kernel)
        ux = util.trim_dask_array(ux, kernel)
        uy = util.trim_dask_array(uy, kernel)
        uz = util.trim_dask_array(uz, kernel)
        vx = util.trim_dask_array(vx, kernel)
        vy = util.trim_dask_array(vy, kernel)
        vz = util.trim_dask_array(vz, kernel)

        wx = da.zeros_like(ux, chunks=ux.chunks, dtype=ux.dtype)
        wy = da.zeros_like(ux, chunks=ux.chunks, dtype=ux.dtype)
        wz = da.zeros_like(ux, chunks=ux.chunks, dtype=ux.dtype)

        uv = u * v
        vw = v * w
        u2 = u * u
        v2 = v * v
        w2 = w * w
        u2pv2 = u2 + v2
        v2pw2 = v2 + w2
        s = da.sqrt(u2pv2 + w2)

        # Measures of surfaces
        E = da.ones_like(u, chunks=u.chunks, dtype=u.dtype)
        F = -(u * w) / (da.sqrt(u2pv2) * da.sqrt(v2pw2))
        G = da.ones_like(u, chunks=u.chunks, dtype=u.dtype)
        D = -(-uv * vx + u2 * vy + v2 * ux - uv * uy) / (u2pv2 * s)
        Di = -(vw * (uy + vx) - 2 * u * w * vy - v2 * (uz + wx) + uv *
               (vz + wy)) / (2 * da.sqrt(u2pv2) * da.sqrt(v2pw2) * s)
        Dii = -(-vw * wy + v2 * wz + w2 * vy - vw * vz) / (v2pw2 * s)
        H = (E * Dii - 2 * F * Di + G * D) / (2 * (E * G - F * F))
        K = (D * Dii - Di * Di) / (E * G - F * F)
        Kmin = H - da.sqrt(H * H - K)
        Kmax = H + da.sqrt(H * H - K)

        H[da.isnan(H)] = 0
        K[da.isnan(K)] = 0
        Kmax[da.isnan(Kmax)] = 0
        Kmin[da.isnan(Kmin)] = 0

        KMPos = da.maximum(Kmax, Kmin)
        KMNeg = da.minimum(Kmax, Kmin)

        return (H, K, Kmax, Kmin, KMPos, KMNeg)
Example #29
 lambda x: x,
 lambda x: da.expm1(x),
 lambda x: 2 * x,
 lambda x: x / 2,
 lambda x: x ** 2,
 lambda x: x + x,
 lambda x: x * x,
 lambda x: x[0],
 lambda x: x[:, 1],
 lambda x: x[:1, None, 1:3],
 lambda x: x.T,
 lambda x: da.transpose(x, (1, 2, 0)),
 lambda x: x.sum(),
 lambda x: da.empty_like(x),
 lambda x: da.ones_like(x),
 lambda x: da.zeros_like(x),
 lambda x: da.full_like(x, 5),
 pytest.param(
     lambda x: x.mean(),
     marks=pytest.mark.skipif(
         not IS_NEP18_ACTIVE or cupy.__version__ < LooseVersion("6.4.0"),
         reason="NEP-18 support is not available in NumPy or CuPy older than "
         "6.4.0 (requires https://github.com/cupy/cupy/pull/2418)",
     ),
 ),
 pytest.param(
     lambda x: x.moment(order=0),
 ),
 lambda x: x.moment(order=2),
 pytest.param(
     lambda x: x.std(),
Example #30
def predict(args):
    # get inclusion regions
    include_regions = []
    exclude_regions = []
    if args.within:
        from regions import read_ds9
        import tempfile
        # kludge because regions cries over "FK5", wants lowercase
        with tempfile.NamedTemporaryFile(mode="w") as tmpfile, open(
                args.within) as regfile:
            tmpfile.write(regfile.read().lower())
            tmpfile.flush()
            include_regions = read_ds9(tmpfile.name)
            log.info("read {} inclusion region(s) from {}".format(
                len(include_regions), args.within))

    # Import source data from WSClean component list
    # See https://sourceforge.net/p/wsclean/wiki/ComponentList
    (comp_type, radec, stokes, spec_coeff, ref_freq, log_spec_ind,
     gaussian_shape) = import_from_wsclean(args.sky_model,
                                           include_regions=include_regions,
                                           exclude_regions=exclude_regions,
                                           point_only=args.points_only,
                                           num=args.num_sources or None)

    # Get the support tables
    tables = support_tables(
        args, ["FIELD", "DATA_DESCRIPTION", "SPECTRAL_WINDOW", "POLARIZATION"])

    field_ds = tables["FIELD"]
    ddid_ds = tables["DATA_DESCRIPTION"]
    spw_ds = tables["SPECTRAL_WINDOW"]
    pol_ds = tables["POLARIZATION"]
    frequencies = np.sort(
        [spw_ds[dd].CHAN_FREQ.data.flatten() for dd in range(len(spw_ds))])

    # cluster sources and refit. This only works for delta scale sources
    def __cluster(comp_type, radec, stokes, spec_coeff, ref_freq, log_spec_ind,
                  gaussian_shape, frequencies):
        uniq_radec = np.unique(radec)
        ncomp_type = []
        nradec = []
        nstokes = []
        nspec_coef = []
        nref_freq = []
        nlog_spec_ind = []
        ngaussian_shape = []

        for urd in uniq_radec:
            print(comp_type.shape)
            print(radec.shape)
            deltasel = comp_type[radec == urd] == "POINT"
            polyspecsel = np.logical_not(spec_coeff[radec == urd])
            sel = deltasel & polyspecsel
            Is = stokes[sel, 0, None] * frequencies[None, :]**0
            for jj in range(spec_coeff.shape[1]):
                Is += spec_coeff[sel, jj, None] * (
                    frequencies[None, :] / ref_freq[sel, None] - 1)**(jj + 1)
            Ispoly = np.sum(
                Is, axis=0)  # collapse over all the sources at this position
            logpolyspecsel = np.logical_not(log_spec_ind[radec == urd])
            sel = deltasel & logpolyspecsel

            Is = np.log(stokes[sel, 0, None] * frequencies[None, :]**0)
            for jj in range(spec_coeff.shape[1]):
                Is += spec_coeff[sel, jj, None] * da.log(
                    (frequencies[None, :] / ref_freq[sel, None])**(jj + 1))
            Is = np.exp(Is)
            Islogpoly = np.sum(
                Is, axis=0)  # collapse over all the sources at this position

            popt, pfitvar = curve_fit(
                lambda i, a, b, c, d: i + a *
                (frequencies / ref_freq[0, None] - 1) + b *
                (frequencies / ref_freq[0, None] - 1)**2 + c *
                (frequencies / ref_freq[sel, None] - 1)**3 + d *
                (frequencies / ref_freq[0, None] - 1)**3, frequencies,
                Ispoly + Islogpoly)
            if not np.all(np.isfinite(pfitvar)):
                popt[0] = np.sum(stokes[sel, 0, None], axis=0)
                popt[1:] = np.inf
                log.warn(
                    "Refitting at position {0:s} failed. Assuming flat spectrum source of {1:.2f} Jy"
                    .format(radec, popt[0]))
            else:
                pcov = np.sqrt(np.diag(pfitvar))
                log.info(
                    "New fitted flux {0:.3f} Jy at position {1:s} with covariance {2:s}"
                    .format(popt[0], radec,
                            ", ".join([str(poptp) for poptp in popt])))

            ncomp_type.append("POINT")
            nradec.append(urd)
            nstokes.append(popt[0])
            nspec_coef.append(popt[1:])
            nref_freq.append(ref_freq[0])
            nlog_spec_ind.append(0.0)

        # add back all the gaussians
        sel = comp_type == "GAUSSIAN"
        for rd, stks, spec, ref, lspec, gs in zip(radec[sel], stokes[sel],
                                                  spec_coeff[sel],
                                                  ref_freq[sel],
                                                  log_spec_ind[sel],
                                                  gaussian_shape[sel]):
            ncomp_type.append("GAUSSIAN")
            nradec.append(rd)
            nstokes.append(stks)
            nspec_coef.append(spec)
            nref_freq.append(ref)
            nlog_spec_ind.append(lspec)
            ngaussian_shape.append(gs)

        log.info(
            "Reduced {0:d} components to {1:d} components by refitting"
            .format(len(comp_type), len(ncomp_type)))
        return (np.array(ncomp_type), np.array(nradec), np.array(nstokes),
                np.array(nspec_coef), np.array(nref_freq),
                np.array(nlog_spec_ind), np.array(ngaussian_shape))

    if not args.dontcluster:
        (comp_type, radec, stokes, spec_coeff, ref_freq, log_spec_ind,
         gaussian_shape) = __cluster(comp_type, radec, stokes, spec_coeff,
                                     ref_freq, log_spec_ind, gaussian_shape,
                                     frequencies)

    # Add output column if it isn't present
    ms_rows, ms_datatype = ms_preprocess(args)

    # sort out resources
    args.row_chunks, args.model_chunks = get_budget(
        comp_type.shape[0], ms_rows, max([ss.NUM_CHAN.data for ss in spw_ds]),
        max([ss.NUM_CORR.data for ss in pol_ds]), ms_datatype, args)

    radec = da.from_array(radec, chunks=(args.model_chunks, 2))
    stokes = da.from_array(stokes, chunks=(args.model_chunks, 4))

    if np.count_nonzero(comp_type == 'GAUSSIAN') > 0:
        gaussian_components = True
        gshape_chunks = (args.model_chunks, 3)
        gaussian_shape = da.from_array(gaussian_shape, chunks=gshape_chunks)
    else:
        gaussian_components = False

    if args.spectra:
        spec_chunks = (args.model_chunks, spec_coeff.shape[1])
        spec_coeff = da.from_array(spec_coeff, chunks=spec_chunks)
        ref_freq = da.from_array(ref_freq, chunks=(args.model_chunks, ))

    # List of write operations
    writes = []

    # Construct a graph for each DATA_DESC_ID
    for xds in xds_from_ms(args.ms,
                           columns=["UVW", "ANTENNA1", "ANTENNA2", "TIME"],
                           group_cols=["FIELD_ID", "DATA_DESC_ID"],
                           chunks={"row": args.row_chunks}):
        if xds.attrs['FIELD_ID'] != args.fieldid:
            continue

        # Extract frequencies from the spectral window associated
        # with this data descriptor id
        field = field_ds[xds.attrs['FIELD_ID']]
        ddid = ddid_ds[xds.attrs['DATA_DESC_ID']]
        spw = spw_ds[ddid.SPECTRAL_WINDOW_ID.values]
        pol = pol_ds[ddid.POLARIZATION_ID.values]
        frequency = spw.CHAN_FREQ.data

        corrs = pol.NUM_CORR.values

        lm = radec_to_lm(radec, field.PHASE_DIR.data)

        if args.exp_sign_convention == 'casa':
            uvw = -xds.UVW.data
        elif args.exp_sign_convention == 'thompson':
            uvw = xds.UVW.data
        else:
            raise ValueError("Invalid sign convention '%s'" % args.sign)

        if args.spectra:
            # flux density at reference frequency ...
            # ... for logarithmic polynomial functions
            if log_spec_ind:
                Is = da.log(stokes[:, 0, None]) * frequency[None, :]**0
                # ... or for ordinary polynomial functions
            else:
                Is = stokes[:, 0, None] * frequency[None, :]**0
            # additional terms of SED ...
            for jj in range(spec_coeff.shape[1]):
                # ... for logarithmic polynomial functions
                if log_spec_ind:
                    Is += spec_coeff[:, jj, None] * da.log(
                        (frequency[None, :] / ref_freq[:, None])**(jj + 1))
                    # ... or for ordinary polynomial functions
                else:
                    Is += spec_coeff[:, jj, None] * (
                        frequency[None, :] / ref_freq[:, None] - 1)**(jj + 1)
            if log_spec_ind: Is = da.exp(Is)
            Qs = da.zeros_like(Is)
            Us = da.zeros_like(Is)
            Vs = da.zeros_like(Is)
            spectrum = da.stack(
                [Is, Qs, Us, Vs], axis=-1
            )  # stack along new axis and make it the last axis of the new array
            spectrum = spectrum.rechunk(spectrum.chunks[:2] +
                                        (spectrum.shape[2], ))

        log.info('-------------------------------------------')
        log.info('Nr sources        = {0:d}'.format(stokes.shape[0]))
        log.info('-------------------------------------------')
        log.info('stokes.shape      = {0:}'.format(stokes.shape))
        log.info('frequency.shape   = {0:}'.format(frequency.shape))
        if args.spectra: log.info('Is.shape          = {0:}'.format(Is.shape))
        if args.spectra:
            log.info('spectrum.shape    = {0:}'.format(spectrum.shape))

        # (source, row, frequency)
        phase = phase_delay(lm, uvw, frequency)
        # If at least one Gaussian component is present in the component list then all
        # sources are modelled as Gaussian components (Delta components have zero width)
        if gaussian_components:
            phase *= gaussian(uvw, frequency, gaussian_shape)
        # (source, frequency, corr_products)
        brightness = convert(spectrum if args.spectra else stokes,
                             ["I", "Q", "U", "V"], corr_schema(pol))

        log.info('brightness.shape  = {0:}'.format(brightness.shape))
        log.info('phase.shape       = {0:}'.format(phase.shape))
        log.info('-------------------------------------------')
        log.info('Attempting phase-brightness einsum with "{0:s}"'.format(
            einsum_schema(pol, args.spectra)))

        # (source, row, frequency, corr_products)
        jones = da.einsum(einsum_schema(pol, args.spectra), phase, brightness)
        log.info('jones.shape       = {0:}'.format(jones.shape))
        log.info('-------------------------------------------')
        if gaussian_components: log.info('Some Gaussian sources found')
        else: log.info('All sources are Delta functions')
        log.info('-------------------------------------------')

        # Identify time indices
        _, time_index = da.unique(xds.TIME.data, return_inverse=True)

        # Predict visibilities
        vis = predict_vis(time_index, xds.ANTENNA1.data, xds.ANTENNA2.data,
                          None, jones, None, None, None, None)

        # Reshape (2, 2) correlation to shape (4,)
        if corrs == 4:
            vis = vis.reshape(vis.shape[:2] + (4, ))

        # Assign visibilities to MODEL_DATA array on the dataset
        model_data = xr.DataArray(vis, dims=["row", "chan", "corr"])
        xds = xds.assign(**{args.output_column: model_data})
        # Create a write to the table
        write = xds_to_table(xds, args.ms, [args.output_column])
        # Add to the list of writes
        writes.append(write)

    # Submit all graph computations in parallel
    if args.num_workers:
        with ProgressBar(), dask.config.set(num_workers=args.num_workers):
            dask.compute(writes)
    else:
        with ProgressBar():
            dask.compute(writes)
Example #31
def create_new_diagnostic_cube(name,
                               units,
                               template_cube,
                               mandatory_attributes,
                               optional_attributes=None,
                               data=None,
                               dtype=np.float32):
    """
    Creates a new diagnostic cube with suitable metadata.

    Args:
        name (str):
            Standard or long name for output cube
        units (str or cf_units.Unit):
            Units for output cube
        template_cube (iris.cube.Cube):
            Cube from which to copy dimensional and auxiliary coordinates
        mandatory_attributes (dict):
            Dictionary containing values for the mandatory attributes
            "title", "source" and "institution".  These are overridden by
            values in the optional_attributes dictionary, if specified.
        optional_attributes (dict or None):
            Dictionary of optional attribute names and values.  If values for
            mandatory attributes are included in this dictionary they override
            the values of mandatory_attributes.
        data (numpy.ndarray or None):
            Data array.  If not set, cube is filled with zeros using a lazy
            data object, as this will be overwritten later by the caller
            routine.
        dtype (numpy.dtype):
            Datatype for dummy cube data if "data" argument is None.

    Returns:
        iris.cube.Cube:
            Cube with correct metadata to accommodate new diagnostic field
    """
    attributes = mandatory_attributes
    if optional_attributes is not None:
        attributes.update(optional_attributes)

    error_msg = ""
    for attr in MANDATORY_ATTRIBUTES:
        if attr not in attributes:
            error_msg += "{} attribute is required\n".format(attr)
    if error_msg:
        raise ValueError(error_msg)

    if data is None:
        data = da.zeros_like(template_cube.core_data(), dtype=dtype)

    aux_coords_and_dims, dim_coords_and_dims = [[
        (coord.copy(), template_cube.coord_dims(coord))
        for coord in getattr(template_cube, coord_type)
    ] for coord_type in ('aux_coords', 'dim_coords')]

    cube = iris.cube.Cube(data,
                          units=units,
                          attributes=attributes,
                          dim_coords_and_dims=dim_coords_and_dims,
                          aux_coords_and_dims=aux_coords_and_dims)
    cube.rename(name)

    return cube