Example #1
def migration(population, total, k, min_density, csx, csy, **kwargs):
    """
    'relocation of a portion of a population from one region to another (migration dispersal)'

    :param da.Array population: population to redistribute
    :param da.Array k: Carrying Capacity - target location weight and distribution
    :param float min_density: Each element may have a minimum possible density when relocating population

    kwargs:
        source_weight da.Array:  Proportion of population to remove from an element. Bounded by `0` and `1`
        target_weight da.Array:  Multiplier for carrying capacity at destination    

    :return: Redistributed population
    """
    if min_density is None:
        min_density = 0

    source_weight = da.clip(
        kwargs.get("source_weight", da.ones_like(population)), 0, 1)
    target_weight = da.clip(
        kwargs.get("target_weight", da.ones_like(population)), 0, 1)

    # Remove using the source weight
    removed = population * source_weight
    new_population = population - removed

    # Weighting is a combination of k and the target weights
    target_locations = k * target_weight

    def redistribute(pop, target, default):
        # Redistribute population proportionally at the target locations
        redistribution = target / target.sum()
        # If there are no target locations, all redistribution values will be inf.
        # In this case, use the default
        return da.where(
            da.isinf(redistribution) | da.isnan(redistribution), default,
            pop * redistribution)

    new_population += redistribute(removed.sum(), target_locations, removed)

    # Remove locations where density is not met
    density_not_met = (target_locations >
                       0) & (new_population / target_locations < min_density)

    removed = da.where(density_not_met, new_population, 0)
    new_population -= removed

    new_population += redistribute(
        removed.sum(), da.where(density_not_met, 0, target_locations), removed)

    return new_population
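
A minimal usage sketch, assuming dask.array is imported as da; total, csx and csy are not used in the body shown, so placeholders are passed:

import numpy as np
import dask.array as da

population = da.from_array(np.array([10.0, 10.0, 0.0, 0.0]), chunks=2)
k = da.from_array(np.array([1.0, 1.0, 2.0, 2.0]), chunks=2)
# Remove everyone from the first two cells and redistribute in proportion to k.
new_pop = migration(population, None, k, min_density=0.0, csx=None, csy=None,
                    source_weight=da.from_array(np.array([1.0, 1.0, 0.0, 0.0]), chunks=2))
print(new_pop.compute())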
Example #2
def haversines(x1, x2, y1, y2, z1=None, z2=None):
    # Approximate east/north (and optional vertical) separations on a sphere of
    # radius cst.r_earth, from longitudes/latitudes in degrees
    # (equirectangular approximation).
    x1, x2 = da.deg2rad(x1), da.deg2rad(x2)
    y1, y2 = da.deg2rad(y1), da.deg2rad(y2)

    x = (x2 - x1) * da.cos((y1 + y2) * 0.5) * cst.r_earth
    y = (y2 - y1) * cst.r_earth * da.ones_like(x1) * da.ones_like(x2)

    if z1 is None or z2 is None:
        return da.stack((x, y), axis=-1)
    else:
        z1 = da.where(da.isnan(z1), 0, z1)
        z2 = da.where(da.isnan(z2), 0, z2)
        z = (z2 - z1) * da.ones_like(x)
        return da.stack((x, y, z), axis=-1)
Example #3
def add_intercept(X):
    if np.isnan(np.sum(X.shape)):
        raise NotImplementedError("Can not add intercept to array with "
                                  "unknown chunk shape")
    j, k = X.chunks
    o = da.ones_like(X, shape=(X.shape[0], 1), chunks=(j, 1))
    if is_dask_array_sparse(X):
        o = o.map_blocks(sparse.COO)
    # TODO: Needed this `.rechunk` for the solver to work
    # Is this OK / correct?
    X_i = da.concatenate([X, o], axis=1).rechunk((j, (k[0] + 1, )))
    return X_i
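
A standalone sketch of the same intercept-append pattern for a dense dask array; is_dask_array_sparse and the sparse branch are project-specific helpers and are left out here:

import dask.array as da

X = da.random.random((6, 3), chunks=(3, 3))
j, k = X.chunks
# Column of ones with matching row chunks, appended as the intercept term.
o = da.ones_like(X, shape=(X.shape[0], 1), chunks=(j, 1))
X_i = da.concatenate([X, o], axis=1).rechunk((j, (k[0] + 1,)))
print(X_i.shape, X_i.chunks)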
Example #4
def convert_flux_array(da, da_full, dim, top=True, fillval=0):
    # Note: `da` here is an xarray.DataArray argument; `ones_like` is presumably
    # dask.array.ones_like imported directly into the module namespace.
    dummy = xr.DataArray(ones_like(da_full.data) * fillval,
                         coords=da_full.coords,
                         dims=da_full.dims)
    if top:
        da.coords[dim] = da_full[dim][0]
        dummy_cut = dummy[{dim: slice(1, None)}]
        out = xr.concat([da, dummy_cut], dim=dim)
    else:
        da.coords[dim] = da_full[dim][-1]
        dummy_cut = dummy[{dim: slice(0, -1)}]
        out = xr.concat([dummy_cut, da], dim=dim)
    return out
Example #5
def get_unitcell(result_dict, length):
    """
    Makes an  py:class:`dask.array` for the unitcell information if it can be
    loaded from the fileformat, otherwise returns None.

    Parameters
    ----------
    result_dict: dict of :py:class:`dask.delayed` objects
        dict of delayed objects where we make the time from  grab  into an dask
        array.
    lenght : int
        total length of the final dask array.

    Returns
    -------
    (unitcell_lengths, unitcell_angles, unitcell_vectors)
        dask array from the delayed objects for each term if it can be loaded,
        None otherwise.
    """

    # TODO add ensure type on these lengths
    unitcell_lengths = result_dict.pop("unitcell_lengths", None)
    unitcell_angles = result_dict.pop("unitcell_angles", None)
    unitcell_vectors = result_dict.pop("unitcell_vectors", None)
    if (unitcell_lengths is None and unitcell_angles is None
            and unitcell_vectors is None):
        ul = None
        ua = None
        uv = None
        return ul, ua, uv

    # Now if there is info to be loaded
    if unitcell_vectors is not None:
        uv = make_da(unitcell_vectors, length)
    else:
        uv = None

    if unitcell_lengths is not None and unitcell_angles is None:
        ul = make_da(unitcell_lengths, length)
        ua = da.ones_like(ul)
    elif unitcell_angles is not None:
        ul = make_da(unitcell_lengths, length)
        ua = make_da(unitcell_angles, length)
    else:
        ul = None
        ua = None

    return ul, ua, uv
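
A hypothetical usage sketch; make_da below is only a stand-in (assumed to turn a list of delayed frames into a single dask array), not the project's real helper:

import numpy as np
import dask
import dask.array as da

def make_da(delayed_frames, length):
    # Stand-in: stack delayed (1, 3) frames into a (length, 3) dask array.
    arrays = [da.from_delayed(d, shape=(1, 3), dtype=np.float64) for d in delayed_frames]
    return da.concatenate(arrays, axis=0)

result_dict = {"unitcell_lengths": [dask.delayed(np.full((1, 3), 10.0))]}
ul, ua, uv = get_unitcell(result_dict, length=1)
print(ua.compute())  # angles fall back to da.ones_like(ul) when only lengths are present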
Example #6
def scatter_with_regression(
    x: da.Array,
    y: da.Array,
    sample_size: int,
    k: Optional[int] = None
) -> Tuple[Tuple[da.Array, da.Array], Tuple[da.Array, da.Array],
           Optional[da.Array]]:
    """Calculate pearson correlation on 2 given arrays.

    Parameters
    ----------
    xarr : da.Array
    yarr : da.Array
    sample_size : int
    k : Optional[int] = None
        Highlight k points which influence pearson correlation most
    """
    if k == 0:
        raise ValueError("k should be larger than 0")

    xp1 = da.vstack([x, da.ones_like(x)]).T
    xp1 = xp1.rechunk((xp1.chunks[0], -1))

    mask = ~(da.isnan(x) | da.isnan(y))
    # if chunk size in the first dimension is 1, lstsq will use sfqr instead of tsqr,
    # where the former does not support nan in shape.

    if len(xp1.chunks[0]) == 1:
        xp1 = xp1.rechunk((2, -1))
        y = y.rechunk((2, -1))
        mask = mask.rechunk((2, -1))

    (coeffa, coeffb), _, _, _ = da.linalg.lstsq(xp1[mask], y[mask])

    if sample_size < x.shape[0]:
        samplesel = da.random.choice(x.shape[0],
                                     int(sample_size),
                                     chunks=x.chunksize)
        x = x[samplesel]
        y = y[samplesel]

    if k is None:
        return (coeffa, coeffb), (x, y), None

    influences = pearson_influence(x, y)
    return (coeffa, coeffb), (x, y), influences
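
A minimal usage sketch, assuming the typing imports and pearson_influence from the surrounding module are available; passing k=None skips the influence computation:

import numpy as np
import dask.array as da

x = da.from_array(np.linspace(0.0, 1.0, 100), chunks=50)
y = 2 * x + 1 + da.random.normal(0, 0.05, size=100, chunks=50)

(coeffa, coeffb), (xs, ys), _ = scatter_with_regression(x, y, sample_size=50, k=None)
print(float(coeffa), float(coeffb))  # slope and intercept of the least-squares fit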
Example #7
def _mask_array(dask_array, mask_array, fill_value=None):
    """Mask two last dimensions in a dask array.

    Parameters
    ----------
    dask_array : Dask array
    mask_array : NumPy array
        Array with bool values. The True values will be masked
        (i.e. ignored). Must have the same shape as the two
        last dimensions in dask_array.
    fill_value : scalar, optional

    Returns
    -------
    dask_array_masked : masked Dask array

    Examples
    --------
    >>> import numpy as np
    >>> import dask.array as da
    >>> import pyxem.utils.dask_tools as dt
    >>> data = da.random.random(
    ...     size=(32, 32, 128, 128), chunks=(16, 16, 128, 128))
    >>> mask_array = np.ones(shape=(128, 128), dtype=bool)
    >>> mask_array[64-10:64+10, 64-10:64+10] = False
    >>> output_dask = dt._mask_array(data, mask_array=mask_array)
    >>> output = output_dask.compute()

    With fill value specified

    >>> output_dask = dt._mask_array(
    ...     data, mask_array=mask_array, fill_value=0.0)
    >>> output = output_dask.compute()

    """
    if not dask_array.shape[-2:] == mask_array.shape:
        raise ValueError(
            "mask_array ({0}) and last two dimensions in the "
            "dask_array ({1}) need to have the same shape.".format(
                mask_array.shape, dask_array.shape[-2:]))
    mask_array_4d = da.ones_like(dask_array, dtype=bool)
    mask_array_4d = mask_array_4d[:, :] * mask_array
    dask_array_masked = da.ma.masked_array(dask_array,
                                           mask_array_4d,
                                           fill_value=fill_value)
    return dask_array_masked
Example #8
def scatter_with_regression(
    xarr: da.Array,
    yarr: da.Array,
    sample_size: int,
    k: Optional[int] = None
) -> Tuple[Tuple[float, float], dd.DataFrame, Optional[np.ndarray]]:
    """
    Calculate pearson correlation on 2 given arrays.

    Parameters
    ----------
    xarr : da.Array
    yarr : da.Array
    sample_size : int
    k : Optional[int] = None
        Highlight k points which influence pearson correlation most

    Returns
    -------
    Intermediate
    """
    if k == 0:
        raise ValueError("k should be larger than 0")

    mask = ~(da.isnan(xarr) | da.isnan(yarr))
    xarr = da.from_array(np.array(xarr)[mask])
    yarr = da.from_array(np.array(yarr)[mask])
    xarrp1 = da.vstack([xarr, da.ones_like(xarr)]).T
    xarrp1 = xarrp1.rechunk((xarrp1.chunks[0], -1))
    (coeffa, coeffb), _, _, _ = da.linalg.lstsq(xarrp1, yarr)

    if sample_size < len(xarr):
        samplesel = np.random.choice(len(xarr), int(sample_size))
        xarr = xarr[samplesel]
        yarr = yarr[samplesel]

    df = dd.concat([dd.from_dask_array(arr) for arr in [xarr, yarr]], axis=1)
    df.columns = ["x", "y"]

    if k is None:
        return (coeffa, coeffb), df, None

    influences = pearson_influence(xarr, yarr)
    return (coeffa, coeffb), df, influences
Example #9
def get_unitcell(result_dict, length):
    # TODO add ensure type on these lengths
    unitcell_lengths = result_dict.pop('unitcell_lengths', None)
    unitcell_angles = result_dict.pop('unitcell_angles', None)
    unitcell_vectors = result_dict.pop('unitcell_vectors', None)
    if (unitcell_lengths is None and unitcell_angles is None
            and unitcell_vectors is None):
        ul = None
        ua = None
        uv = None
    elif unitcell_vectors is not None:
        ul = None
        ua = None
        uv = make_da(unitcell_vectors, length)
        return None, None, uv
    elif unitcell_lengths is not None and unitcell_angles is None:
        ul = make_da(unitcell_lengths, length)
        ua = da.ones_like(ul)
        uv = None
    else:
        ul = make_da(unitcell_lengths, length)
        ua = make_da(unitcell_angles, length)
        uv = None
    return ul, ua, uv
Example #10
    def convert_to_dataarray(self, data, as_list=False):
        original_type_was_number = True

        if isinstance(data, xr.DataArray):
            return False, data

        if as_list:
            model = None
            for element in data:
                if isinstance(element, xr.DataArray):
                    model = element
                    original_type_was_number = False
                    break

            for i, element in enumerate(data):
                if isinstance(element, (int, float, type(None))):
                    ######################################################################
                    # This is an inefficient hotfix to handle mixed lists of numbers and
                    # DataArrays in processes such as sum, subtract, multiply, divide.
                    if model is not None:
                        new_data = element * da.ones_like(model, chunks=1000)
                        number_array = model.copy(data=new_data)
                        data[i] = number_array
                    ######################################################################
                    else:
                        data[i] = xr.DataArray(
                            np.array(element, dtype=float))
                elif not isinstance(element, xr.DataArray):
                    raise ProcessArgumentInvalid(
                        "The argument 'data' in process '{}' is invalid: Elements of the array must be of types '[number, null, raster-cube]'."
                        .format(self.process_id))

        else:
            data = xr.DataArray(np.array(data, dtype=float))

        return original_type_was_number, data
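
A standalone sketch of the scalar-expansion hotfix above, assuming a dask-backed xarray.DataArray as the model; da.ones_like is applied to model.data here rather than to the DataArray itself:

import xarray as xr
import dask.array as da

model = xr.DataArray(da.zeros((4, 4), chunks=2), dims=("y", "x"))
element = 3.5
# Expand the plain number to the model's shape so it can take part in array math.
number_array = model.copy(data=element * da.ones_like(model.data))
print((model + number_array).compute())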
Example #11
def regenie_transform(
    G: ArrayLike,
    X: ArrayLike,
    Y: ArrayLike,
    contigs: ArrayLike,
    *,
    variant_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
    sample_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
    alphas: Optional[ArrayLike] = None,
    add_intercept: bool = True,
    orthogonalize: bool = False,
    normalize: bool = False,
    _glow_adj_dof: bool = False,
    _glow_adj_alpha: bool = False,
    _glow_adj_scaling: bool = False,
) -> Dataset:
    """Regenie trait transformation.

    Parameters
    ----------
    G
        [array-like, shape: (M, N)]
        Genotype data array, `M` samples by `N` variants.
    X
        [array-like, shape: (M, C)]
        Covariate array, `M` samples by `C` covariates.
    Y
        [array-like, shape: (M, O)]
        Outcome array, `M` samples by `O` outcomes.
    contigs
        [array-like, shape: (N,)]
        Variant contigs as a monotonically increasing integer contig index.

    See the `regenie` function for documentation on remaining fields.

    Returns
    -------
    A dataset containing the following variables:

    - `regenie_base_prediction` (blocks, alphas, samples, outcomes): Stage 1
        predictions from ridge regression reduction.
    - `regenie_meta_prediction` (samples, outcomes): Stage 2 predictions from
        the best meta estimator trained on the out-of-sample Stage 1
        predictions.
    - `regenie_loco_prediction` (contigs, samples, outcomes): LOCO predictions
        resulting from Stage 2 predictions ignoring effects for variant
        blocks on held out contigs. This will be absent if the
        data provided does not contain at least 2 contigs.

    Raises
    ------
    ValueError
        If `G`, `X`, and `Y` do not have the same size along
        the first (samples) dimension.
    """
    if not G.shape[0] == X.shape[0] == Y.shape[0]:
        raise ValueError(
            "All data arrays must have same size along first (samples) dimension "
            f"(shapes provided: G={G.shape}, X={X.shape}, Y={Y.shape})"
        )
    n_sample = Y.shape[0]
    n_variant = G.shape[1]

    if alphas is not None:
        alphas = np.asarray(alphas, like=G)

    G, X, Y = da.asarray(G), da.asarray(X), da.asarray(Y)
    contigs = da.asarray(contigs)

    # Set default block sizes if not provided
    if variant_block_size is None:
        # Block in groups of 1000, unless dataset is small
        # enough to default to 2 blocks (typically for tests)
        variant_block_size = min(1000, n_variant // 2)
    if sample_block_size is None:
        # Break into 10 chunks of approximately equal size
        sample_block_size = tuple(split_array_chunks(n_sample, min(10, n_sample)))
        assert sum(sample_block_size) == n_sample

    if normalize:
        # See: https://github.com/projectglow/glow/issues/255
        dof = 1 if _glow_adj_dof else 0
        G = (G - G.mean(axis=0)) / G.std(axis=0, ddof=dof)
        Y = (Y - Y.mean(axis=0)) / Y.std(axis=0)
        X = (X - X.mean(axis=0)) / X.std(axis=0)

    if add_intercept:
        X = da.concatenate(
            [da.ones_like(X, shape=(X.shape[0], 1), dtype=X.dtype), X], axis=1
        )

    # TODO: Test this after finding out whether or not there was a good reason
    # it was precluded in glow by unit covariate regularization:
    # https://github.com/projectglow/glow/issues/266
    if orthogonalize:  # pragma: no cover
        G = G - X @ da.linalg.lstsq(X, G)[0]
        Y = Y - X @ da.linalg.lstsq(X, Y)[0]
        G = G / G.std(axis=0)
        Y = Y / Y.std(axis=0)
        X = da.zeros(shape=(n_sample, 0), dtype=G.dtype)

    # The output of _variant_block_indexes is better suited as a NumPy array,
    # since it will be downcast to tuple. Additionally, since it's not
    # computationally intensive and cumbersome to implement it to be
    # CuPy-compatible, we map a CuPy-backed Dask array contigs to NumPy backend.
    variant_chunk_start, variant_chunk_size = _variant_block_indexes(
        variant_block_size, map_blocks_asnumpy(contigs)
    )
    G = G.rechunk(chunks=(sample_block_size, tuple(variant_chunk_size)))
    X = X.rechunk(chunks=(sample_block_size, -1))
    Y = Y.rechunk(chunks=(sample_block_size, -1))

    YP1 = _stage_1(G, X, Y, alphas=alphas)
    B2, YP2 = _stage_2(
        YP1,
        X,
        Y,
        alphas=alphas,
        _glow_adj_alpha=_glow_adj_alpha,
        _glow_adj_scaling=_glow_adj_scaling,
    )
    YP3 = _stage_3(B2, YP1, X, Y, contigs, variant_chunk_start)

    data_vars: Dict[Hashable, Any] = {}
    data_vars[variables.regenie_base_prediction] = xr.DataArray(
        YP1,
        dims=("blocks", "alphas", "samples", "outcomes"),
        attrs={"description": DESC_BASE_PRED},
    )
    data_vars[variables.regenie_meta_prediction] = xr.DataArray(
        YP2, dims=("samples", "outcomes"), attrs={"description": DESC_META_PRED}
    )
    if YP3 is not None:
        data_vars[variables.regenie_loco_prediction] = xr.DataArray(
            YP3,
            dims=("contigs", "samples", "outcomes"),
            attrs={"description": DESC_LOCO_PRED},
        )
    return create_dataset(data_vars)
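
A small sketch of the default sample_block_size logic; split_array_chunks below is a hypothetical stand-in for the real sgkit helper:

import numpy as np

def split_array_chunks(n, blocks):
    # Stand-in: split n into `blocks` nearly equal integer parts.
    return tuple(len(part) for part in np.array_split(np.arange(n), blocks))

n_sample = 103
sample_block_size = tuple(split_array_chunks(n_sample, min(10, n_sample)))
assert sum(sample_block_size) == n_sample
print(sample_block_size)  # ten blocks of 10 or 11 samples each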
Example #12
functions = [
    lambda x: x,
    lambda x: da.expm1(x),
    lambda x: 2 * x,
    lambda x: x / 2,
    lambda x: x ** 2,
    lambda x: x + x,
    lambda x: x * x,
    lambda x: x[0],
    lambda x: x[:, 1],
    lambda x: x[:1, None, 1:3],
    lambda x: x.T,
    lambda x: da.transpose(x, (1, 2, 0)),
    lambda x: x.sum(),
    lambda x: da.empty_like(x),
    lambda x: da.ones_like(x),
    lambda x: da.zeros_like(x),
    lambda x: da.full_like(x, 5),
    pytest.param(
        lambda x: x.mean(),
        marks=pytest.mark.skipif(
            not IS_NEP18_ACTIVE or cupy.__version__ < LooseVersion("6.4.0"),
            reason="NEP-18 support is not available in NumPy or CuPy older than "
            "6.4.0 (requires https://github.com/cupy/cupy/pull/2418)",
        ),
    ),
    pytest.param(
        lambda x: x.moment(order=0),
    ),
    lambda x: x.moment(order=2),
    pytest.param(
Example #13
def test_setitem_errs():
    x = da.ones_like(cupy.array(()), shape=(4, 4), chunks=(2, 2))

    with pytest.raises(ValueError):
        x[x > 1] = x

    # Shape mismatch
    with pytest.raises(ValueError):
        x[[True, True, False, False], 0] = [2, 3, 4]

    with pytest.raises(ValueError):
        x[[True, True, True, False], 0] = [2, 3]

    with pytest.raises(ValueError):
        x[0, [True, True, True, False]] = [2, 3]

    with pytest.raises(ValueError):
        x[0, [True, True, True, False]] = [1, 2, 3, 4, 5]

    with pytest.raises(ValueError):
        x[da.from_array([True, True, True, False]), 0] = [1, 2, 3, 4, 5]

    with pytest.raises(ValueError):
        x[0, da.from_array([True, False, False, True])] = [1, 2, 3, 4, 5]

    with pytest.raises(ValueError):
        x[:, 0] = [2, 3, 4]

    with pytest.raises(ValueError):
        x[0, :] = [1, 2, 3, 4, 5]

    x = da.ones((4, 4), chunks=(2, 2))

    # Too many indices
    with pytest.raises(IndexError):
        x[:, :, :] = 2

    # 2-d boolean indexing a single dimension
    with pytest.raises(IndexError):
        x[[[True, True, False, False]], 0] = 5

    # Too many/not enough booleans
    with pytest.raises(IndexError):
        x[[True, True, False]] = 5

    with pytest.raises(IndexError):
        x[[False, True, True, True, False]] = 5

    # 2-d indexing a single dimension
    with pytest.raises(IndexError):
        x[[[1, 2, 3]], 0] = 5

    # Multiple 1-d boolean/integer arrays
    with pytest.raises(NotImplementedError):
        x[[1, 2], [2, 3]] = 6

    with pytest.raises(NotImplementedError):
        x[[True, True, False, False], [2, 3]] = 5

    with pytest.raises(NotImplementedError):
        x[[True, True, False, False], [False, True, False, False]] = 7

    # scalar boolean indexing
    with pytest.raises(NotImplementedError):
        x[True] = 5

    with pytest.raises(NotImplementedError):
        x[cupy.array(True)] = 5

    with pytest.raises(NotImplementedError):
        x[0, da.from_array(True)] = 5

    # Scalar arrays
    y = da.from_array(cupy.array(1))
    with pytest.raises(IndexError):
        y[:] = 2

    # RHS has non-broadcastable extra leading dimensions
    x = cupy.arange(12).reshape((3, 4))
    dx = da.from_array(x, chunks=(2, 2))
    with pytest.raises(ValueError):
        dx[...] = cupy.arange(24).reshape((2, 1, 3, 4))

    # RHS has extra leading size 1 dimensions compared to LHS
    x = cupy.arange(12).reshape((3, 4))
    dx = da.from_array(x, chunks=(2, 3))
Example #14
def warp_all(data, coords, dat_type, plot_me=False):
    '''
    NOT CURRENTLY USED

    Apply the transformation to the full 4D data set.

    Parameters
    ----------
    data : input 4D data set (dask array)
    coords : 4D coordinates
    dat_type : dtype used for the coordinate grids
    plot_me : boolean

    Returns
    -------
    dat_temp : warped 4D data
    co_ords3d : coordinate array used for the warp
    '''
    #correct for distortions in all diff patterns
    #working for 4D data ONLY
    coords = da.array(coords)
    #dat_type = 'float32'
    shape_4d = data.shape  #test_dp.shape
    #shape_3d = #test_dp.shape
    #get rid of hot px
    data[data > 20 * data.mean()] = data.mean()

    co_ords_shape = list(shape_4d)
    co_ords_shape.insert(0, 1)
    co_ords_shape = tuple(co_ords_shape)

    co_ords3d_a = da.stack([coords[0] for i in range(shape_4d[0])], axis=2)
    co_ords3d_a = da.stack([co_ords3d_a for i in range(shape_4d[0])], axis=3)
    co_ords3d_a = co_ords3d_a.T

    co_ords3d_a = co_ords3d_a.reshape(co_ords_shape)
    print(type(co_ords3d_a))
    co_ords3d_b = da.stack([coords[1] for i in range(shape_4d[0])], axis=2)
    co_ords3d_b = da.stack([co_ords3d_b for i in range(shape_4d[0])], axis=3)
    co_ords3d_b = co_ords3d_b.T

    co_ords3d_b = co_ords3d_b.reshape(co_ords_shape)

    z = np.arange(shape_4d[0], dtype=dat_type)
    Z = da.ones_like(co_ords3d_a, dtype=dat_type)  #(shape = (100, 128, 128))
    Z = Z * z[None, :, None, None, None]

    z2 = np.arange(shape_4d[1], dtype=dat_type)
    Z2 = da.ones_like(co_ords3d_a, dtype=dat_type)  #(shape = (100, 128, 128))
    Z2 = Z2 * z2[None, None, :, None, None]

    co_ords3d = da.concatenate((Z, Z2), axis=0)
    co_ords3d = da.concatenate((co_ords3d, co_ords3d_a), axis=0)
    co_ords3d = da.concatenate((co_ords3d, co_ords3d_b), axis=0)
    print(type(co_ords3d_a), type(co_ords3d_b), type(co_ords3d), type(Z),
          type(Z2))
    #co_ords3d = co_ords3d.compute()
    print(co_ords3d.shape, co_ords3d.dtype)
    t1 = time.time()
    #dat_temp = tf.warp(data,  inverse_map = co_ords3d , order =1, preserve_range  = True)
    kwords_dict = {
        'inverse_map': co_ords3d,
        #'coordinates': co_ords3d,
        'order': 1,
        #'dtype': dat_type
        #'preserve_range': True
    }
    co_ords3d = co_ords3d.compute()
    data = data.compute()
    dat_temp = sk_par(tf.warp, data, extra_keywords=kwords_dict)
    #dat_temp = sk_par(map_coordinates, data, extra_keywords=kwords_dict)#tf.warp(data, inverse_map = co_ords3d)
    print('time : ', time.time() - t1)
    im_num = 5, 5
    if plot_me == True:
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 8))

        ax1.imshow(dat_temp[im_num[0], im_num[1], :, :])
        ax2.imshow(data[im_num[0], im_num[1], :, :])

    #print('dat : ', dat_temp[im_num[0],im_num[1], :, : ].min(), dat_temp[im_num[0],im_num[1], :, : ].max(), dat_temp[im_num[0],im_num[1], :, : ].mean(), sc.ndimage.measurements.center_of_mass(dat_temp[im_num[0],im_num[1], :, : ]))
    #print('dat : ', data[im_num[0],im_num[1], :, : ].min(), data[im_num[0],im_num[1], :, : ].max(), data[im_num[0],im_num[1], :, : ].mean(), sc.ndimage.measurements.center_of_mass(data[im_num[0],im_num[1], :, : ]))

    return dat_temp, co_ords3d
Example #15
    def volume_curvature(self,
                         darray_il,
                         darray_xl,
                         dip_factor=10,
                         kernel=(3, 3, 3),
                         preview=None):
        """
        Description
        -----------
        Compute volume curvature attributes from 3D seismic dips
        
        Parameters
        ----------
        darray_il : Array-like, Inline dip - acceptable inputs include 
            Numpy, HDF5, or Dask Arrays
        darray_xl : Array-like, Crossline dip - acceptable inputs include 
            Numpy, HDF5, or Dask Arrays
        
        Keyword Arguments
        -----------------
        dip_factor : Number, scalar for dip values
        kernel : tuple (len 3), operator size
        preview : str, enables or disables preview mode and specifies direction
            Acceptable inputs are (None, 'inline', 'xline', 'z')
            Optimizes chunk size in different orientations to facilitate rapid
            screening of algorithm output
        
        Returns
        -------
        H, K, Kmax, Kmin, KMPos, KMNeg : Dask Array, {H : 'Mean Curvature', 
                                                      K : 'Gaussian Curvature',
                                                      Kmax : 'Max Curvature',
                                                      Kmin : 'Min Curvature',
                                                      KMPos : Most Positive Curvature,
                                                      KMNeg : Most Negative Curvature}
        """

        np.seterr(all='ignore')

        # Generate Dask Array as necessary
        darray_il, chunks_init = self.create_array(darray_il,
                                                   kernel,
                                                   preview=preview)
        darray_xl, chunks_init = self.create_array(darray_xl,
                                                   kernel,
                                                   preview=preview)

        u = -darray_il / dip_factor
        v = -darray_xl / dip_factor
        w = da.ones_like(u, chunks=u.chunks)

        # Compute Gradients
        ux = sp().first_derivative(u, axis=0)
        uy = sp().first_derivative(u, axis=1)
        uz = sp().first_derivative(u, axis=2)
        vx = sp().first_derivative(v, axis=0)
        vy = sp().first_derivative(v, axis=1)
        vz = sp().first_derivative(v, axis=2)

        # Smooth Gradients
        ux = ux.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        uy = uy.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        uz = uz.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        vx = vx.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        vy = vy.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)
        vz = vz.map_blocks(ndi.uniform_filter, size=kernel, dtype=ux.dtype)

        u = util.trim_dask_array(u, kernel)
        v = util.trim_dask_array(v, kernel)
        w = util.trim_dask_array(w, kernel)
        ux = util.trim_dask_array(ux, kernel)
        uy = util.trim_dask_array(uy, kernel)
        uz = util.trim_dask_array(uz, kernel)
        vx = util.trim_dask_array(vx, kernel)
        vy = util.trim_dask_array(vy, kernel)
        vz = util.trim_dask_array(vz, kernel)

        wx = da.zeros_like(ux, chunks=ux.chunks, dtype=ux.dtype)
        wy = da.zeros_like(ux, chunks=ux.chunks, dtype=ux.dtype)
        wz = da.zeros_like(ux, chunks=ux.chunks, dtype=ux.dtype)

        uv = u * v
        vw = v * w
        u2 = u * u
        v2 = v * v
        w2 = w * w
        u2pv2 = u2 + v2
        v2pw2 = v2 + w2
        s = da.sqrt(u2pv2 + w2)

        # Measures of surfaces
        E = da.ones_like(u, chunks=u.chunks, dtype=u.dtype)
        F = -(u * w) / (da.sqrt(u2pv2) * da.sqrt(v2pw2))
        G = da.ones_like(u, chunks=u.chunks, dtype=u.dtype)
        D = -(-uv * vx + u2 * vy + v2 * ux - uv * uy) / (u2pv2 * s)
        Di = -(vw * (uy + vx) - 2 * u * w * vy - v2 * (uz + wx) + uv *
               (vz + wy)) / (2 * da.sqrt(u2pv2) * da.sqrt(v2pw2) * s)
        Dii = -(-vw * wy + v2 * wz + w2 * vy - vw * vz) / (v2pw2 * s)
        H = (E * Dii - 2 * F * Di + G * D) / (2 * (E * G - F * F))
        K = (D * Dii - Di * Di) / (E * G - F * F)
        Kmin = H - da.sqrt(H * H - K)
        Kmax = H + da.sqrt(H * H - K)

        H[da.isnan(H)] = 0
        K[da.isnan(K)] = 0
        Kmax[da.isnan(Kmax)] = 0
        Kmin[da.isnan(Kmin)] = 0

        KMPos = da.maximum(Kmax, Kmin)
        KMNeg = da.minimum(Kmax, Kmin)

        return (H, K, Kmax, Kmin, KMPos, KMNeg)
Example #16
    wmax = Q.getcol("WMAX").item()

if args.cell_size:
    cell_size = args.cell_size
else:
    cell_size = estimate_cell_size(umax,
                                   vmax,
                                   wavelength,
                                   factor=3,
                                   ny=args.npix,
                                   nx=args.npix).max()

# Convolution Filter
conv_filter = convolution_filter(3, 63, "kaiser-bessel")

natural_weights = da.ones_like(xds.DATA.data, dtype=np.float64)

dirty = grid(xds.DATA.data,
             xds.UVW.data,
             xds.FLAG.data,
             natural_weights,
             wavelength,
             conv_filter,
             cell_size,
             ny=args.npix,
             nx=args.npix)

ncorr = dirty.shape[2]

# FFT each polarisation and then restack
fft_shifts = [da.fft.ifftshift(dirty[:, :, p]) for p in range(ncorr)]