Example #1
def test_diag():
    v = np.arange(11)
    assert_eq(da.diag(v), np.diag(v))

    v = da.arange(11, chunks=3)
    darr = da.diag(v)
    nparr = np.diag(v)
    assert_eq(darr, nparr)
    assert sorted(da.diag(v).dask) == sorted(da.diag(v).dask)

    v = v + v + 3
    darr = da.diag(v)
    nparr = np.diag(v)
    assert_eq(darr, nparr)

    v = da.arange(11, chunks=11)
    darr = da.diag(v)
    nparr = np.diag(v)
    assert_eq(darr, nparr)
    assert sorted(da.diag(v).dask) == sorted(da.diag(v).dask)

    x = np.arange(64).reshape((8, 8))
    assert_eq(da.diag(x), np.diag(x))

    d = da.from_array(x, chunks=(4, 4))
    assert_eq(da.diag(d), np.diag(x))
Example #2
def test_arange():
    darr = da.arange(77, chunks=13)
    nparr = np.arange(77)
    eq(darr, nparr)

    darr = da.arange(2, 13, chunks=5)
    nparr = np.arange(2, 13)
    eq(darr, nparr)

    darr = da.arange(4, 21, 9, chunks=13)
    nparr = np.arange(4, 21, 9)
    eq(darr, nparr)

    # negative steps
    darr = da.arange(53, 5, -3, chunks=5)
    nparr = np.arange(53, 5, -3)
    eq(darr, nparr)

    darr = da.arange(77, chunks=13, dtype=float)
    nparr = np.arange(77, dtype=float)
    eq(darr, nparr)

    darr = da.arange(2, 13, chunks=5, dtype=int)
    nparr = np.arange(2, 13, dtype=int)
    eq(darr, nparr)
    assert sorted(da.arange(2, 13, chunks=5).dask) ==\
           sorted(da.arange(2, 13, chunks=5).dask)
    assert sorted(da.arange(77, chunks=13, dtype=float).dask) ==\
           sorted(da.arange(77, chunks=13, dtype=float).dask)
Example #3
def test_expand_dims(self):
    from satpy.resample import NativeResampler
    import numpy as np
    import dask.array as da
    from xarray import DataArray
    from pyresample.geometry import AreaDefinition
    from pyresample.utils import proj4_str_to_dict
    ds1 = DataArray(da.zeros((100, 50), chunks=85), dims=('y', 'x'),
                    coords={'y': da.arange(100, chunks=85),
                            'x': da.arange(50, chunks=85)})
    proj_dict = proj4_str_to_dict('+proj=lcc +datum=WGS84 +ellps=WGS84 '
                                  '+lon_0=-95. +lat_0=25 +lat_1=25 '
                                  '+units=m +no_defs')
    target = AreaDefinition(
        'test',
        'test',
        'test',
        proj_dict,
        x_size=100,
        y_size=200,
        area_extent=(-1000., -1500., 1000., 1500.),
    )
    # source geo def doesn't actually matter
    resampler = NativeResampler(None, target)
    new_arr = resampler.resample(ds1)
    self.assertEqual(new_arr.shape, (200, 100))
    new_arr2 = resampler.resample(ds1.compute())
    self.assertTrue(np.all(new_arr == new_arr2))
Example #4
    def interpolate_angles(self, angles, resolution):
        # FIXME: interpolate in cartesian coordinates if the lons or lats are
        # problematic
        from geotiepoints.multilinear import MultilinearInterpolator

        geocoding = self.root.find('.//Tile_Geocoding')
        rows = int(geocoding.find('Size[@resolution="' + str(resolution) + '"]/NROWS').text)
        cols = int(geocoding.find('Size[@resolution="' + str(resolution) + '"]/NCOLS').text)

        smin = [0, 0]
        smax = np.array(angles.shape) - 1
        orders = angles.shape
        minterp = MultilinearInterpolator(smin, smax, orders)
        minterp.set_values(da.atleast_2d(angles.ravel()))

        def _do_interp(minterp, xcoord, ycoord):
            interp_points2 = np.vstack((xcoord.ravel(),
                                        ycoord.ravel()))
            res = minterp(interp_points2)
            return res.reshape(xcoord.shape)

        x = da.arange(rows, dtype=angles.dtype, chunks=CHUNK_SIZE) / (rows-1) * (angles.shape[0] - 1)
        y = da.arange(cols, dtype=angles.dtype, chunks=CHUNK_SIZE) / (cols-1) * (angles.shape[1] - 1)
        xcoord, ycoord = da.meshgrid(x, y)
        return da.map_blocks(_do_interp, minterp, xcoord, ycoord, dtype=angles.dtype,
                             chunks=xcoord.chunks)
Example #5
    def geo_mask(self):
        """Masking the space pixels from geometry info."""
        cfac = np.uint32(self.proj_info['CFAC'])
        lfac = np.uint32(self.proj_info['LFAC'])
        coff = np.float32(self.proj_info['COFF'])
        loff = np.float32(self.proj_info['LOFF'])
        nlines = int(self.data_info['number_of_lines'])
        ncols = int(self.data_info['number_of_columns'])

        # count starts at 1
        local_coff = 1
        local_loff = (self.total_segments - self.segment_number) * nlines + 1

        xmax, ymax = get_geostationary_angle_extent(self.area)

        pixel_cmax = np.rad2deg(xmax) * cfac * 1.0 / 2**16
        pixel_lmax = np.rad2deg(ymax) * lfac * 1.0 / 2**16

        def ellipse(line, col):
            return ((line / pixel_lmax) ** 2) + ((col / pixel_cmax) ** 2) <= 1

        cols_idx = da.arange(-(coff - local_coff),
                             ncols - (coff - local_coff),
                             dtype=np.float64, chunks=CHUNK_SIZE)
        lines_idx = da.arange(nlines - (loff - local_loff),
                              -(loff - local_loff),
                              -1,
                              dtype=np.float64, chunks=CHUNK_SIZE)
        return ellipse(lines_idx[:, None], cols_idx[None, :])
Example #6
def test_arange_float_step():
    darr = da.arange(2., 13., .3, chunks=4)
    nparr = np.arange(2., 13., .3)
    eq(darr, nparr)

    darr = da.arange(7.7, 1.5, -.8, chunks=3)
    nparr = np.arange(7.7, 1.5, -.8)
    eq(darr, nparr)
Example #7
def test_arange_float_step():
    darr = da.arange(2., 13., .3, blocksize=4)
    nparr = np.arange(2., 13., .3)
    eq(darr, nparr)

    darr = da.arange(7.7, 1.5, -.8, blocksize=3)
    nparr = np.arange(7.7, 1.5, -.8)
    eq(darr, nparr)
Example #8
def test_diag():
    v = da.arange(11, chunks=3)
    darr = da.diag(v)
    nparr = np.diag(v)
    eq(darr, nparr)

    v = v + v + 3
    darr = da.diag(v)
    nparr = np.diag(v)
    eq(darr, nparr)

    v = da.arange(11, chunks=11)
    darr = da.diag(v)
    nparr = np.diag(v)
    eq(darr, nparr)
Example #9
    def test_enhance_with_sensor_entry(self):
        """Test enhancing an image with a configuration section."""
        from satpy.writers import Enhancer, get_enhanced_image
        from xarray import DataArray
        import dask.array as da
        ds = DataArray(np.arange(1, 11.).reshape((2, 5)),
                       attrs=dict(name='test1', sensor='test_sensor', mode='L'),
                       dims=['y', 'x'])
        e = Enhancer()
        self.assertIsNotNone(e.enhancement_tree)
        img = get_enhanced_image(ds, enhancer=e)
        self.assertSetEqual(
            set(e.sensor_enhancement_configs),
            {self.ENH_FN, self.ENH_ENH_FN})
        np.testing.assert_almost_equal(img.data.isel(bands=0).max().values,
                                       1.)

        ds = DataArray(da.arange(1, 11., chunks=5).reshape((2, 5)),
                       attrs=dict(name='test1', sensor='test_sensor', mode='L'),
                       dims=['y', 'x'])
        e = Enhancer()
        self.assertIsNotNone(e.enhancement_tree)
        img = get_enhanced_image(ds, enhancer=e)
        self.assertSetEqual(set(e.sensor_enhancement_configs),
                            {self.ENH_FN, self.ENH_ENH_FN})
        np.testing.assert_almost_equal(img.data.isel(bands=0).max().values, 1.)
Example #10
def test_arange_working_float_step():
    """Sometimes floating point step arguments work, but this could be platform
    dependent.
    """
    darr = da.arange(3.3, -9.1, -.25, chunks=3)
    nparr = np.arange(3.3, -9.1, -.25)
    eq(darr, nparr)
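A note on the caveat above: np.arange's output length is effectively ceil((stop - start) / step) computed in floating point, so it can round either way near a boundary. A minimal sketch of that length computation (not part of the test suite):

import numpy as np

start, stop, step = 3.3, -9.1, -.25
# the length comes from a float division, which is where platform
# differences can creep in
n = int(np.ceil((stop - start) / step))
assert n == len(np.arange(start, stop, step))  # 50 for these inputs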
Example #11
def test_arange_float_step():
    darr = da.arange(2., 13., .3, chunks=4)
    nparr = np.arange(2., 13., .3)
    assert_eq(darr, nparr)

    darr = da.arange(7.7, 1.5, -.8, chunks=3)
    nparr = np.arange(7.7, 1.5, -.8)
    assert_eq(darr, nparr)

    darr = da.arange(0, 1, 0.01, chunks=20)
    nparr = np.arange(0, 1, 0.01)
    assert_eq(darr, nparr)

    darr = da.arange(0, 1, 0.03, chunks=20)
    nparr = np.arange(0, 1, 0.03)
    assert_eq(darr, nparr)
Example #12
def test_repeat():
    x = np.random.random((10, 11, 13))
    d = da.from_array(x, chunks=(4, 5, 3))

    repeats = [1, 2, 5]
    axes = [-3, -2, -1, 0, 1, 2]

    for r in repeats:
        for a in axes:
            assert_eq(x.repeat(r, axis=a), d.repeat(r, axis=a))

    assert_eq(d.repeat(2, 0), da.repeat(d, 2, 0))

    with pytest.raises(NotImplementedError):
        da.repeat(d, np.arange(10))

    with pytest.raises(NotImplementedError):
        da.repeat(d, 2, None)

    with pytest.raises(NotImplementedError):
        da.repeat(d, 2)

    for invalid_axis in [3, -4]:
        with pytest.raises(ValueError):
            da.repeat(d, 2, axis=invalid_axis)

    x = np.arange(5)
    d = da.arange(5, chunks=(2,))

    assert_eq(x.repeat(3), d.repeat(3))

    for r in [1, 2, 3, 4]:
        assert all(concat(d.repeat(r).chunks))
Example #13
def test_histogram_normed_deprecation():
    x = da.arange(10)
    with pytest.raises(ValueError) as info:
        da.histogram(x, bins=[1, 2, 3], normed=True)

    assert 'density' in str(info.value)
    assert 'deprecated' in str(info.value).lower()
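The test above only exercises the error path; for reference, a minimal sketch of the supported replacement the message points to (density= instead of the removed normed= flag):

import dask.array as da

x = da.arange(10)
# density=True is the documented replacement for normed=True
hist, edges = da.histogram(x, bins=[1, 2, 3], density=True)
print(hist.compute())  # densities over the two bins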
Example #14
def test_index_with_int_dask_array_nanchunks(chunks):
    # Slice by array with nan-sized chunks
    a = da.arange(-2, 3, chunks=chunks)
    assert_eq(a[a.nonzero()], np.array([-2, -1,  1,  2]))
    # Edge case: the nan-sized chunks resolve to size 0
    a = da.zeros(5, chunks=chunks)
    assert_eq(a[a.nonzero()], np.array([]))
Example #15
File: test_atop.py Project: yliapis/dask
def test_blockwise_new_axes_chunked():
    def f(x):
        return x[None, :] * 2

    x = da.arange(0, 6, 1, chunks=2, dtype=np.int32)
    y = da.blockwise(f, 'qa', x, 'a', new_axes={'q': (1, 1)}, dtype=x.dtype)
    assert y.chunks == ((1, 1), (2, 2, 2))
    assert_eq(y, np.array([[0, 2, 4, 6, 8, 10], [0, 2, 4, 6, 8, 10]], np.int32))
Example #16
def test_index_with_int_dask_array_indexerror(chunks):
    a = da.arange(4, chunks=chunks)
    idx = da.from_array([4], chunks=1)
    with pytest.raises(IndexError):
        a[idx].compute()
    idx = da.from_array([-5], chunks=1)
    with pytest.raises(IndexError):
        a[idx].compute()
Example #17
File: test_dask.py Project: jnecus/xarray
def test_dataarray_with_dask_coords():
    import toolz
    x = xr.Variable('x', da.arange(8, chunks=(4,)))
    y = xr.Variable('y', da.arange(8, chunks=(4,)) * 2)
    data = da.random.random((8, 8), chunks=(4, 4)) + 1
    array = xr.DataArray(data, dims=['x', 'y'])
    array.coords['xx'] = x
    array.coords['yy'] = y

    assert dict(array.__dask_graph__()) == toolz.merge(data.__dask_graph__(),
                                                       x.__dask_graph__(),
                                                       y.__dask_graph__())

    (array2,) = dask.compute(array)
    assert not dask.is_dask_collection(array2)

    assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values())
Example #18
def test_diag():
    v = da.arange(11, chunks=3)
    darr = da.diag(v)
    nparr = np.diag(v)
    eq(darr, nparr)
    assert sorted(da.diag(v).dask) == sorted(da.diag(v).dask)

    v = v + v + 3
    darr = da.diag(v)
    nparr = np.diag(v)
    eq(darr, nparr)

    v = da.arange(11, chunks=11)
    darr = da.diag(v)
    nparr = np.diag(v)
    eq(darr, nparr)
    assert sorted(da.diag(v).dask) == sorted(da.diag(v).dask)
Example #19
def test_arg_reductions_unknown_chunksize(func):
    x = da.arange(10, chunks=5)
    x = x[x > 1]

    with pytest.raises(ValueError) as info:
        getattr(da, func)(x)

    assert "unknown chunksize" in str(info.value)
Example #20
File: test_ghost.py Project: fortizc/dask
def test_map_overlap():
    x = da.arange(10, chunks=5)
    y = x.map_overlap(lambda x: x + len(x), depth=2, dtype=x.dtype)
    assert_eq(y, np.arange(10) + 5 + 2 + 2)

    x = da.arange(10, chunks=5)
    y = x.map_overlap(lambda x: x + len(x), depth=np.int64(2), dtype=x.dtype)
    assert all([(type(s) is int) for s in y.shape])
    assert_eq(y, np.arange(10) + 5 + 2 + 2)

    x = np.arange(16).reshape((4, 4))
    d = da.from_array(x, chunks=(2, 2))
    exp1 = d.map_overlap(lambda x: x + x.size, depth=1, dtype=d.dtype)
    exp2 = d.map_overlap(lambda x: x + x.size, depth={0: 1, 1: 1},
                         boundary={0: 'reflect', 1: 'none'}, dtype=d.dtype)
    assert_eq(exp1, x + 16)
    assert_eq(exp2, x + 12)
Example #21
def test_vstack():
    x = np.arange(5)
    y = np.ones(5)
    a = da.arange(5, chunks=2)
    b = da.ones(5, chunks=2)

    assert_eq(np.vstack((x, y)), da.vstack((a, b)))
    assert_eq(np.vstack((x, y[None, :])), da.vstack((a, b[None, :])))
Example #22
def test_average_raises():
    d_a = da.arange(11, chunks=2)

    with pytest.raises(TypeError):
        da.average(d_a, weights=[1, 2, 3])

    with pytest.warns(RuntimeWarning):
        da.average(d_a, weights=da.zeros_like(d_a)).compute()
Example #23
File: test_ufunc.py Project: yliapis/dask
def test_out_numpy():
    x = da.arange(10, chunks=(5,))
    empty = np.empty(10, dtype=x.dtype)
    with pytest.raises((TypeError, NotImplementedError)) as info:
        np.add(x, 1, out=empty)

    assert 'ndarray' in str(info.value)
    assert 'Array' in str(info.value)
Example #24
File: test_atop.py Project: yliapis/dask
def test_args_delayed():
    x = da.arange(10, chunks=(5,))
    y = dask.delayed(lambda: 100)()

    z = da.blockwise(add, 'i', x, 'i', y, None, dtype=x.dtype)
    assert_eq(z, np.arange(10) + 100)

    z = da.blockwise(lambda x, y: x + y, 'i', x, 'i', y=y, dtype=x.dtype)
    assert_eq(z, np.arange(10) + 100)
Example #25
File: test_dask.py Project: jnecus/xarray
def test_persist_DataArray(persist):
    x = da.arange(10, chunks=(5,))
    y = DataArray(x)
    z = y + 1
    n = len(z.data.dask)

    zz = persist(z)

    assert len(z.data.dask) == n
    assert len(zz.data.dask) == zz.data.npartitions
Example #26
def interpolate_xarray_linear(xpoints, ypoints, values, shape):
    """Interpolate linearly, generating a dask array."""
    from scipy.interpolate.interpnd import (LinearNDInterpolator,
                                            _ndim_coords_from_arrays)
    points = _ndim_coords_from_arrays(np.vstack((np.asarray(ypoints),
                                                 np.asarray(xpoints))).T)

    interpolator = LinearNDInterpolator(points, values)

    def intp(grid_x, grid_y, interpolator):
        return interpolator((grid_y, grid_x))

    grid_x, grid_y = da.meshgrid(da.arange(shape[1], chunks=CHUNK_SIZE),
                                 da.arange(shape[0], chunks=CHUNK_SIZE))
    # workaround for non-thread-safe first call of the interpolator:
    interpolator((0, 0))
    res = da.map_blocks(intp, grid_x, grid_y, interpolator=interpolator)

    return DataArray(res, dims=('y', 'x'))
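A hypothetical usage sketch for the function above; it assumes the enclosing module defines CHUNK_SIZE and imports DataArray from xarray, and the point values here are made up:

import numpy as np

# four corner points of a 5x5 grid, with values to interpolate between
xpoints = np.array([0, 4, 0, 4])
ypoints = np.array([0, 0, 4, 4])
values = np.array([0.0, 1.0, 1.0, 2.0])
res = interpolate_xarray_linear(xpoints, ypoints, values, shape=(5, 5))
print(res.shape)  # (5, 5); a DataArray backed by a lazy dask array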
Example #27
File: test_atop.py Project: mrocklin/dask
def test_atop_numpy_arg():
    x = da.arange(10, chunks=(5,))
    y = np.arange(1000)

    x = x.map_blocks(lambda x, y: x, 1.0)
    x = x.map_blocks(lambda x, y: x, 'abc')
    x = x.map_blocks(lambda x, y: x, y)
    x = x.map_blocks(lambda x, y: x, 'abc')
    x = x.map_blocks(lambda x, y: x, 1.0)
    x = x.map_blocks(lambda x, y, z: x, 'abc', np.array(['a', 'b'], dtype=object))
    assert_eq(x, np.arange(10))
Example #28
def test_cache():
    x = da.arange(15, chunks=5)
    y = 2 * x + 1
    z = y.cache()
    assert len(z.dask) == 3  # very short graph
    assert eq(y, z)

    cache = np.empty(15, dtype=y.dtype)
    z = y.cache(store=cache)
    assert len(z.dask) < 6  # very short graph
    assert z.chunks == y.chunks
    assert eq(y, z)
Example #29
File: test_slicing.py Project: rla3rd/dask
def test_slicing_with_negative_step_flops_keys():
    x = da.arange(10, chunks=5)
    y = x[:1:-1]
    assert (x.name, 1) in y.dask[(y.name, 0)]
    assert (x.name, 0) in y.dask[(y.name, 1)]

    assert eq(y, np.arange(10)[:1:-1])

    assert y.chunks == ((5, 3),)

    assert y.dask[(y.name, 0)] == (getitem, (x.name, 1), (slice(-1, -6, -1),))
    assert y.dask[(y.name, 1)] == (getitem, (x.name, 0), (slice(-1, -4, -1),))
Example #30
def test_arange():
    darr = da.arange(77, blocksize=13)
    nparr = np.arange(77)
    eq(darr, nparr)

    darr = da.arange(2, 13, blocksize=5)
    nparr = np.arange(2, 13)
    eq(darr, nparr)

    darr = da.arange(4, 21, 9, blocksize=13)
    nparr = np.arange(4, 21, 9)
    eq(darr, nparr)

    # negative steps
    darr = da.arange(53, 5, -3, blocksize=5)
    nparr = np.arange(53, 5, -3)
    eq(darr, nparr)

    darr = da.arange(77, blocksize=13, dtype=float)
    nparr = np.arange(77, dtype=float)
    eq(darr, nparr)

    darr = da.arange(2, 13, blocksize=5, dtype=int)
    nparr = np.arange(2, 13, dtype=int)
    eq(darr, nparr)
Example #31
def split(X=None,
          y=None,
          instance_indexes=None,
          test_ratio=0.3,
          initial_label_rate=0.05,
          split_count=10,
          all_class=True):
    """Split given data.
    Provide one of X, y or instance_indexes to execute the split.
    Parameters
    ----------
    X: array-like, optional
        Data matrix with [n_samples, n_features]
    y: array-like, optional
        labels of given data [n_samples, n_labels] or [n_samples]
    instance_indexes: list, optional (default=None)
        List of instance names (used for image datasets), or an index list
        provided instead of a data matrix.
        One of [instance_indexes, X, y] must be provided.
    test_ratio: float, optional (default=0.3)
        Ratio of the test set.
    initial_label_rate: float, optional (default=0.05)
        Ratio of the initial labeled set, i.e. roughly
        initial_label_rate * (1 - test_ratio) * n_samples instances.
    split_count: int, optional (default=10)
        Randomly split the data split_count times.
    all_class: bool, optional (default=True)
        Whether each split must contain at least one instance of each class.
        If False, a totally random split is performed.

    Returns
    -------
    train_idx: list
        index of the training set, shape like [n_split_count, n_training_indexes]
    test_idx: list
        index of the testing set, shape like [n_split_count, n_testing_indexes]
    label_idx: list
        index of the labeled set, shape like [n_split_count, n_labeling_indexes]
    unlabel_idx: list
        index of the unlabeled set, shape like [n_split_count, n_unlabeling_indexes]
    """

    # check input parameters
    if X is None and y is None and instance_indexes is None:
        raise Exception("Must provide one of X, y or instance_indexes.")

    len_of_parameters = [
        len(X) if X is not None else None,
        len(y) if y is not None else None,
        len(instance_indexes) if instance_indexes is not None else None
    ]
    number_of_instance = np.unique(
        [i for i in len_of_parameters if i is not None])
    if len(number_of_instance) > 1:
        raise ValueError("Different length of instances and _labels found.")
    else:
        number_of_instance = number_of_instance[0]

    if instance_indexes is not None:
        instance_indexes = da.array(instance_indexes)
    else:
        instance_indexes = da.arange(number_of_instance)

    # split
    train_idx = []
    test_idx = []
    label_idx = []
    unlabel_idx = []

    for i in range(split_count):
        if (not all_class) or y is None:
            rp = randperm(number_of_instance)
            cutpoint = int(round((1 - test_ratio) * len(rp)))
            tp_train = instance_indexes[rp[0:cutpoint]]
            train_idx.append(tp_train)
            test_idx.append(instance_indexes[rp[cutpoint:]])
            cutpoint = int(round(initial_label_rate * len(tp_train)))
            if cutpoint <= 1:
                cutpoint = 1
            label_idx.append(tp_train[0:cutpoint])
            unlabel_idx.append(tp_train[cutpoint:])
        else:
            if y is None:
                raise Exception(
                    "y must be provided when all_class flag is True.")
            if isinstance(y, da.core.Array):
                check_array(y, ensure_2d=False, dtype=None, distributed=False)
            else:
                y = check_array(y,
                                ensure_2d=False,
                                dtype=None,
                                distributed=True)

            if y.ndim == 1:
                label_num = len(da.unique(y).compute())
            else:
                label_num = y.shape[1]
            if round((1 - test_ratio) * initial_label_rate *
                     number_of_instance) < label_num:
                raise ValueError(
                    "The initial rate is too small to guarantee that each "
                    "split will contain at least one instance for each class.")

            # check validity
            while 1:
                rp = randperm(number_of_instance)
                cutpoint = int(round((1 - test_ratio) * len(rp)))
                tp_train = instance_indexes[rp[0:cutpoint]]
                cutpointlabel = int(round(initial_label_rate * len(tp_train)))
                if cutpointlabel <= 1:
                    cutpointlabel = 1
                label_id = tp_train[0:cutpointlabel]
                if y.ndim == 1:
                    if len(da.unique(y[label_id]).compute()) == label_num:
                        break
                else:
                    temp = da.sum(y[label_id], axis=0)
                    if not da.any(temp == 0):
                        break

            train_idx.append(tp_train)
            test_idx.append(instance_indexes[rp[cutpoint:]])
            label_idx.append(tp_train[0:cutpointlabel])
            unlabel_idx.append(tp_train[cutpointlabel:])

    return compute(train_idx, test_idx, label_idx, unlabel_idx)
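A hypothetical usage sketch; split depends on module-level helpers (randperm, check_array, compute), so this assumes it is called from within that module:

import numpy as np

# ten instances across three classes; request two stratified splits
y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2])
train_idx, test_idx, label_idx, unlabel_idx = split(
    y=y, test_ratio=0.3, initial_label_rate=0.3, split_count=2,
    all_class=True)
assert len(train_idx) == len(test_idx) == 2  # one entry per split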
Example #32
def test_dask_array_is_scalar():
    # regression test for GH1684
    import dask.array as da

    y = da.arange(8, chunks=4)
    assert not utils.is_scalar(y)
Example #33
def test_stack_scalars():
    d = da.arange(4, chunks=2)

    s = da.stack([d.mean(), d.sum()])

    assert s.compute().tolist() == [np.arange(4).mean(), np.arange(4).sum()]
Example #34
def generate_fake_abi_xr_dataset(filename, chunks=None, **kwargs):
    """Create a fake xarray dataset for abi data.

    This is an incomplete copy of existing file structures.
    """
    dataset = Dataset(attrs={
        'time_coverage_start': '2018-03-13T20:30:42.3Z',
        'time_coverage_end': '2018-03-13T20:41:18.9Z',
    })

    projection = DataArray(
        [-214748364],
        attrs={
            'long_name': 'GOES-R ABI fixed grid projection',
            'grid_mapping_name': 'geostationary',
            'perspective_point_height': 35786023.0,
            'semi_major_axis': 6378137.0,
            'semi_minor_axis': 6356752.31414,
            'inverse_flattening': 298.2572221,
            'latitude_of_projection_origin': 0.0,
            'longitude_of_projection_origin': -75.0,
            'sweep_angle_axis': 'x'
        })
    dataset['goes_imager_projection'] = projection

    if 'C01' in filename or 'C03' in filename or 'C05' in filename:
        stop = 10847
        step = 2
        scale = 2.8e-05
        offset = 0.151858
    elif 'C02' in filename:
        stop = 21693
        step = 4
        scale = 1.4e-05
        offset = 0.151865
    else:
        stop = 5424
        step = 1
        scale = 5.6e-05
        offset = 0.151844

    y = DataArray(
        da.arange(0, stop, step),
        attrs={
            'scale_factor': -scale,
            'add_offset': offset,
            'units': 'rad',
            'axis': 'Y',
            'long_name': 'GOES fixed grid projection y-coordinate',
            'standard_name': 'projection_y_coordinate'
            },
        dims=['y'])

    dataset['y'] = y

    x = DataArray(
        da.arange(0, stop, step),
        attrs={
            'scale_factor': scale,
            'add_offset': -offset,
            'units': 'rad',
            'axis': 'X',
            'long_name': 'GOES fixed grid projection x-coordinate',
            'standard_name': 'projection_x_coordinate'
        },
        dims=['x'])

    dataset['x'] = x

    rad = DataArray(
        da.random.randint(0, 1025, size=[len(y), len(x)], dtype=np.int16, chunks=chunks),
        attrs={
            '_FillValue': np.array(1023),
            'long_name': 'ABI L1b Radiances',
            'standard_name': 'toa_outgoing_radiance_per_unit_wavelength',
            '_Unsigned': 'true',
            'sensor_band_bit_depth': 10,
            'valid_range': np.array([0, 1022], dtype=np.int16),
            'scale_factor': 0.8121064,
            'add_offset': -25.936647,
            'units': 'W m-2 sr-1 um-1',
            'resolution': 'y: 0.000028 rad x: 0.000028 rad',
            'grid_mapping': 'goes_imager_projection',
            'cell_methods': 't: point area: point'
        },
        dims=['y', 'x']
    )

    dataset['Rad'] = rad

    sublat = DataArray(0.0, attrs={
        'long_name': 'nominal satellite subpoint latitude (platform latitude)',
        'standard_name': 'latitude',
        '_FillValue': -999.0,
        'units': 'degrees_north'})
    dataset['nominal_satellite_subpoint_lat'] = sublat

    sublon = DataArray(-75.0, attrs={
        'long_name': 'nominal satellite subpoint longitude (platform longitude)',
        'standard_name': 'longitude',
        '_FillValue': -999.0,
        'units': 'degrees_east'})

    dataset['nominal_satellite_subpoint_lon'] = sublon

    satheight = DataArray(35786.023, attrs={
        'long_name': 'nominal satellite height above GRS 80 ellipsoid (platform altitude)',
        'standard_name': 'height_above_reference_ellipsoid',
        '_FillValue': -999.0,
        'units': 'km'})

    dataset['nominal_satellite_height'] = satheight

    yaw_flip_flag = DataArray(0, attrs={
        'long_name': 'Flag indicating the spacecraft is operating in yaw flip configuration',
        '_Unsigned': 'true',
        '_FillValue': np.array(-1),
        'valid_range': np.array([0, 1], dtype=np.int8),
        'units': '1',
        'flag_values': '0 1',
        'flag_meanings': 'false true'})

    dataset['yaw_flip_flag'] = yaw_flip_flag

    return dataset
Example #35
    def setUp(self):
        """Create test data and mock pycoast/pydecorate."""
        from trollimage.xrimage import XRImage
        from pyresample.geometry import AreaDefinition
        import xarray as xr
        import dask.array as da

        proj_dict = {
            'proj': 'lcc',
            'datum': 'WGS84',
            'ellps': 'WGS84',
            'lon_0': -95.,
            'lat_0': 25,
            'lat_1': 25,
            'units': 'm',
            'no_defs': True
        }
        self.area_def = AreaDefinition(
            'test',
            'test',
            'test',
            proj_dict,
            200,
            400,
            (-1000., -1500., 1000., 1500.),
        )
        self.orig_rgb_img = XRImage(
            xr.DataArray(da.arange(75., chunks=10).reshape(3, 5, 5) / 75.,
                         dims=('bands', 'y', 'x'),
                         coords={'bands': ['R', 'G', 'B']},
                         attrs={
                             'name': 'test_ds',
                             'area': self.area_def
                         }))
        self.orig_l_img = XRImage(
            xr.DataArray(da.arange(25., chunks=10).reshape(5, 5) / 75.,
                         dims=('y', 'x'),
                         attrs={
                             'name': 'test_ds',
                             'area': self.area_def
                         }))

        self.decorate = {
            'decorate': [{
                'logo': {
                    'logo_path': '',
                    'height': 143,
                    'bg': 'white',
                    'bg_opacity': 255
                }
            }, {
                'text': {
                    'txt': 'TEST',
                    'align': {
                        'top_bottom': 'bottom',
                        'left_right': 'right'
                    },
                    'font': '',
                    'font_size': 22,
                    'height': 30,
                    'bg': 'black',
                    'bg_opacity': 255,
                    'line': 'white'
                }
            }, {
                'scale': {
                    'colormap': greys,
                    'extend': False,
                    'width': 1670,
                    'height': 110,
                    'tick_marks': 5,
                    'minor_tick_marks': 1,
                    'cursor': [0, 0],
                    'bg': 'white',
                    'title': 'TEST TITLE OF SCALE',
                    'fontsize': 110,
                    'align': 'cc'
                }
            }]
        }

        import_mock = mock.MagicMock()
        modules = {
            'pycoast': import_mock.pycoast,
            'pydecorate': import_mock.pydecorate
        }
        self.module_patcher = mock.patch.dict('sys.modules', modules)
        self.module_patcher.start()
Example #36
    def test_bil_resampling(self, resampler, create_filename, load, savez):
        """Test the bilinear resampler."""
        import numpy as np
        import dask.array as da
        import xarray as xr
        from satpy.resample import BilinearResampler
        data, source_area, swath_data, source_swath, target_area = get_test_data()

        # Test that bilinear resampling info calculation is called,
        # and the info is saved
        load.side_effect = IOError()
        resampler = BilinearResampler(source_swath, target_area)
        resampler.precompute(
            mask=da.arange(5, chunks=5).astype(bool))
        resampler.resampler.get_bil_info.assert_called()
        resampler.resampler.get_bil_info.assert_called_with()
        self.assertEqual(len(savez.mock_calls), 0)
        resampler.resampler.reset_mock()
        load.reset_mock()
        load.side_effect = None

        # Test that get_sample_from_bil_info is called properly
        fill_value = 8
        resampler.resampler.get_sample_from_bil_info.return_value = \
            xr.DataArray(da.zeros(target_area.shape), dims=('y', 'x'))
        new_data = resampler.compute(data, fill_value=fill_value)
        resampler.resampler.get_sample_from_bil_info.assert_called_with(
            data, fill_value=fill_value, output_shape=target_area.shape)
        self.assertIn('y', new_data.coords)
        self.assertIn('x', new_data.coords)
        if CRS is not None:
            self.assertIn('crs', new_data.coords)
            self.assertIsInstance(new_data.coords['crs'].item(), CRS)
            self.assertIn('lcc', new_data.coords['crs'].item().to_proj4())
            self.assertEqual(new_data.coords['y'].attrs['units'], 'meter')
            self.assertEqual(new_data.coords['x'].attrs['units'], 'meter')

        # Test that the resampling info is tried to read from the disk
        resampler = BilinearResampler(source_swath, target_area)
        resampler.precompute(cache_dir='.')
        load.assert_called()

        # Test caching the resampling info
        try:
            the_dir = tempfile.mkdtemp()
            resampler = BilinearResampler(source_area, target_area)
            create_filename.return_value = os.path.join(the_dir, 'test_cache.npz')
            load.reset_mock()
            load.side_effect = IOError()

            resampler.precompute(cache_dir=the_dir)
            savez.assert_called()
            # assert data was saved to the on-disk cache
            self.assertEqual(len(savez.mock_calls), 1)
            # assert that load was called to try to load something from disk
            self.assertEqual(len(load.mock_calls), 1)

            nbcalls = len(resampler.resampler.get_bil_info.mock_calls)
            # test reusing the resampler
            load.side_effect = None

            class FakeNPZ(dict):
                def close(self):
                    pass

            load.return_value = FakeNPZ(bilinear_s=1,
                                        bilinear_t=2,
                                        valid_input_index=3,
                                        index_array=4)
            resampler.precompute(cache_dir=the_dir)
            # we already have things cached in-memory, no need to save again
            self.assertEqual(len(savez.mock_calls), 1)
            # we already have things cached in-memory, don't need to load
            # self.assertEqual(len(load.mock_calls), 1)
            self.assertEqual(len(resampler.resampler.get_bil_info.mock_calls), nbcalls)

            # test loading saved resampler
            resampler = BilinearResampler(source_area, target_area)
            resampler.precompute(cache_dir=the_dir)
            self.assertEqual(len(load.mock_calls), 2)
            self.assertEqual(len(resampler.resampler.get_bil_info.mock_calls), nbcalls)
            # we should have cached things in-memory now
            # self.assertEqual(len(resampler._index_caches), 1)
        finally:
            shutil.rmtree(the_dir)
Example #37
    def test_kd_resampling(self, resampler, create_filename, load, savez):
        """Test the kd resampler."""
        import numpy as np
        import dask.array as da
        from satpy.resample import KDTreeResampler
        data, source_area, swath_data, source_swath, target_area = get_test_data()

        resampler = KDTreeResampler(source_swath, target_area)
        resampler.precompute(
            mask=da.arange(5, chunks=5).astype(bool), cache_dir='.')
        resampler.resampler.get_neighbour_info.assert_called()
        # swath definitions should not be cached
        self.assertEqual(len(savez.mock_calls), 0)
        resampler.resampler.reset_mock()

        resampler = KDTreeResampler(source_area, target_area)
        resampler.precompute()
        resampler.resampler.get_neighbour_info.assert_called_with(mask=None)

        try:
            the_dir = tempfile.mkdtemp()
            resampler = KDTreeResampler(source_area, target_area)
            create_filename.return_value = os.path.join(the_dir, 'test_cache.npz')
            load.side_effect = IOError()
            resampler.precompute(cache_dir=the_dir)
            # assert data was saved to the on-disk cache
            self.assertEqual(len(savez.mock_calls), 1)
            # assert that load was called to try to load something from disk
            self.assertEqual(len(load.mock_calls), 1)
            # we should have cached things in-memory
            self.assertEqual(len(resampler._index_caches), 1)
            nbcalls = len(resampler.resampler.get_neighbour_info.mock_calls)
            # test reusing the resampler
            load.side_effect = None

            class FakeNPZ(dict):
                def close(self):
                    pass

            load.return_value = FakeNPZ(valid_input_index=1,
                                        valid_output_index=2,
                                        index_array=3,
                                        distance_array=4)
            resampler.precompute(cache_dir=the_dir)
            # we already have things cached in-memory, no need to save again
            self.assertEqual(len(savez.mock_calls), 1)
            # we already have things cached in-memory, don't need to load
            self.assertEqual(len(load.mock_calls), 1)
            # we should have cached things in-memory
            self.assertEqual(len(resampler._index_caches), 1)
            self.assertEqual(len(resampler.resampler.get_neighbour_info.mock_calls), nbcalls)

            # test loading saved resampler
            resampler = KDTreeResampler(source_area, target_area)
            resampler.precompute(cache_dir=the_dir)
            self.assertEqual(len(load.mock_calls), 2)
            self.assertEqual(len(resampler.resampler.get_neighbour_info.mock_calls), nbcalls)
            # we should have cached things in-memory now
            self.assertEqual(len(resampler._index_caches), 1)
        finally:
            shutil.rmtree(the_dir)

        fill_value = 8
        resampler.compute(data, fill_value=fill_value)
        resampler.resampler.get_sample_from_neighbour_info.assert_called_with(data, fill_value)
Example #38
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import dask.array as da

from dask_image.ndmorph import _utils


@pytest.mark.parametrize("err_type, input, structure", [
    (RuntimeError, da.ones([1, 2], dtype=bool, chunks=(
        1,
        2,
    )), da.arange(2, dtype=bool, chunks=(2, ))),
    (TypeError, da.arange(2, dtype=bool, chunks=(2, )), 2.0),
])
def test_errs__get_structure(err_type, input, structure):
    with pytest.raises(err_type):
        _utils._get_structure(input, structure)


@pytest.mark.parametrize("err_type, iterations", [
    (TypeError, 0.0),
    (NotImplementedError, 0),
])
def test_errs__get_iterations(err_type, iterations):
    with pytest.raises(err_type):
        _utils._get_iterations(iterations)


@pytest.mark.parametrize("err_type, input, mask", [
def test_array_reduction_out(func):
    x = da.arange(10, chunks=(5, ))
    y = da.ones((10, 10), chunks=(4, 4))
    func(y, axis=0, out=x)
    assert_eq(x, func(np.ones((10, 10)), axis=0))
Example #40
def test_arange_dtypes(start, stop, step, dtype):
    a_np = np.arange(start, stop, step, dtype=dtype)
    a_da = da.arange(start, stop, step, dtype=dtype, chunks=-1)
    assert_eq(a_np, a_da)
Example #41
def test_arange():
    darr = da.arange(77, chunks=13)
    nparr = np.arange(77)
    assert_eq(darr, nparr)

    darr = da.arange(2, 13, chunks=5)
    nparr = np.arange(2, 13)
    assert_eq(darr, nparr)

    darr = da.arange(4, 21, 9, chunks=13)
    nparr = np.arange(4, 21, 9)
    assert_eq(darr, nparr)

    # negative steps
    darr = da.arange(53, 5, -3, chunks=5)
    nparr = np.arange(53, 5, -3)
    assert_eq(darr, nparr)

    darr = da.arange(77, chunks=13, dtype=float)
    nparr = np.arange(77, dtype=float)
    assert_eq(darr, nparr)

    darr = da.arange(2, 13, chunks=5, dtype=int)
    nparr = np.arange(2, 13, dtype=int)
    assert_eq(darr, nparr)
    assert sorted(da.arange(2, 13, chunks=5).dask) == sorted(
        da.arange(2, 13, chunks=5).dask)
    assert sorted(da.arange(77, chunks=13, dtype=float).dask) == sorted(
        da.arange(77, chunks=13, dtype=float).dask)

    # 0 size output
    darr = da.arange(0, 1, -0.5, chunks=20)
    nparr = np.arange(0, 1, -0.5)
    assert_eq(darr, nparr)

    darr = da.arange(0, -1, 0.5, chunks=20)
    nparr = np.arange(0, -1, 0.5)
    assert_eq(darr, nparr)

    # Unexpected or missing kwargs
    with pytest.raises(TypeError, match="whatsthis"):
        da.arange(10, chunks=-1, whatsthis=1)

    assert da.arange(10).chunks == ((10, ), )
Example #42
def test_gh3579():
    assert_eq(np.arange(10)[0::-1], da.arange(10, chunks=3)[0::-1])
    assert_eq(np.arange(10)[::-1], da.arange(10, chunks=3)[::-1])
Example #43
def test_index_with_int_dask_array_negindex(chunks):
    a = da.arange(4, chunks=chunks)
    idx = da.from_array([-1, -4], chunks=1)
    assert_eq(a[idx], np.array([3, 0]))
Example #44
def test_map_overlap_multiarray_uneven_numblocks_exception():
    x = da.arange(10, chunks=(10, ))
    y = da.arange(10, chunks=(5, 5))
    with pytest.raises(ValueError):
        # Fail with chunk alignment explicitly disabled
        da.map_overlap(lambda x, y: x + y, x, y, align_arrays=False).compute()
Example #45
File: test_ghost.py Project: tomo461/dask
def test_map_overlap_no_depth(boundary):
    x = da.arange(10, chunks=5)

    y = x.map_overlap(lambda i: i, depth=0, boundary=boundary, dtype=x.dtype)
    assert_eq(y, x)
Example #46
def _initialize_clusters(n_el, n_clusters, chunks=None):
    """ Initialize cluster array """
    cluster_idx = da.mod(da.arange(n_el, chunks=(chunks or n_el)), n_clusters)
    return da.random.permutation(cluster_idx)
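The helper assigns elements to clusters round-robin (element index modulo n_clusters) and then randomly permutes that assignment, so clusters come out near-equally sized. A quick sketch of the effect:

# a permutation of [0, 1, 2, 0, 1, 2]: three clusters of two elements each
labels = _initialize_clusters(6, 3)
assert sorted(labels.compute().tolist()) == [0, 0, 1, 1, 2, 2]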
Example #47
def get_test_data(input_shape=(100, 50), output_shape=(200, 100), output_proj=None,
                  input_dims=('y', 'x')):
    """Get common data objects used in testing.

    Returns: tuple with the following elements
        input_data_on_area: DataArray with dimensions as if it is a gridded
            dataset.
        input_area_def: AreaDefinition of the above DataArray
        input_data_on_swath: DataArray with dimensions as if it is a swath.
        input_swath: SwathDefinition of the above DataArray
        target_area_def: AreaDefinition to be used as a target for resampling

    """
    from xarray import DataArray
    import dask.array as da
    from pyresample.geometry import AreaDefinition, SwathDefinition
    from pyresample.utils import proj4_str_to_dict
    ds1 = DataArray(da.zeros(input_shape, chunks=85),
                    dims=input_dims,
                    attrs={'name': 'test_data_name', 'test': 'test'})
    if input_dims and 'y' in input_dims:
        ds1 = ds1.assign_coords(y=da.arange(input_shape[-2], chunks=85))
    if input_dims and 'x' in input_dims:
        ds1 = ds1.assign_coords(x=da.arange(input_shape[-1], chunks=85))
    if input_dims and 'bands' in input_dims:
        ds1 = ds1.assign_coords(bands=list('RGBA'[:ds1.sizes['bands']]))

    input_proj_str = ('+proj=geos +lon_0=-95.0 +h=35786023.0 +a=6378137.0 '
                      '+b=6356752.31414 +sweep=x +units=m +no_defs')
    source = AreaDefinition(
        'test_target',
        'test_target',
        'test_target',
        proj4_str_to_dict(input_proj_str),
        input_shape[1],  # width
        input_shape[0],  # height
        (-1000., -1500., 1000., 1500.))
    ds1.attrs['area'] = source
    if CRS is not None:
        crs = CRS.from_string(input_proj_str)
        ds1 = ds1.assign_coords(crs=crs)

    ds2 = ds1.copy()
    input_area_shape = tuple(ds1.sizes[dim] for dim in ds1.dims
                             if dim in ['y', 'x'])
    geo_dims = ('y', 'x') if input_dims else None
    lons = da.random.random(input_area_shape, chunks=50)
    lats = da.random.random(input_area_shape, chunks=50)
    swath_def = SwathDefinition(
        DataArray(lons, dims=geo_dims),
        DataArray(lats, dims=geo_dims))
    ds2.attrs['area'] = swath_def
    if CRS is not None:
        crs = CRS.from_string('+proj=latlong +datum=WGS84 +ellps=WGS84')
        ds2 = ds2.assign_coords(crs=crs)

    # set up target definition
    output_proj_str = ('+proj=lcc +datum=WGS84 +ellps=WGS84 '
                       '+lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs')
    output_proj_str = output_proj or output_proj_str
    target = AreaDefinition(
        'test_target',
        'test_target',
        'test_target',
        proj4_str_to_dict(output_proj_str),
        output_shape[1],  # width
        output_shape[0],  # height
        (-1000., -1500., 1000., 1500.),
    )
    return ds1, source, ds2, swath_def, target
Example #48
def test_reshape_array_3d(self):
    """Test the chunk stacking on 3d arrays."""
    from pyresample.gradient import reshape_to_stacked_3d
    data = da.arange(432).reshape((3, 12, 12)).rechunk((3, 4, 6))
    res = reshape_to_stacked_3d(data)
    assert res.shape == (3, 4, 6, 6)
Example #49
def test_coarsen_with_excess():
    x = da.arange(10, chunks=5)
    assert_eq(da.coarsen(np.min, x, {0: 3}, trim_excess=True),
              np.array([0, 5]))
    assert_eq(da.coarsen(np.sum, x, {0: 3}, trim_excess=True),
              np.array([0 + 1 + 2, 5 + 6 + 7]))
Example #50
def test_arange_has_dtype():
    assert da.arange(5, chunks=2).dtype == np.arange(5).dtype
Example #51
    def write_component_model(self, comps, ref_freq, mask, row_chunks,
                              chan_chunks):
        print("Writing model data at full freq resolution", file=log)
        order, npix = comps.shape
        comps = da.from_array(comps, chunks=(-1, -1))
        mask = da.from_array(mask.squeeze(), chunks=(-1, -1))
        writes = []
        for ims in self.ms:
            xds = xds_from_ms(ims,
                              group_cols=('FIELD_ID', 'DATA_DESC_ID'),
                              chunks={
                                  'row': (row_chunks, ),
                                  'chan': (chan_chunks, )
                              },
                              columns=('MODEL_DATA', 'UVW'))

            # subtables
            ddids = xds_from_table(ims + "::DATA_DESCRIPTION")
            fields = xds_from_table(ims + "::FIELD", group_cols="__row__")
            spws = xds_from_table(ims + "::SPECTRAL_WINDOW",
                                  group_cols="__row__")
            pols = xds_from_table(ims + "::POLARIZATION", group_cols="__row__")

            # subtable data
            ddids = dask.compute(ddids)[0]
            fields = dask.compute(fields)[0]
            pols = dask.compute(pols)[0]

            out_data = []
            for ds in xds:
                field = fields[ds.FIELD_ID]
                radec = field.PHASE_DIR.data.squeeze()
                if not np.array_equal(radec, self.radec):
                    continue

                spw = spws[ds.DATA_DESC_ID]
                freq = spw.CHAN_FREQ.data.squeeze()
                freq_bin_idx = da.arange(0,
                                         freq.size,
                                         1,
                                         chunks=freq.chunks,
                                         dtype=np.int64)
                freq_bin_counts = da.ones(freq.size,
                                          chunks=freq.chunks,
                                          dtype=np.int64)

                uvw = ds.UVW.data

                model_vis = getattr(ds, 'MODEL_DATA').data

                model = model_from_comps(comps, freq, mask, ref_freq)

                vis = im2vis(uvw,
                             freq,
                             model,
                             freq_bin_idx,
                             freq_bin_counts,
                             self.cell,
                             nthreads=self.nthreads,
                             epsilon=self.epsilon,
                             do_wstacking=self.do_wstacking)

                model_vis = populate_model(vis, model_vis)

                out_ds = ds.assign(
                    **{self.model_column: (("row", "chan", "corr"), model_vis)})
                out_data.append(out_ds)
            writes.append(
                xds_to_table(out_data, ims, columns=[self.model_column]))
        dask.compute(writes, scheduler='single-threaded')
Example #52

@pytest.mark.parametrize("dtype", ["f4", "f8"])
def test_sizeof(dtype):
    c = cupy.random.random((2, 3, 4), dtype=dtype)

    assert sizeof(c) == c.nbytes


@pytest.mark.skipif(not _numpy_120, reason="NEP-35 is not available")
@pytest.mark.parametrize(
    "arr", [np.arange(5), cupy.arange(5), da.arange(5), da.from_array(cupy.arange(5))]
)
@pytest.mark.parametrize(
    "like", [np.arange(5), cupy.arange(5), da.arange(5), da.from_array(cupy.arange(5))]
)
def test_asanyarray(arr, like):
    if isinstance(like, np.ndarray) and isinstance(
        da.utils.meta_from_array(arr), cupy.ndarray
    ):
        with pytest.raises(TypeError):
            a = da.utils.asanyarray_safe(arr, like=like)
    else:
        a = da.utils.asanyarray_safe(arr, like=like)
        assert type(a) is type(like)

Example #53
def create_jitter_noise(channel,
                        x_jit,
                        y_jit,
                        frame_osf,
                        frame_time,
                        key,
                        opt,
                        visualize=False):

    outputPointingTL = create_output_pointing_timeline(
        x_jit, y_jit, frame_osf, ndrCumSeq=channel.ndr_cumulative_sequence)

    jitter_x = channel.osf * (x_jit / channel.opt.plate_scale()).simplified
    jitter_y = channel.osf * (y_jit / channel.opt.plate_scale()).simplified

    fp_units = channel.fp.units
    fp = channel.fp.magnitude
    osf = np.int32(channel.osf)
    offs = np.int32(channel.offs)

    magnification_factor = np.ceil(
        max(3.0 / jitter_x.std(), 3.0 / jitter_y.std()))

    if magnification_factor > 1:
        try:
            # np.int was removed from NumPy; the builtin int is equivalent here
            mag = int(magnification_factor.item()) | 1
        except AttributeError:
            mag = int(magnification_factor) | 1

        fp = exolib.oversample(fp, mag)

        #### See ExoSim Issue 42 for the following.
        #    fp = np.where(fp >= 0.0, fp, 1e-10)

        osf *= mag
        offs = mag * offs + mag // 2
        jitter_x *= mag
        jitter_y *= mag

    if opt.noise.EnableSpatialJitter() != 'True': jitter_y *= 0.0
    if opt.noise.EnableSpectralJitter() != 'True': jitter_x *= 0.0

    jitter_x = np.round(jitter_x)
    jitter_y = np.round(jitter_y)
    noise = np.zeros((int(fp.shape[0] // osf), int(fp.shape[1] // osf),
                      0)).astype(np.float32)

    # multiple orders reshaping
    if (hasattr(channel.opt, "dispersion")
            and (isinstance(channel.opt.dispersion, list))):
        exosim_msg(
            "Using Dask for parellized processing of timeline datacube...")
        cores = opt.common.num_cores
        lim = cores // len(channel.opt.dispersion)
        indxRanges = np.arange(0, lim + 1) * channel.tl_shape[2] // lim

        # need to separate the "fake" focal planes so that they don't bleed into one another
        # when jitter is applied.

        client = Client(n_workers=cores,
                        memory_limit=f'{opt.common.gb_per_core}GB')
        temp_shape = (noise.shape[0],
                      noise.shape[1] // len(channel.opt.dispersion), 0)
        fp = np.array(np.split(fp, len(channel.opt.dispersion), 1))
        results = []
        for d, _ in enumerate(channel.opt.dispersion):
            jitters = []
            for i in range(len(indxRanges) - 1):
                startIdx = int(indxRanges[i])
                endIdx = int(indxRanges[i + 1])

                tfp = fp[d].astype(np.float32)
                tosf = osf.astype(np.int32)
                tndr = channel.ndr_sequence[startIdx:endIdx].astype(np.int32)
                tndrs = channel.ndr_cumulative_sequence[
                    startIdx:endIdx].astype(np.int32)
                tfosf = frame_osf.astype(np.int32)
                tjitx = jitter_x.magnitude.astype(np.int32)
                tjity = jitter_y.magnitude.astype(np.int32)
                txoff = offs.astype(np.int32)
                tyoff = offs.astype(np.int32)

                jitter = delayed(c_create_jitter_noise)(tfp,
                                                        tosf,
                                                        tndr,
                                                        tndrs,
                                                        tfosf,
                                                        tjitx,
                                                        tjity,
                                                        x_offs=txoff,
                                                        y_offs=tyoff)
                jitters.append(jitter)

            result = da.concatenate([
                da.from_delayed(
                    jit,
                    dtype='float32',
                    shape=(temp_shape[0], temp_shape[1], endIdx - startIdx))
                for jit in jitters
            ],
                                    axis=2)
            results.append(result)
        noise = da.stack(results, axis=0)

        noise = da.concatenate((noise[da.arange(noise.shape[0])]), axis=1)
        if visualize:
            noise.visualize(filename='exosim-dask-noise.png')
        noise = noise.compute()
        client.close()

    else:
        indxRanges = np.arange(0, 7) * channel.tl_shape[2] // 6

    for i in range(len(indxRanges) - 1):
        startIdx = int(indxRanges[i])
        endIdx = int(indxRanges[i + 1])
        noise = np.append(
            noise,
            c_create_jitter_noise(
                fp.astype(np.float32),
                osf.astype(np.int32),
                channel.ndr_sequence[startIdx:endIdx].astype(np.int32),
                channel.ndr_cumulative_sequence[startIdx:endIdx].astype(
                    np.int32),
                frame_osf.astype(np.int32),
                jitter_x.magnitude.astype(np.int32),
                jitter_y.magnitude.astype(np.int32),
                x_offs=offs.astype(np.int32),
                y_offs=offs.astype(np.int32)).astype(np.float32),
            axis=2)

    ## Multiply units to noise in 2 steps, to avoid
    ##        Quantities memory inefficiency
    qq = channel.ndr_sequence * fp_units * frame_time
    noise = noise * qq
    return noise, outputPointingTL
Example #54
def test_arange():
    darr = da.arange(77, chunks=13)
    nparr = np.arange(77)
    assert_eq(darr, nparr)

    darr = da.arange(2, 13, chunks=5)
    nparr = np.arange(2, 13)
    assert_eq(darr, nparr)

    darr = da.arange(4, 21, 9, chunks=13)
    nparr = np.arange(4, 21, 9)
    assert_eq(darr, nparr)

    # negative steps
    darr = da.arange(53, 5, -3, chunks=5)
    nparr = np.arange(53, 5, -3)
    assert_eq(darr, nparr)

    darr = da.arange(77, chunks=13, dtype=float)
    nparr = np.arange(77, dtype=float)
    assert_eq(darr, nparr)

    darr = da.arange(2, 13, chunks=5, dtype=int)
    nparr = np.arange(2, 13, dtype=int)
    assert_eq(darr, nparr)
    assert (sorted(da.arange(2, 13, chunks=5).dask) == sorted(
        da.arange(2, 13, chunks=5).dask))
    assert (sorted(da.arange(77, chunks=13, dtype=float).dask) == sorted(
        da.arange(77, chunks=13, dtype=float).dask))

    # 0 size output
    darr = da.arange(0, 1, -0.5, chunks=20)
    nparr = np.arange(0, 1, -0.5)
    assert_eq(darr, nparr)

    darr = da.arange(0, -1, 0.5, chunks=20)
    nparr = np.arange(0, -1, 0.5)
    assert_eq(darr, nparr)
Example #55
def test_empty_array():
    assert eq(np.arange(0), da.arange(0, chunks=5))
Example #56
def test_array_ufunc_out():
    x = da.arange(10, chunks=(5,))
    np.sin(x, out=x)
    np.add(x, 10, out=x)
    assert_eq(x, np.sin(np.arange(10)) + 10)
Example #57
def test_unsupported_ufunc_methods():
    x = da.arange(10, chunks=(5,))
    with pytest.raises(TypeError):
        assert np.add.reduce(x)
Example #58
def test_out_shape_mismatch():
    x = da.arange(10, chunks=(5,))
    y = da.arange(15, chunks=(5,))
    with pytest.raises(ValueError):
        assert np.log(x, out=y)
Example #59
def test_arange_cast_float_int_step():
    darr = da.arange(3.3, -9.1, -.25, chunks=3, dtype='i8')
    nparr = np.arange(3.3, -9.1, -.25, dtype='i8')
    assert_eq(darr, nparr)
Example #60
def test_setitem_extended_API_2d_rhs_func_of_lhs():
    # Cases:
    # * RHS and/or indices are a function of the LHS
    # * Indices have unknown chunk sizes
    # * RHS has extra leading size 1 dimensions compared to LHS
    x = cupy.arange(60).reshape((6, 10))
    chunks = (2, 3)

    dx = da.from_array(x, chunks=chunks)
    dx[2:4, dx[0] > 3] = -5
    x[2:4, x[0] > 3] = -5
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[2, dx[0] < -2] = -7
    x[2, x[0] < -2] = -7
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[dx % 2 == 0] = -8
    x[x % 2 == 0] = -8
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[dx % 2 == 0] = -8
    x[x % 2 == 0] = -8
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[3:5, 5:1:-2] = -dx[:2, 4:1:-2]
    x[3:5, 5:1:-2] = -x[:2, 4:1:-2]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[0, 1:3] = -dx[0, 4:2:-1]
    x[0, 1:3] = -x[0, 4:2:-1]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[...] = dx
    x[...] = x
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[...] = dx[...]
    x[...] = x[...]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[0] = dx[-1]
    x[0] = x[-1]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[0, :] = dx[-2, :]
    x[0, :] = x[-2, :]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[:, 1] = dx[:, -3]
    x[:, 1] = x[:, -3]
    assert_eq(x, dx.compute())

    index = da.from_array([0, 2], chunks=(2,))
    dx = da.from_array(x, chunks=chunks)
    dx[index, 8] = [99, 88]
    x[[0, 2], 8] = [99, 88]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[:, index] = dx[:, :2]
    x[:, [0, 2]] = x[:, :2]
    assert_eq(x, dx.compute())

    index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
    dx = da.from_array(x, chunks=chunks)
    dx[index, 7] = [-23, -33]
    x[index.compute(), 7] = [-23, -33]
    assert_eq(x, dx.compute())

    index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
    dx = da.from_array(x, chunks=chunks)
    dx[(index,)] = -34
    x[(index.compute(),)] = -34
    assert_eq(x, dx.compute())

    index = index - 4
    dx = da.from_array(x, chunks=chunks)
    dx[index, 7] = [-43, -53]
    x[index.compute(), 7] = [-43, -53]
    assert_eq(x, dx.compute())

    index = da.from_array([0, -1], chunks=(1,))
    x[[0, -1]] = 9999
    dx[(index,)] = 9999
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=(-1, -1))
    dx[...] = da.from_array(x, chunks=chunks)
    assert_eq(x, dx.compute())

    # Both tests below fail in CuPy due to leading singular dimensions
    if False:
        # RHS has extra leading size 1 dimensions compared to LHS
        dx = da.from_array(x.copy(), chunks=(2, 3))
        v = x.reshape((1, 1) + x.shape)
        x[...] = v
        dx[...] = v
        assert_eq(x, dx.compute())

        index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
        v = -cupy.arange(12).reshape(1, 1, 6, 2)
        x[:, [0, 1]] = v
        dx[:, index] = v
        assert_eq(x, dx.compute())