Example #1
def cf_1gm_geos_b_a():
    return xr.Dataset(
        {
            "Rad":
            xr.DataArray(
                da.zeros((OTHER_DIM_SIZE, Y_DIM_SIZE, X_DIM_SIZE)),
                dims=("other", "b", "a"),
                attrs={"grid_mapping": "goes_imager_projection"},
            )
        },
        coords={
            "b":
            xr.DataArray(
                da.linspace(0.1265, 0.04257, Y_DIM_SIZE),
                dims=("b", ),
                attrs={"units": "rad"},
            ),
            "a":
            xr.DataArray(
                da.linspace(-0.07503, 0.06495, X_DIM_SIZE),
                dims=("a", ),
                attrs={"units": "rad"},
            ),
            "t":
            np.array("2017-09-02T18:03:34", dtype=np.datetime64),
            "band_id":
            xr.DataArray(np.array([1], dtype=np.uint8),
                         dims=("band", ),
                         attrs={"units": "1"}),
        },
    )
Example #2
def raw_coords_lats1d_lons1d():
    return xr.DataArray(
        da.empty((Y_DIM_SIZE, X_DIM_SIZE)),
        dims=("lats", "lons"),
        coords={
            "lons": da.linspace(25, 35, X_DIM_SIZE),
            "lats": da.linspace(25, 35, Y_DIM_SIZE),
        },
    )
Example #3
def test_linspace(endpoint):
    darr = da.linspace(6, 49, endpoint=endpoint, chunks=5)
    nparr = np.linspace(6, 49, endpoint=endpoint)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13)
    nparr = np.linspace(1.4, 4.9, endpoint=endpoint, num=13)
    assert_eq(darr, nparr)

    darr = da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float)
    nparr = np.linspace(6, 49, endpoint=endpoint, dtype=float)
    assert_eq(darr, nparr)

    darr, dstep = da.linspace(6, 49, endpoint=endpoint, chunks=5, retstep=True)
    nparr, npstep = np.linspace(6, 49, endpoint=endpoint, retstep=True)
    assert np.allclose(dstep, npstep)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13, dtype=int)
    nparr = np.linspace(1.4, 4.9, num=13, endpoint=endpoint, dtype=int)
    assert_eq(darr, nparr)
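    # The remaining assertions verify that identical linspace calls build
    # identical, deterministic task graphs (equal sorted key sets).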
    assert (sorted(da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask) ==
            sorted(da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask))
    assert (sorted(da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask) ==
            sorted(da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask))
Example #4
def test_linspace(endpoint):
    darr = da.linspace(6, 49, endpoint=endpoint, chunks=5)
    nparr = np.linspace(6, 49, endpoint=endpoint)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13)
    nparr = np.linspace(1.4, 4.9, endpoint=endpoint, num=13)
    assert_eq(darr, nparr)

    darr = da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float)
    nparr = np.linspace(6, 49, endpoint=endpoint, dtype=float)
    assert_eq(darr, nparr)

    darr, dstep = da.linspace(6, 49, endpoint=endpoint, chunks=5, retstep=True)
    nparr, npstep = np.linspace(6, 49, endpoint=endpoint, retstep=True)
    assert np.allclose(dstep, npstep)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13, dtype=int)
    nparr = np.linspace(1.4, 4.9, num=13, endpoint=endpoint, dtype=int)
    assert_eq(darr, nparr)
    assert sorted(
        da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask
    ) == sorted(da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask)
    assert sorted(
        da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask
    ) == sorted(da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask)
Example #5
def _perlin_dask_numpy(data: da.Array, freq: tuple, seed: int) -> da.Array:
    np.random.seed(seed)
    p = np.random.permutation(2**20)
    p = np.append(p, p)

    height, width = data.shape
    linx = da.linspace(0, freq[0], width, endpoint=False, dtype=np.float32)
    liny = da.linspace(0, freq[1], height, endpoint=False, dtype=np.float32)
    x, y = da.meshgrid(linx, liny)

    _func = partial(_perlin, p)
    data = da.map_blocks(_func, x, y, meta=np.array((), dtype=np.float32))

    data = (data - da.min(data)) / da.ptp(data)
    return data
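
A standalone sketch of the same map_blocks pattern used above (the stand-in
block function here replaces _perlin, which is defined outside this snippet):

import numpy as np
import dask.array as da
from functools import partial

def _blockfunc(scale, x, y):
    # stand-in for _perlin(p, x, y); any elementwise NumPy function works
    return (np.sin(scale * x) * np.cos(scale * y)).astype(np.float32)

x, y = da.meshgrid(da.linspace(0, 1, 512, dtype=np.float32),
                   da.linspace(0, 1, 256, dtype=np.float32))
noise = da.map_blocks(partial(_blockfunc, 3.0), x, y,
                      meta=np.array((), dtype=np.float32))
print(noise.dtype, noise.shape)  # float32 (256, 512)
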
Example #6
 def test_1d_tiles_from_coords_chunks(self):
     gm = GridMapping.from_coords(
         x_coords=xr.DataArray(da.linspace(177.5, 184.5, 8, chunks=4),
                               dims='lon'),
         y_coords=xr.DataArray(da.linspace(4.5, -4.5, 10, chunks=5),
                               dims='lat'),
         crs=GEO_CRS)
     self.assertEqual((8, 10), gm.size)
     self.assertEqual((4, 5), gm.tile_size)
     self.assertEqual((1, 1), gm.xy_res)
     self.assertEqual((177, -5, 185, 5), gm.xy_bbox)
     self.assertEqual(GEO_CRS, gm.crs)
     self.assertEqual(True, gm.is_regular)
     self.assertEqual(False, gm.is_j_axis_up)
     self.assertEqual(True, gm.is_lon_360)
Example #7
    def generate(self):
        """
        Sub-classable method for generating a factorial design of the
        specified 'levels' in the given domain. The number of generated
        points is levels**d.

        Returns
        -------
        dask.array.Array
            The generated design points, stacked row-wise.
        """
        if hasattr(self, 'random_idx'):
            del self.random_idx

        # Get grid coordinates
        grid_coords = [
            da.linspace(lb, ub, num=self.levels)
            for lb, ub in zip(self.xmin, self.xmax)
        ]

        # Generate the full grid
        x = da.meshgrid(*grid_coords)
        dim_idx = [item.ravel() for item in x]
        x = da.vstack(dim_idx).T
        x = x.rechunk(('auto', x.shape[1]))
        if self.use_logger:
            self.logger.info(
                "Factorial design: generated {0} points in {1} dimensions".
                format(len(x), len(self.xmin)))
        self.generated = x
        return x
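
As a rough standalone illustration of the grid construction above (xmin, xmax
and levels are attributes of the class in the original; plain values are
assumed here):

import dask.array as da

xmin, xmax, levels = [0.0, -1.0], [1.0, 1.0], 3
grid_coords = [da.linspace(lb, ub, num=levels) for lb, ub in zip(xmin, xmax)]
x = da.vstack([g.ravel() for g in da.meshgrid(*grid_coords)]).T
print(x.shape)  # (9, 2): levels**d points for d = 2 dimensions
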
Example #8
def discretize(x_data, n_state, min_=0.0, max_=1.0, chunks=()):
    """Primitive discretization of a microstructure.

    Args:
      x_data: the data to discretize
      n_state: the number of local states
      min_: the minimum local state
      max_: the maximum local state

    Returns:
      the discretized microstructure

    >>> discretize(da.random.random((12, 9), chunks=(3, 9)),
    ...            3,
    ...            chunks=(1,)).chunks
    ((3, 3, 3, 3), (9,), (1, 1, 1))

    >>> discretize(np.array([[0, 1], [0.5, 0.5]]), 3, chunks=(1,)).chunks
    ((2,), (2,), (1, 1, 1))

    >>> discretize(np.array([[0, 1], [0.5, 0.5]]), 3, chunks=(1,)).compute()
    array([[[ 1.,  0.,  0.],
            [ 0.,  0.,  1.]],
    <BLANKLINE>
           [[ 0.,  1.,  0.],
            [ 0.,  1.,  0.]]])
    """
    return da.maximum(
        discretize_nomax(
            da.clip(x_data, min_, max_),
            da.linspace(min_, max_, n_state, chunks=chunks or (n_state, ))), 0)
Example #9
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    # get the separating hyperplane
    w = clf.coef_[0]
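    # Decision boundary: w[0]*x + w[1]*y + clf.intercept_[0] = 0, i.e.
    # y = -(w[0]/w[1]) * x - clf.intercept_[0] / w[1]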
    a = -w[0] / w[1]
    xx = da.linspace(min_x - 5, max_x + 5)  # make sure the line is long enough
    yy = a * xx - (clf.intercept_[0]) / w[1]
    plt.plot(xx, yy, linestyle, label=label, linewidth=2)
Example #10
def test_linspace():
    darr = da.linspace(6, 49, chunks=5)
    nparr = np.linspace(6, 49)
    eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, chunks=5, num=13)
    nparr = np.linspace(1.4, 4.9, num=13)
    eq(darr, nparr)

    darr = da.linspace(6, 49, chunks=5, dtype=float)
    nparr = np.linspace(6, 49, dtype=float)
    eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, chunks=5, num=13, dtype=int)
    nparr = np.linspace(1.4, 4.9, num=13, dtype=int)
    eq(darr, nparr)
Example #11
    def test_calibrate(self, *mocks):
        """Test calibrate."""
        lut = np.linspace(1e6, 1.6e6, num=1024).astype(np.int32)
        lut = np.tile(lut, (10, 1))
        fh = HRITGOMSFileHandler()
        fh.prologue = {'ImageCalibration': lut}
        fh.chid = 1

        # Set up test input data

        counts = DataArray(
            da.linspace(1, 1023, 25, chunks=5, dtype=np.uint16).reshape(5, 5))

        # Test that calibration fails if given a silly mode
        self.assertRaises(NotImplementedError, fh.calibrate, counts,
                          'nonsense')

        # Test that 'counts' calibration returns identical values to input
        out = fh.calibrate(counts, 'counts')
        self.assertTrue(np.all(out.values == counts.values))

        # Test that 'radiance' calibrates successfully
        out = fh.calibrate(counts, 'radiance')
        self.assertTrue(np.allclose(out.values, lut[0, counts] / 1000.))

        # Test that 'brightness_temperature' calibrates successfully
        out = fh.calibrate(counts, 'brightness_temperature')
        self.assertTrue(np.allclose(out.values, lut[0, counts] / 1000.))
Example #12
def test_linspace():
    darr = da.linspace(6, 49, blocksize=5)
    nparr = np.linspace(6, 49)
    eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, blocksize=5, num=13)
    nparr = np.linspace(1.4, 4.9, num=13)
    eq(darr, nparr)

    darr = da.linspace(6, 49, blocksize=5, dtype=float)
    nparr = np.linspace(6, 49, dtype=float)
    eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, blocksize=5, num=13, dtype=int)
    nparr = np.linspace(1.4, 4.9, num=13, dtype=int)
    eq(darr, nparr)
Example #13
def atm_variables_finder(mus,
                         muv,
                         phi,
                         height,
                         tau,
                         tO3,
                         tH2O,
                         taustep4sphalb,
                         tO2=1.0):
    tau_step = da.linspace(taustep4sphalb,
                           MAXNUMSPHALBVALUES * taustep4sphalb,
                           MAXNUMSPHALBVALUES,
                           chunks=int(MAXNUMSPHALBVALUES / 2))
    sphalb0 = csalbr(tau_step)
    taur = tau * da.exp(-height / SCALEHEIGHT)
    rhoray, trdown, trup = chand(phi, muv, mus, taur)
    if isinstance(height, xr.DataArray):

        def _sphalb_index(index_arr, sphalb0):
            # FIXME: if/when dask can support lazy index arrays then remove this
            return sphalb0[index_arr]

        sphalb = da.map_blocks(_sphalb_index, (taur / taustep4sphalb +
                                               0.5).astype(np.int32).data,
                               sphalb0.compute(),
                               dtype=sphalb0.dtype)
    else:
        sphalb = sphalb0[(taur / taustep4sphalb + 0.5).astype(np.int32)]
    Ttotrayu = ((2 / 3. + muv) + (2 / 3. - muv) * trup) / (4 / 3. + taur)
    Ttotrayd = ((2 / 3. + mus) + (2 / 3. - mus) * trdown) / (4 / 3. + taur)
    TtotraytH2O = Ttotrayu * Ttotrayd * tH2O
    tOG = tO3 * tO2
    return sphalb, rhoray, TtotraytH2O, tOG
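
The lazy-index workaround above generalizes to any small, in-memory lookup
table indexed by a dask array. A minimal sketch (the names lut, idx and _take
are illustrative, not from the original module):

import numpy as np
import dask.array as da

lut = np.linspace(0.0, 1.0, 64)  # small lookup table held in memory
idx = da.random.randint(0, 64, size=(1000,), chunks=250)

def _take(index_arr, table):
    # NumPy arguments pass through map_blocks unchanged; only dask arrays
    # are split into blocks.
    return table[index_arr]

result = da.map_blocks(_take, idx, lut, dtype=lut.dtype)
print(result.compute().shape)  # (1000,)
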
Example #14
def test_linspace(endpoint):
    darr = da.linspace(6, 49, endpoint=endpoint, chunks=5)
    nparr = np.linspace(6, 49, endpoint=endpoint)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13)
    nparr = np.linspace(1.4, 4.9, endpoint=endpoint, num=13)
    assert_eq(darr, nparr)

    darr = da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float)
    nparr = np.linspace(6, 49, endpoint=endpoint, dtype=float)
    assert_eq(darr, nparr)

    darr, dstep = da.linspace(6, 49, endpoint=endpoint, chunks=5, retstep=True)
    nparr, npstep = np.linspace(6, 49, endpoint=endpoint, retstep=True)
    assert np.allclose(dstep, npstep)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13, dtype=int)
    nparr = np.linspace(1.4, 4.9, num=13, endpoint=endpoint, dtype=int)
    assert_eq(darr, nparr)
    assert sorted(
        da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask
    ) == sorted(da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask)
    assert sorted(
        da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask
    ) == sorted(da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask)

    x = da.array([0.2, 6.4, 3.0, 1.6])
    nparr = np.linspace(0, 2, 8, endpoint=endpoint)
    darr = da.linspace(da.argmin(x), da.argmax(x) + 1, 8, endpoint=endpoint)
    assert_eq(darr, nparr)
Example #15
def _terrain_dask_numpy(data: da.Array,
                        seed: int,
                        x_range_scaled: tuple,
                        y_range_scaled: tuple,
                        zfactor: int) -> da.Array:
    data = data * 0

    height, width = data.shape
    linx = da.linspace(
        x_range_scaled[0], x_range_scaled[1], width, endpoint=False,
        dtype=np.float32
    )
    liny = da.linspace(
        y_range_scaled[0], y_range_scaled[1], height, endpoint=False,
        dtype=np.float32
    )
    x, y = da.meshgrid(linx, liny)

    nrange = np.arange(2 ** 20, dtype=int)

    # multiplier, (xfreq, yfreq)
    NOISE_LAYERS = ((1 / 2 ** i, (2 ** i, 2 ** i)) for i in range(16))
    for i, (m, (xfreq, yfreq)) in enumerate(NOISE_LAYERS):
        np.random.seed(seed + i)
        p = np.random.permutation(nrange)
        p = np.append(p, p)

        _func = partial(_perlin, p)
        noise = da.map_blocks(
            _func,
            x * xfreq,
            y * yfreq,
            meta=np.array((), dtype=np.float32)
        )

        data += noise * m

    data /= (1.00 + 0.50 + 0.25 + 0.13 + 0.06 + 0.03)
    data = data ** 3

    data = (data - np.min(data)) / np.ptp(data)
    data[data < 0.3] = 0  # create water
    data *= zfactor

    return data
Example #16
def gx_y_x():
    crs = CRS.from_epsg(4326)
    return xr.DataArray(
        da.empty((Y_DIM_SIZE, X_DIM_SIZE)),
        dims=("y", "x"),
        attrs={
            "grid_mapping": "spatial_ref",
        },
        coords={
            "spatial_ref": xr.DataArray(
                0,
                attrs={
                    "crs_wkt": crs.to_wkt(),
                    "spatial_ref": crs.to_wkt(),
                },
            ),
            "y": da.linspace(0, 15000, Y_DIM_SIZE),
            "x": da.linspace(-15000, 10000, X_DIM_SIZE),
        },
    )
Example #17
 def _new_xy_coords(self) -> xr.DataArray:
     self._assert_regular()
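     # Coordinates are cell centers: the bounding box is offset inward by
     # half a resolution step on each side.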
     x_res_05, y_res_05 = self.x_res / 2, self.y_res / 2
     x1, x2 = self.x_min + x_res_05, self.x_max - x_res_05
     y1, y2 = self.y_min + y_res_05, self.y_max - y_res_05
     if not self.is_j_axis_up:
         y1, y2 = y2, y1
     x_name, y_name = self.xy_dim_names
     x_coords_1d = xr.DataArray(da.linspace(x1, x2, self.width,
                                            chunks=self.tile_width),
                                dims=x_name)
     y_coords_1d = xr.DataArray(da.linspace(y1, y2, self.height,
                                            chunks=self.tile_height),
                                dims=y_name)
     y_coords_2d, x_coords_2d = xr.broadcast(y_coords_1d, x_coords_1d)
     xy_coords = xr.concat([x_coords_2d, y_coords_2d],
                           dim='coord').chunk((2,
                                               self.tile_height,
                                               self.tile_width))
     xy_coords.name = 'xy_coords'
     return xy_coords
Example #18
    def test_calibrate(self):
        """Test calibration"""
        # Generate test data
        counts = DataArray(da.linspace(0, 1200, 25, chunks=5).reshape(5, 5))
        refl = np.array(
            [[np.nan, 4.79247312, 9.68494624, 14.57741935, 19.46989247],
             [24.36236559, 29.25483871, 34.14731183, 39.03978495, 43.93225806],
             [48.82473118, 53.7172043, 58.60967742, 63.50215054, 68.39462366],
             [73.28709677, 78.17956989, 83.07204301, 87.96451613, 92.85698925],
             [97.74946237, 100., 100., 100., 100.]])
        bt = np.array(
            [[np.nan, 320.20678397, 310.43356794, 300.66035191, 290.88713587],
             [281.11391984, 271.34070381, 261.56748778, 251.79427175, 242.02105572],
             [232.24783969, 222.47462366, 212.70140762, 202.92819159, 193.15497556],
             [183.38175953, 173.6085435, 163.83532747, 154.06211144, 144.28889541],
             [134.51567937, 130.02, 130.02, 130.02, 130.02]])

        # Choose an area near the subsatellite point to avoid masking
        # of space pixels
        mda = self._get_mda(nlines=5,
                            ncols=5,
                            loff=1375.0,
                            coff=1375.0,
                            segno=0)
        reader = self._get_reader(mda=mda)

        # 1. Counts
        res = reader.calibrate(data=counts, calibration='counts')
        self.assertTrue(np.all(counts.values == res.values))

        # 2. Reflectance
        res = reader.calibrate(data=counts, calibration='reflectance')
        np.testing.assert_allclose(refl, res.values)  # also compares NaN

        # 3. Brightness temperature
        mda_bt = self._get_mda(nlines=5,
                               ncols=5,
                               loff=1375.0,
                               coff=1375.0,
                               segno=0,
                               vis=False)
        reader_bt = self._get_reader(mda=mda_bt)
        res = reader_bt.calibrate(data=counts,
                                  calibration='brightness_temperature')
        np.testing.assert_allclose(bt, res.values)  # also compares NaN
Example #19
def test_dataset_create_table(tmp_path, dataset_chunks, dtype):
    datasets = []
    names = []
    datas = []
    row_sum = 0

    for chunks in dataset_chunks:
        shapes = {k: sum(c) for k, c in chunks.items()}
        row_sum += shapes['row']

        # Make some visibilities
        dims = ("row", "chan", "corr")
        shape = tuple(shapes[d] for d in dims)
        data_chunks = tuple(chunks[d] for d in dims)
        data = da.random.random(shape, chunks=data_chunks).astype(dtype)
        data_var = Variable(dims, data, {})

        # Make some string names
        dims = ("row", )
        shape = tuple(shapes[d] for d in dims)
        str_chunks = tuple(chunks[d] for d in dims)
        np_str_array = np.asarray(["BOB"] * shape[0], dtype=object)
        da_str_array = da.from_array(np_str_array, chunks=str_chunks)
        str_array_var = Variable(dims, da_str_array, {})

        datasets.append(Dataset({"DATA": data_var, "NAMES": str_array_var}))
        datas.append(data)
        names.extend(np_str_array.tolist())

    freq = da.linspace(.856e9, 2 * .856e9, 64, chunks=16)
    sub_datasets = [Dataset({"FREQ": (("row", "chan"), freq[None, :])})]

    # Write the data to new tables
    table_name = os.path.join(str(tmp_path), 'test.table')
    writes = write_datasets(table_name, datasets, ["DATA", "NAMES"])
    subt_writes = write_datasets(table_name + "::SPW", sub_datasets, ["FREQ"])
    dask.compute(writes, subt_writes)

    # Check written data
    with pt.table(table_name, readonly=True, lockoptions='auto',
                  ack=False) as T:
        assert row_sum == T.nrows()
        assert_array_equal(T.getcol("DATA"), np.concatenate(datas))
        assert_array_equal(T.getcol("NAMES"), names)

    # Sub-table correctly linked and populated
    with pt.table(table_name + "::SPW",
                  readonly=True,
                  lockoptions='auto',
                  ack=False) as T:
        assert T.nrows() == 1
        assert_array_equal(T.getcol("FREQ")[0], freq)
Example #20
def test_linspace():
    darr = da.linspace(6, 49, chunks=5)
    nparr = np.linspace(6, 49)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, chunks=5, num=13)
    nparr = np.linspace(1.4, 4.9, num=13)
    assert_eq(darr, nparr)

    darr = da.linspace(6, 49, chunks=5, dtype=float)
    nparr = np.linspace(6, 49, dtype=float)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, chunks=5, num=13, dtype=int)
    nparr = np.linspace(1.4, 4.9, num=13, dtype=int)
    assert_eq(darr, nparr)
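    # Repeated identical calls should yield deterministic task graphs
    # (the sorted graph keys match).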
    assert (sorted(da.linspace(1.4, 4.9, chunks=5, num=13).dask) ==
            sorted(da.linspace(1.4, 4.9, chunks=5, num=13).dask))
    assert (sorted(da.linspace(6, 49, chunks=5, dtype=float).dask) ==
            sorted(da.linspace(6, 49, chunks=5, dtype=float).dask))
Example #21
def test_linspace():
    darr = da.linspace(6, 49, chunks=5)
    nparr = np.linspace(6, 49)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, chunks=5, num=13)
    nparr = np.linspace(1.4, 4.9, num=13)
    assert_eq(darr, nparr)

    darr = da.linspace(6, 49, chunks=5, dtype=float)
    nparr = np.linspace(6, 49, dtype=float)
    assert_eq(darr, nparr)

    darr = da.linspace(1.4, 4.9, chunks=5, num=13, dtype=int)
    nparr = np.linspace(1.4, 4.9, num=13, dtype=int)
    assert_eq(darr, nparr)
    assert (sorted(da.linspace(1.4, 4.9, chunks=5, num=13).dask) == sorted(
        da.linspace(1.4, 4.9, chunks=5, num=13).dask))
    assert (sorted(da.linspace(6, 49, chunks=5, dtype=float).dask) == sorted(
        da.linspace(6, 49, chunks=5, dtype=float).dask))
Example #22
    def test_temperature_difference(self, tmpdir, abi_l1b_c01_data_array):
        new_data_arr = abi_l1b_c01_data_array.copy()
        data = da.linspace(-10, 10,
                           new_data_arr.size).reshape(new_data_arr.shape)
        new_data_arr.data = data
        new_data_arr.attrs["name"] = "test_temperature_difference"
        scn = Scene()
        scn["test_temperature_difference"] = new_data_arr
        out_fn = str(tmpdir + "test_temperature_difference.tif")
        scn.save_datasets(filename=out_fn)

        with rasterio.open(out_fn, "r") as out_ds:
            assert out_ds.count == 2
            l_data = out_ds.read(1)
            # see polar2grid/tests/etc/enhancements/generic.yaml
            flat_l_data = l_data.ravel()
            data = data.ravel().compute()
            exp_out = np.round(np.linspace(5.0, 205.0,
                                           data.size)).astype(np.uint8)
            np.testing.assert_allclose(flat_l_data, exp_out)
Example #23
    def test_p2g_palettize(self, keep_palette, ds_name, tmpdir,
                           abi_l1b_c01_data_array):
        if ds_name == "test_p2g_palettize3":
            shutil.copy(os.path.join(TEST_ETC_DIR, f"{ds_name}.npy"), tmpdir)
        new_data_arr = abi_l1b_c01_data_array.copy()
        data = da.linspace(180, 280,
                           new_data_arr.size).reshape(new_data_arr.shape)
        new_data_arr.data = data
        new_data_arr.attrs["name"] = ds_name
        scn = Scene()
        scn[ds_name] = new_data_arr
        out_fn = str(tmpdir + f"{ds_name}_{keep_palette}.tif")
        with easy_cwd(tmpdir):
            scn.save_datasets(filename=out_fn, keep_palette=keep_palette)

        with rasterio.open(out_fn, "r") as out_ds:
            is_palette = keep_palette and "palettize" in ds_name
            num_bands = 1 if is_palette else 4
            assert out_ds.count == num_bands
            if is_palette:
                assert out_ds.colormap(1) is not None
Example #24
def discretize(x_data, n_state=2, min_=0.0, max_=1.0, chunks=None):
    """Primitive discretization of a microstructure.

    Args:
      x_data: the data to discretize
      n_state: the number of local states
      min_: the minimum local state
      max_: the maximum local state
      chunks: chunks size for state axis

    Returns:
      the discretized microstructure

    >>> discretize(da.random.random((12, 9), chunks=(3, 9)),
    ...            3,
    ...            chunks=1).chunks
    ((3, 3, 3, 3), (9,), (1, 1, 1))

    >>> discretize(np.array([[0, 1], [0.5, 0.5]]), 3, chunks=1).chunks
    ((2,), (2,), (1, 1, 1))

    >>> assert np.allclose(
    ...     discretize(
    ...         np.array([[0, 1], [0.5, 0.5]]),
    ...         3,
    ...         chunks=1
    ...     ).compute(),
    ...     [[[1, 0, 0], [0, 0, 1]], [[0, 1, 0], [0, 1, 0]]]
    ... )
    """
    return da.maximum(
        discretize_nomax(
            da.clip(x_data, min_, max_),
            da.linspace(min_, max_, n_state, chunks=(chunks or n_state,)),
        ),
        0,
    )
Example #25
def test_dask_degridder_gridder():
    from africanus.filters import convolution_filter
    from africanus.gridding.simple.dask import grid, degrid

    import dask.array as da

    row = 100
    chan = 16
    corr = (2, 2)
    nx = ny = 1024

    row_chunk = 25
    chan_chunk = 4
    corr_chunk = corr

    vis_shape = (row, chan) + corr
    vis_chunks = (row_chunk, chan_chunk) + corr_chunk

    vis = (da.random.random(vis_shape, chunks=vis_chunks) +
           1j * da.random.random(vis_shape, chunks=vis_chunks))
    uvw = da.random.random((row, 3), chunks=(row_chunk, 3))
    # 4 channels of MeerKAT L band
    ref_wave = lightspeed / da.linspace(
        .856e9, .856e9 * 2, chan, chunks=chan_chunk)
    flags = da.random.randint(0, 1, size=vis_shape, chunks=vis_chunks)

    weights = da.random.random(vis_shape, chunks=vis_chunks)

    conv_filter = convolution_filter(3, 63, "sinc")

    vis_grid = grid(vis, uvw, flags, weights, ref_wave, conv_filter, ny, nx)

    degrid_vis = degrid(vis_grid, uvw, weights, ref_wave, conv_filter)

    np_vis_grid, np_degrid_vis = da.compute(vis_grid, degrid_vis)
    assert np_vis_grid.shape == (ny, nx) + corr
    assert np_degrid_vis.shape == (row, chan) + corr
Example #26
def test_ms_create(Dataset, tmp_path, chunks, num_chans, corr_types, sources):
    # Set up
    rs = np.random.RandomState(42)

    ms_path = tmp_path / "create.ms"

    ms_table_name = str(ms_path)
    ant_table_name = "::".join((ms_table_name, "ANTENNA"))
    ddid_table_name = "::".join((ms_table_name, "DATA_DESCRIPTION"))
    pol_table_name = "::".join((ms_table_name, "POLARIZATION"))
    spw_table_name = "::".join((ms_table_name, "SPECTRAL_WINDOW"))
    # SOURCE is an optional MS sub-table
    src_table_name = "::".join((ms_table_name, "SOURCE"))

    ms_datasets = []
    ant_datasets = []
    ddid_datasets = []
    pol_datasets = []
    spw_datasets = []
    src_datasets = []

    # For comparison
    all_data_desc_id = []
    all_data = []

    # Create ANTENNA dataset of 64 antennas
    # Each column in the ANTENNA has a fixed shape so we
    # can represent all rows with one dataset
    na = 64
    position = da.random.random((na, 3)) * 10000
    offset = da.random.random((na, 3))
    names = np.array(['ANTENNA-%d' % i for i in range(na)], dtype=object)
    ds = Dataset({
        'POSITION': (("row", "xyz"), position),
        'OFFSET': (("row", "xyz"), offset),
        'NAME': (("row", ), da.from_array(names, chunks=na)),
    })
    ant_datasets.append(ds)

    # Create SOURCE datasets
    for s, (name, direction, rest_freq) in enumerate(sources):
        dask_num_lines = da.full((1, ), len(rest_freq), dtype=np.int32)
        dask_direction = da.asarray(direction)[None, :]
        dask_rest_freq = da.asarray(rest_freq)[None, :]
        dask_name = da.asarray(np.asarray([name], dtype=object))
        ds = Dataset({
            "NUM_LINES": (("row", ), dask_num_lines),
            "NAME": (("row", ), dask_name),
            "REST_FREQUENCY": (("row", "line"), dask_rest_freq),
            "DIRECTION": (("row", "dir"), dask_direction),
        })
        src_datasets.append(ds)

    # Create POLARISATION datasets.
    # Dataset per output row required because column shapes are variable
    for r, corr_type in enumerate(corr_types):
        dask_num_corr = da.full((1, ), len(corr_type), dtype=np.int32)
        dask_corr_type = da.from_array(corr_type,
                                       chunks=len(corr_type))[None, :]
        ds = Dataset({
            "NUM_CORR": (("row", ), dask_num_corr),
            "CORR_TYPE": (("row", "corr"), dask_corr_type),
        })

        pol_datasets.append(ds)

    # Create multiple MeerKAT L-band SPECTRAL_WINDOW datasets
    # Dataset per output row required because column shapes are variable
    for num_chan in num_chans:
        dask_num_chan = da.full((1, ), num_chan, dtype=np.int32)
        dask_chan_freq = da.linspace(.856e9,
                                     2 * .856e9,
                                     num_chan,
                                     chunks=num_chan)[None, :]
        dask_chan_width = da.full((1, num_chan), .856e9 / num_chan)

        ds = Dataset({
            "NUM_CHAN": (("row", ), dask_num_chan),
            "CHAN_FREQ": (("row", "chan"), dask_chan_freq),
            "CHAN_WIDTH": (("row", "chan"), dask_chan_width),
        })

        spw_datasets.append(ds)

    # For each cartesian product of SPECTRAL_WINDOW and POLARIZATION
    # create a corresponding DATA_DESCRIPTION.
    # Each column has fixed shape so we handle all rows at once
    spw_ids, pol_ids = zip(
        *product(range(len(num_chans)), range(len(corr_types))))
    dask_spw_ids = da.asarray(np.asarray(spw_ids, dtype=np.int32))
    dask_pol_ids = da.asarray(np.asarray(pol_ids, dtype=np.int32))
    ddid_datasets.append(
        Dataset({
            "SPECTRAL_WINDOW_ID": (("row", ), dask_spw_ids),
            "POLARIZATION_ID": (("row", ), dask_pol_ids),
        }))

    # Now create the associated MS dataset
    for ddid, (spw_id, pol_id) in enumerate(zip(spw_ids, pol_ids)):
        # Infer row, chan and correlation shape
        row = sum(chunks['row'])
        chan = spw_datasets[spw_id].CHAN_FREQ.shape[1]
        corr = pol_datasets[pol_id].CORR_TYPE.shape[1]

        # Create some dask vis data
        dims = ("row", "chan", "corr")
        np_data = (rs.normal(size=(row, chan, corr)) +
                   1j * rs.normal(size=(row, chan, corr))).astype(np.complex64)

        data_chunks = tuple((chunks['row'], chan, corr))
        dask_data = da.from_array(np_data, chunks=data_chunks)
        # Create dask ddid column
        dask_ddid = da.full(row, ddid, chunks=chunks['row'], dtype=np.int32)
        dataset = Dataset({
            'DATA': (dims, dask_data),
            'DATA_DESC_ID': (("row", ), dask_ddid)
        })
        ms_datasets.append(dataset)
        all_data.append(dask_data)
        all_data_desc_id.append(dask_ddid)

    ms_writes = xds_to_table(ms_datasets, ms_table_name, columns="ALL")
    ant_writes = xds_to_table(ant_datasets, ant_table_name, columns="ALL")
    pol_writes = xds_to_table(pol_datasets, pol_table_name, columns="ALL")
    spw_writes = xds_to_table(spw_datasets, spw_table_name, columns="ALL")
    ddid_writes = xds_to_table(ddid_datasets, ddid_table_name, columns="ALL")
    source_writes = xds_to_table(src_datasets, src_table_name, columns="ALL")

    dask.compute(ms_writes, ant_writes, pol_writes, spw_writes, ddid_writes,
                 source_writes)

    # Check ANTENNA table correctly created
    with pt.table(ant_table_name, ack=False) as A:
        assert_array_equal(A.getcol("NAME"), names)
        assert_array_equal(A.getcol("POSITION"), position)
        assert_array_equal(A.getcol("OFFSET"), offset)

        required_desc = pt.required_ms_desc("ANTENNA")
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        assert set(A.colnames()) == set(required_columns)

    # Check POLARIZATION table correctly created
    with pt.table(pol_table_name, ack=False) as P:
        for r, corr_type in enumerate(corr_types):
            assert_array_equal(P.getcol("CORR_TYPE", startrow=r, nrow=1),
                               [corr_type])
            assert_array_equal(P.getcol("NUM_CORR", startrow=r, nrow=1),
                               [len(corr_type)])

        required_desc = pt.required_ms_desc("POLARIZATION")
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        assert set(P.colnames()) == set(required_columns)

    # Check SPECTRAL_WINDOW table correctly created
    with pt.table(spw_table_name, ack=False) as S:
        for r, num_chan in enumerate(num_chans):
            assert_array_equal(
                S.getcol("NUM_CHAN", startrow=r, nrow=1)[0], num_chan)
            assert_array_equal(
                S.getcol("CHAN_FREQ", startrow=r, nrow=1)[0],
                np.linspace(.856e9, 2 * .856e9, num_chan))
            assert_array_equal(
                S.getcol("CHAN_WIDTH", startrow=r, nrow=1)[0],
                np.full(num_chan, .856e9 / num_chan))

        required_desc = pt.required_ms_desc("SPECTRAL_WINDOW")
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        assert set(S.colnames()) == set(required_columns)

    # We should get a cartesian product out
    with pt.table(ddid_table_name, ack=False) as D:
        spw_id, pol_id = zip(
            *product(range(len(num_chans)), range(len(corr_types))))
        assert_array_equal(pol_id, D.getcol("POLARIZATION_ID"))
        assert_array_equal(spw_id, D.getcol("SPECTRAL_WINDOW_ID"))

        required_desc = pt.required_ms_desc("DATA_DESCRIPTION")
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        assert set(D.colnames()) == set(required_columns)

    with pt.table(src_table_name, ack=False) as S:
        for r, (name, direction, rest_freq) in enumerate(sources):
            assert_array_equal(S.getcol("NAME", startrow=r, nrow=1)[0], [name])
            assert_array_equal(S.getcol("REST_FREQUENCY", startrow=r, nrow=1),
                               [rest_freq])
            assert_array_equal(S.getcol("DIRECTION", startrow=r, nrow=1),
                               [direction])

    with pt.table(ms_table_name, ack=False) as T:
        # DATA_DESC_ID's are all the same shape
        assert_array_equal(T.getcol("DATA_DESC_ID"),
                           da.concatenate(all_data_desc_id))

        # DATA is variably shaped (on DATA_DESC_ID) so we
        # compared each one separately.
        for ddid, data in enumerate(all_data):
            ms_data = T.getcol("DATA", startrow=ddid * row, nrow=row)
            assert_array_equal(ms_data, data)

        required_desc = pt.required_ms_desc()
        required_columns = set(k for k in required_desc.keys()
                               if not k.startswith("_"))

        # Check we have the required columns
        assert set(T.colnames()) == required_columns.union(
            ["DATA", "DATA_DESC_ID"])
Example #27
def genGridFile(fname="renoGrid.hdf5", iniFname="grid_file.ini"):
    #load the ini construction file
    with open(iniFname, 'r') as fileObject:
        ini = [
            line.strip() for line in fileObject if ("#" not in line.strip())
        ]
    ini = json.loads(r''.join(ini))
    #create hdf5 grid file
    #note that flipping x and y IS NOT A MISTAKE!
    y = da.linspace(0,
                    ini["NX"] * ini["DX"],
                    ini["NX"],
                    chunks=(ini["CHUNKX"], )).astype(np.int32)
    x = da.linspace(0,
                    ini["NY"] * ini["DY"],
                    ini["NY"],
                    chunks=(ini["CHUNKY"], )).astype(np.int32)
    z = da.linspace(0,
                    ini["NZ"] * ini["DZ"],
                    ini["NZ"],
                    chunks=(ini["CHUNKZ"], )).astype(np.int32)
    topo = da.zeros(shape=(x.shape[0], y.shape[0]),
                    chunks=(ini["CHUNKX"], ini["CHUNKY"]))
    basinSurface = da.zeros(shape=(x.shape[0], y.shape[0]),
                            chunks=(ini["CHUNKX"], ini["CHUNKY"]))
    #build the full dask meshgrid
    #note that I chose to use cartesian indexing
    gridCoords = da.meshgrid(x, y, z, sparse=False, indexing='xy')
    #try flipping z -- note that an rFile scans down from a maximum elevation to the bottom (not from bottom to top as the meshgrid does); this corrects that orientation
    #gridCoords[2] = da.flip(gridCoords[2],axis=-1)
    #now write to the hdf5 file
    da.to_hdf5(fname, '/grid/x', gridCoords[0])
    da.to_hdf5(fname, '/grid/y', gridCoords[1])
    da.to_hdf5(fname, '/grid/z', gridCoords[2])
    #save an empty (bullshit) topography
    da.to_hdf5(fname, '/grid/topo', topo)
    #create all of the empty coordinate spaces
    #-666 is the "not interpolated, empty space" flag (not to be confused with -999, the no-value flag for sw4)
    da.to_hdf5(
        fname, '/grid/vp',
        da.full(gridCoords[0].shape,
                -666,
                chunks=gridCoords[0].chunksize,
                dtype=np.float32).flatten())
    da.to_hdf5(
        fname, '/grid/vs',
        da.full(gridCoords[0].shape,
                -666,
                chunks=gridCoords[0].chunksize,
                dtype=np.float32).flatten())
    da.to_hdf5(
        fname, '/grid/p',
        da.full(gridCoords[0].shape,
                -666,
                chunks=gridCoords[0].chunksize,
                dtype=np.float32).flatten())
    da.to_hdf5(
        fname, '/grid/qp',
        da.full(gridCoords[0].shape,
                -666,
                chunks=gridCoords[0].chunksize,
                dtype=np.float32).flatten())
    da.to_hdf5(
        fname, '/grid/qs',
        da.full(gridCoords[0].shape,
                -666,
                chunks=gridCoords[0].chunksize,
                dtype=np.float32).flatten())
    #build a unit descriptor array
    da.to_hdf5(
        fname, '/grid/unit',
        da.full(gridCoords[0].shape,
                -1,
                chunks=gridCoords[0].chunksize,
                dtype=np.int8).flatten())
    #now write the config file to the hdf5 file header
    result = h5py.File(fname, 'r+')
    #there must be a better way of doing this! This re-encodes the JSON as ASCII to strip any unicode that might be in it
    ini = [i.encode("ascii", "ignore") for i in json.dumps(ini)]
    result.create_dataset('/grid/ini', (len(ini), 1), 'S10', ini)
    result.close()
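
Aside: each da.to_hdf5 call above triggers its own scheduler pass. Dask also
accepts a dict of datasets, so several arrays can be written in a single pass;
a standalone sketch (not the original file layout):

import dask.array as da

x = da.zeros((4, 4), chunks=2)
y = da.ones((4, 4), chunks=2)
da.to_hdf5('example.hdf5', {'/grid/x': x, '/grid/y': y})
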
Example #28
print(
    "\nPressure fields for the following cases are computed (case, U, a, p0, x0)"
)
with open(f"{current_dir}/parameters_pressure.txt", "w") as file:
    for i in range(num_cases):
        line = f"{cases[i]:02.0f},{U_list[i]:0.0f},{a_list[i]:0.0f},{p0_list[i]:0.0f},{x0_list[i]:0.0f}"
        print(line.replace(",", "\t"))
        file.write(line + "\n")

    del line, i

### Grid
x_num = int((x_max - x_min) / x_step + 1)
y_num = int((y_max - y_min) / y_step + 1)

x = da.linspace(x_min, x_max, x_num, chunks=256)
y = da.linspace(y_min, y_max, y_num, chunks=1024)
t = da.arange(t_min, t_max + 1, t_step, chunks=128)

t_num = t.size

tt, yy, xx = da.meshgrid(t, y, x, indexing="ij", sparse=True)
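# NOTE: sparse=True yields broadcastable grids of shape (t,1,1), (1,y,1) and
# (1,1,x) rather than three full 3-D arrays, keeping graph and memory small.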

print("Grid parameters:")
print(f"{x.size=}\t\t{y.size=}\t\t{t.size=}")
print(f"{x.chunksize=}\t{y.chunksize=}\t{t.chunksize=}")


### Function
def pressure(x, y, t, t0=10000., U=50., a=200000., p0=2000., x0=0.):
    # NOTE: the source snippet is truncated mid-expression; the closing terms
    # below are an assumed completion (a Gaussian of width a centred at x0,
    # translating along y at speed U), matching the parameters printed above.
    return (p0 * (1. - da.exp(-t / t0)) * da.exp(-((x - x0)**2. +
                                                   (y - U * t)**2.) / a**2.))
Example #29
 def _lin_axis(min_: Union[float, int], max_: Union[float, int],
               points: int) -> da.core.Array:
     """Generates linearly spaced array."""
     return da.linspace(min_, max_, points)
Example #30
def fixture_fake_dataset():
    """Create fake dataset."""
    count_ir = da.linspace(0, 255, 4, dtype=np.uint8).reshape(2, 2)
    count_wv = da.linspace(0, 255, 4, dtype=np.uint8).reshape(2, 2)
    count_vis = da.linspace(0, 255, 16, dtype=np.uint8).reshape(4, 4)
    sza = da.from_array(np.array([[45, 90], [0, 45]], dtype=np.float32))
    mask = da.from_array(
        np.array(
            [
                [0, 0, 0, 0],
                [0, 0, 0, 0],
                [0, 0, 1, 0],  # 1 = "invalid"
                [0, 0, 0, 0]
            ],
            dtype=np.uint8))
    time = np.arange(4).astype('datetime64[h]').reshape(2, 2)
    ds = xr.Dataset(
        data_vars={
            'count_vis': (('y', 'x'), count_vis),
            'count_wv': (('y_ir_wv', 'x_ir_wv'), count_wv),
            'count_ir': (('y_ir_wv', 'x_ir_wv'), count_ir),
            'toa_bidirectional_reflectance_vis': vis_refl_exp / 100,
            'u_independent_toa_bidirectional_reflectance': u_vis_refl_exp / 100,
            'quality_pixel_bitmask': (('y', 'x'), mask),
            'solar_zenith_angle': (('y_tie', 'x_tie'), sza),
            'time_ir_wv': (('y_ir_wv', 'x_ir_wv'), time),
            'a_ir': -5.0,
            'b_ir': 1.0,
            'bt_a_ir': 10.0,
            'bt_b_ir': -1000.0,
            'a_wv': -0.5,
            'b_wv': 0.05,
            'bt_a_wv': 10.0,
            'bt_b_wv': -2000.0,
            'years_since_launch': 20.0,
            'a0_vis': 1.0,
            'a1_vis': 0.01,
            'a2_vis': -0.0001,
            'mean_count_space_vis': 1.0,
            'distance_sun_earth': 1.0,
            'solar_irradiance_vis': 650.0,
            'sub_satellite_longitude_start': 57.1,
            'sub_satellite_longitude_end': np.nan,
            'sub_satellite_latitude_start': np.nan,
            'sub_satellite_latitude_end': 0.1,
        },
        coords={
            'y': [1, 2, 3, 4],
            'x': [1, 2, 3, 4],
            'y_ir_wv': [1, 2],
            'x_ir_wv': [1, 2],
            'y_tie': [1, 2],
            'x_tie': [1, 2],
        },
        attrs={'foo': 'bar'},
    )
    ds['count_ir'].attrs['ancillary_variables'] = 'a_ir b_ir'
    ds['count_wv'].attrs['ancillary_variables'] = 'a_wv b_wv'
    return ds
Example #31
 def _log_axis(min_: Union[float, int], max_: Union[float, int],
               points: int) -> da.core.Array:
     """Generates logarithmically spaced array."""
     min_ = np.log10(min_)
     max_ = np.log10(max_)
     return 10.0**da.linspace(min_, max_, points)
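
For intuition, the construction above matches NumPy's logspace; a quick
standalone check (not part of the original class):

import numpy as np
import dask.array as da

darr = 10.0 ** da.linspace(np.log10(1e-3), np.log10(1e3), 7)
np.testing.assert_allclose(darr.compute(), np.logspace(-3, 3, 7))
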
Example #32
 def setUp(self):
     """Create fake data for the tests."""
     data = da.linspace(0, 1, 16).reshape((4, 4))
     self.da = xr.DataArray(data, dims=('y', 'x'), attrs={'test': 'test'})