Example #1
 def test_binary(self):
     args = [0,
             np.zeros(2),
             xr.Variable(['x'], [0, 0]),
             xr.DataArray([0, 0], dims='x'),
             xr.Dataset({'y': ('x', [0, 0])})]
     for n, t1 in enumerate(args):
         for t2 in args[n:]:
             self.assertIdentical(t2 + 1, xu.maximum(t1, t2 + 1))
             self.assertIdentical(t2 + 1, xu.maximum(t2, t1 + 1))
             self.assertIdentical(t2 + 1, xu.maximum(t1 + 1, t2))
             self.assertIdentical(t2 + 1, xu.maximum(t2 + 1, t1))
Example #2
 def test_binary(self):
     args = [0,
             np.zeros(2),
             xr.Variable(['x'], [0, 0]),
             xr.DataArray([0, 0], dims='x'),
             xr.Dataset({'y': ('x', [0, 0])})]
     for n, t1 in enumerate(args):
         for t2 in args[n:]:
             self.assert_identical(t2 + 1, xu.maximum(t1, t2 + 1))
             self.assert_identical(t2 + 1, xu.maximum(t2, t1 + 1))
             self.assert_identical(t2 + 1, xu.maximum(t1 + 1, t2))
             self.assert_identical(t2 + 1, xu.maximum(t2 + 1, t1))
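The two tests above exercise xu.maximum across mixed argument types (scalar, ndarray, Variable, DataArray, Dataset) in either operand order. A minimal sketch of one such call, assuming an older xarray release that still ships the deprecated xarray.ufuncs wrappers:

import xarray as xr
import xarray.ufuncs as xu  # deprecated wrappers; removed from recent xarray releases

ds = xr.Dataset({'y': ('x', [0, 0])})
result = xu.maximum(0, ds + 1)  # scalar vs. Dataset, applied element-wise
# result is identical to ds + 1, i.e. a Dataset with y = [1, 1]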
Example #3
File: plot.py  Project: jcmgray/xarray
def _rescale_imshow_rgb(darray, vmin, vmax, robust):
    assert robust or vmin is not None or vmax is not None
    # There's a cyclic dependency via DataArray, so we can't import from
    # xarray.ufuncs in global scope.
    from xarray.ufuncs import maximum, minimum
    # Calculate vmin and vmax automatically for `robust=True`
    if robust:
        if vmax is None:
            vmax = np.nanpercentile(darray, 100 - ROBUST_PERCENTILE)
        if vmin is None:
            vmin = np.nanpercentile(darray, ROBUST_PERCENTILE)
    # If not robust and one bound is None, calculate the default other bound
    # and check that an interval between them exists.
    elif vmax is None:
        vmax = 255 if np.issubdtype(darray.dtype, np.integer) else 1
        if vmax < vmin:
            raise ValueError(
                'vmin=%r exceeds the default vmax (%r) - you must supply '
                'a vmax > vmin in this case.' % (vmin, vmax))
    elif vmin is None:
        vmin = 0
        if vmin > vmax:
            raise ValueError(
                'vmax=%r is less than the default vmin (0) - you must supply '
                'a vmin < vmax in this case.' % vmax)
    # Scale interval [vmin .. vmax] to [0 .. 1], with darray as 64-bit float
    # to avoid precision loss, integer over/underflow, etc with extreme inputs.
    # After scaling, downcast to 32-bit float.  This substantially reduces
    # memory usage after we hand `darray` off to matplotlib.
    darray = ((darray.astype('f8') - vmin) / (vmax - vmin)).astype('f4')
    return minimum(maximum(darray, 0), 1)
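The final line above clips the rescaled array to [0, 1] element-wise while preserving the DataArray's dims and coords. A minimal sketch of that clipping step in isolation (again assuming the deprecated xarray.ufuncs module is importable):

import xarray as xr
import xarray.ufuncs as xu

darray = xr.DataArray([-0.2, 0.5, 1.3], dims='x')   # values outside [0, 1]
clipped = xu.minimum(xu.maximum(darray, 0), 1)      # element-wise clip, result stays a DataArray
# clipped.values -> array([0. , 0.5, 1. ])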
Example #4
File: plot.py  Project: zhishang80/xarray
def _rescale_imshow_rgb(darray, vmin, vmax, robust):
    assert robust or vmin is not None or vmax is not None
    # There's a cyclic dependency via DataArray, so we can't import from
    # xarray.ufuncs in global scope.
    from xarray.ufuncs import maximum, minimum
    # Calculate vmin and vmax automatically for `robust=True`
    if robust:
        if vmax is None:
            vmax = np.nanpercentile(darray, 100 - ROBUST_PERCENTILE)
        if vmin is None:
            vmin = np.nanpercentile(darray, ROBUST_PERCENTILE)
    # If not robust and one bound is None, calculate the default other bound
    # and check that an interval between them exists.
    elif vmax is None:
        vmax = 255 if np.issubdtype(darray.dtype, np.integer) else 1
        if vmax < vmin:
            raise ValueError(
                'vmin=%r exceeds the default vmax (%r) - you must supply '
                'a vmax > vmin in this case.' % (vmin, vmax))
    elif vmin is None:
        vmin = 0
        if vmin > vmax:
            raise ValueError(
                'vmax=%r is less than the default vmin (0) - you must supply '
                'a vmin < vmax in this case.' % vmax)
    # Scale interval [vmin .. vmax] to [0 .. 1], with darray as 64-bit float
    # to avoid precision loss, integer over/underflow, etc with extreme inputs.
    # After scaling, downcast to 32-bit float.  This substantially reduces
    # memory usage after we hand `darray` off to matplotlib.
    darray = ((darray.astype('f8') - vmin) / (vmax - vmin)).astype('f4')
    return minimum(maximum(darray, 0), 1)
Example #5
File: sar.py  Project: ziaridoy20/satpy
def overlay(top, bottom):
    """Blending two layers.

    from: https://docs.gimp.org/en/gimp-concepts-layer-modes.html
    """
    maxval = xu.maximum(top.max(), bottom.max())

    res = ((2 * top / maxval - 1) * bottom + 2 * top) * bottom / maxval

    return res
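A hedged usage sketch of overlay on two small DataArrays; the values are made up and xu is assumed to be xarray.ufuncs, as in the other examples on this page:

import xarray as xr
import xarray.ufuncs as xu  # required for the xu.maximum call inside overlay

top = xr.DataArray([[10., 200.], [50., 255.]], dims=('y', 'x'))
bottom = xr.DataArray([[0., 120.], [255., 30.]], dims=('y', 'x'))
blended = overlay(top, bottom)  # same dims as the inputs, normalised by the larger of the two maxima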
Example #6
    def test_groupby(self):
        ds = xr.Dataset({'a': ('x', [0, 0, 0])}, {'c': ('x', [0, 0, 1])})
        ds_grouped = ds.groupby('c')
        group_mean = ds_grouped.mean('x')
        arr_grouped = ds['a'].groupby('c')

        assert_identical(ds, xu.maximum(ds_grouped, group_mean))
        assert_identical(ds, xu.maximum(group_mean, ds_grouped))

        assert_identical(ds, xu.maximum(arr_grouped, group_mean))
        assert_identical(ds, xu.maximum(group_mean, arr_grouped))

        assert_identical(ds, xu.maximum(ds_grouped, group_mean['a']))
        assert_identical(ds, xu.maximum(group_mean['a'], ds_grouped))

        assert_identical(ds.a, xu.maximum(arr_grouped, group_mean.a))
        assert_identical(ds.a, xu.maximum(group_mean.a, arr_grouped))

        with raises_regex(TypeError, 'only support binary ops'):
            xu.maximum(ds.a.variable, ds_grouped)
Example #7
    def test_groupby(self):
        ds = xr.Dataset({'a': ('x', [0, 0, 0])}, {'c': ('x', [0, 0, 1])})
        ds_grouped = ds.groupby('c')
        group_mean = ds_grouped.mean('x')
        arr_grouped = ds['a'].groupby('c')

        self.assertIdentical(ds, xu.maximum(ds_grouped, group_mean))
        self.assertIdentical(ds, xu.maximum(group_mean, ds_grouped))

        self.assertIdentical(ds, xu.maximum(arr_grouped, group_mean))
        self.assertIdentical(ds, xu.maximum(group_mean, arr_grouped))

        self.assertIdentical(ds, xu.maximum(ds_grouped, group_mean['a']))
        self.assertIdentical(ds, xu.maximum(group_mean['a'], ds_grouped))

        self.assertIdentical(ds.a, xu.maximum(arr_grouped, group_mean.a))
        self.assertIdentical(ds.a, xu.maximum(group_mean.a, arr_grouped))

        with raises_regex(TypeError, 'only support binary ops'):
            xu.maximum(ds.a.variable, ds_grouped)
Example #8
 def test_bivariate_ufunc(self):
     u = self.eager_var
     v = self.lazy_var
     self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0))
     self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v))
Example #10
 def test_bivariate_ufunc(self):
     assert_sparse_equal(np.maximum(self.data, 0),
                         xu.maximum(self.var, 0).data)
     assert_sparse_equal(np.maximum(self.data, 0),
                         xu.maximum(0, self.var).data)
Example #11
    # interpolating from U,V to T
    U = U4.rolling({'x': 2}).mean().fillna(0.)
    V = V4.rolling({'y': 2}).mean().fillna(0.)

    hpge = np.sqrt(np.power(U, 2) + np.power(V, 2))

    if F == 0:
        ni = hpge.data.shape[3]
        nj = hpge.data.shape[2]
        max_hpge0 = np.zeros(shape=(nj, ni))
        max_hpge1 = np.zeros(shape=(nj, ni))

    maxhpge_0 = hpge.isel(k=slice(None, num_lev[0])).max(dim='k').max(dim='t')
    maxhpge_1 = hpge.isel(k=slice(num_lev[0], num_lev[1])).max(dim='k').max(
        dim='t')
    max_hpge0 = xu.maximum(max_hpge0, maxhpge_0.data)
    max_hpge1 = xu.maximum(max_hpge1, maxhpge_1.data)

# Saving
ds_hpge = xr.Dataset()
ds_hpge["max_hpge_0"] = xr.DataArray(max_hpge0, dims=('y', 'x'))
ds_hpge["max_hpge_1"] = xr.DataArray(max_hpge1, dims=('y', 'x'))

# -------------------------------------------------------------------------------------
# Writing the max_hpge file

print('WRITING the maximum_hpge.nc FILE')

out_file = "maximum_hpge.nc"
delayed_obj = ds_hpge.to_netcdf(join(HPGEdir, out_file), compute=False)
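The xu.maximum calls in this script build an element-wise running maximum: on each pass the accumulated NumPy array is compared point-by-point with the latest field. A minimal sketch of that accumulation pattern with made-up 2x2 fields:

import numpy as np
import xarray.ufuncs as xu  # falls back to np.maximum for plain NumPy inputs

running_max = np.zeros((2, 2))
for field in (np.array([[1., 0.], [2., 3.]]),
              np.array([[0., 5.], [1., 1.]])):
    running_max = xu.maximum(running_max, field)  # keep the largest value seen at each grid point
# running_max -> array([[1., 5.], [2., 3.]])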
Example #12
 def test_bivariate_ufunc(self):
     sparse.utils.assert_eq(np.maximum(self.data, 0),
                            xu.maximum(self.var, 0).data)
     sparse.utils.assert_eq(np.maximum(self.data, 0),
                            xu.maximum(0, self.var).data)