def test_e2c(PM_da_ds1d):
    """Test ensemble_mean-to-control (which can be any other one member) (e2c)
    comparison basic functionality.

    Clean comparison: Remove one control member from ensemble to use as
    reference. Take the remaining member mean as forecasts."""
    ds = PM_da_ds1d
    aforecast, areference = _e2c(ds)

    control_member = [0]
    supervector_dim = 'svd'
    reference = ds.isel(member=control_member).squeeze()
    if 'member' in reference.coords:
        del reference['member']
    reference = reference.rename({'init': supervector_dim})
    # drop the member being reference
    ds = _drop_members(ds, rmd_member=[ds.member.values[control_member]])
    forecast = ds.mean('member')
    forecast = forecast.rename({'init': supervector_dim})
    eforecast, ereference = forecast, reference
    # very weak testing on shape
    assert eforecast.size == aforecast.size
    assert ereference.size == areference.size
    assert_equal(eforecast, aforecast)
    assert_equal(ereference, areference)

def test_pickle_rasterio(self):
    # regression test for https://github.com/pydata/xarray/issues/2121
    with create_tmp_geotiff() as (tmp_file, expected):
        with xr.open_rasterio(tmp_file) as rioda:
            temp = pickle.dumps(rioda)
            with pickle.loads(temp) as actual:
                assert_equal(actual, rioda)

def test_open_grid(self, create_example_grid_file):
    example_grid = create_example_grid_file

    with pytest.warns(UserWarning):
        result = open_boutdataset(datapath=example_grid)
    result = result.drop_vars(["x", "y"])
    assert_equal(result, open_dataset(example_grid))
    result.close()

def test_time_units_with_timezone_roundtrip(calendar):
    # Regression test for GH 2649
    expected_units = "days since 2000-01-01T00:00:00-05:00"
    expected_num_dates = np.array([1, 2, 3])
    dates = decode_cf_datetime(expected_num_dates, expected_units, calendar)

    # Check that dates were decoded to UTC; here the hours should all
    # equal 5.
    result_hours = DataArray(dates).dt.hour
    expected_hours = DataArray([5, 5, 5])
    assert_equal(result_hours, expected_hours)

    # Check that the encoded values are accurately roundtripped.
    result_num_dates, result_units, result_calendar = encode_cf_datetime(
        dates, expected_units, calendar
    )

    if calendar in _STANDARD_CALENDARS:
        np.testing.assert_array_equal(result_num_dates, expected_num_dates)
    else:
        # cftime datetime arithmetic is not quite exact.
        np.testing.assert_allclose(result_num_dates, expected_num_dates)

    assert result_units == expected_units
    assert result_calendar == calendar

def test_tickers_dict_to_data_array(self):
    ticker_1 = BloombergTicker("Example 1")
    ticker_2 = BloombergTicker("Example 2")
    fields = [PriceField.Open, PriceField.Close]
    index = self.index[:3]

    data = [[[4., 1.], [nan, 5.]],
            [[5., 2.], [nan, 7.]],
            [[6., 3.], [nan, 8.]]]

    prices_df_1 = QFDataFrame(data={
        PriceField.Close: [1., 2., 3.],
        PriceField.Open: [4., 5., 6.]
    }, index=index)
    prices_df_2 = QFDataFrame(data={PriceField.Close: [5., 7., 8.]}, index=index)

    data_array = tickers_dict_to_data_array({
        ticker_1: prices_df_1,
        ticker_2: prices_df_2
    }, [ticker_1, ticker_2], fields)
    self.assertEqual(dtype("float64"), data_array.dtype)

    expected_data_array = QFDataArray.create(index, [ticker_1, ticker_2], fields, data)
    assert_equal(data_array, expected_data_array)

def test_cross_phase_1d(self, dask):
    N = 32
    x = np.linspace(0, 1, num=N, endpoint=False)
    f = 6
    phase_offset = np.pi / 2
    signal1 = np.cos(2 * np.pi * f * x)  # frequency = 1/(2*pi)
    signal2 = np.cos(2 * np.pi * f * x - phase_offset)

    da1 = xr.DataArray(data=signal1, name="a", dims=["x"], coords={"x": x})
    da2 = xr.DataArray(data=signal2, name="b", dims=["x"], coords={"x": x})

    if dask:
        da1 = da1.chunk({"x": 32})
        da2 = da2.chunk({"x": 32})
    cp = xrft.cross_phase(da1, da2, dim=["x"])

    actual_phase_offset = cp.sel(freq_x=f).values
    npt.assert_almost_equal(actual_phase_offset, phase_offset)
    assert cp.name == "a_b_phase"

    xrt.assert_equal(xrft.cross_phase(da1, da2), cp)

    with pytest.raises(ValueError):
        xrft.cross_phase(da1, da2.isel(x=0).drop("x"))

    with pytest.raises(ValueError):
        xrft.cross_phase(da1, da2.rename({"x": "y"}))

def test_boundary_constant(self):
    def interp(a):
        return 0.5 * (a[..., :-1] + a[..., 1:])

    @as_grid_ufunc(
        signature="(X:center)->(X:left)",
        boundary_width={"X": (1, 0)},
        boundary="fill",
        fill_value=0,
    )
    def interp_center_to_left(a):
        return interp(a)

    grid = create_1d_test_grid("lat")
    arr = np.arange(9)
    da = grid._ds.lat_c.copy(data=arr)

    # test that bound kwargs are used
    result = interp_center_to_left(grid, da, axis=[["lat"]])
    interped_arr_padded_with_zero = interp(np.concatenate([[0], arr]))
    expected = grid._ds.lat_g.copy(data=interped_arr_padded_with_zero)
    assert_equal(result, expected)

    # test that bound kwargs can be overridden at call time
    result = interp_center_to_left(
        grid, da, axis=[["lat"]], boundary="fill", fill_value=1
    )
    interped_arr_padded_with_one = interp(np.concatenate([[1], arr]))
    expected = grid._ds.lat_g.copy(data=interped_arr_padded_with_one)
    assert_equal(result, expected)

def test_squashed_file(self, tmp_path_factory, bout_xyt_example_files):
    path = bout_xyt_example_files(
        tmp_path_factory, nxpe=4, nype=3, nt=1, squashed=True, write_to_disk=True
    )
    with pytest.warns(UserWarning):
        actual = open_boutdataset(datapath=path, keep_xboundaries=False)
    expected = create_bout_ds(lengths=(6, 8, 12, 7))
    expected = expected.set_coords(["t_array", "dx", "dy", "dz"]).rename(t_array="t")
    xrt.assert_equal(
        actual.drop_vars(["x", "y", "z"]).load(),
        expected.drop_vars(
            METADATA_VARS + _BOUT_PER_PROC_VARIABLES + _BOUT_TIME_DEPENDENT_META_VARS,
            errors="ignore",
        ),
    )

    # check creation without writing to disk gives identical result
    fake_ds_list = bout_xyt_example_files(None, nxpe=4, nype=3, nt=1, squashed=True)
    with pytest.warns(UserWarning):
        fake = open_boutdataset(datapath=fake_ds_list, keep_xboundaries=False)
    xrt.assert_identical(actual, fake)

def test_xy_reversed_coords(ref):
    cutout = Cutout(
        path="xy_r", module="era5", time=TIME, x=slice(X1, X0), y=slice(Y1, Y0)
    )
    assert_equal(cutout.coords.to_dataset(), ref.coords.to_dataset())

def test_xy_coords(ref):
    cutout = Cutout(
        path="xy", module="era5", time=TIME, x=slice(X0, X1), y=slice(Y0, Y1)
    )
    assert_equal(cutout.coords.to_dataset(), ref.coords.to_dataset())

def test_compound_t(func, meth, dtype, use_dask):
    x = INPUT.astype(dtype)
    c = T_COMPOUND_MATRIX
    expect = xarray.concat(
        [
            getattr(x.isel(t=[0]), meth)("t"),
            getattr(x.isel(t=[0, 2]), meth)("t"),
            getattr(x.isel(t=[1, 0]), meth)("t"),
            getattr(x.isel(t=[3, 0, 2]), meth)("t"),
        ],
        dim="t2",
    ).T.astype(dtype)
    expect.coords["t2"] = c.coords["t2"]

    if use_dask:
        x = x.chunk({"s": 2})
        expect = expect.chunk({"s": 2})
        c = c.chunk()

    actual = func(x, c, "t", "c")

    if use_dask:
        assert_equal(expect.compute(), actual.compute())
    else:
        assert_equal(expect, actual)
    assert expect.dtype == actual.dtype
    assert actual.chunks == expect.chunks

def test_m2e(PM_da_ds1d):
    """Test many-to-ensemble-mean (m2e) comparison basic functionality.

    Clean comparison: Remove one member from ensemble to use as reference.
    Take the remaining members as forecasts."""
    ds = PM_da_ds1d
    aforecast, areference = _m2e(ds)

    supervector_dim = 'svd'
    reference_list = []
    forecast_list = []
    for m in ds.member.values:
        forecast = _drop_members(ds, rmd_member=[m]).mean('member')
        reference = ds.sel(member=m).squeeze()
        forecast, reference = xr.broadcast(forecast, reference)
        forecast_list.append(forecast)
        reference_list.append(reference)
    reference = xr.concat(reference_list, 'init').rename({'init': supervector_dim})
    forecast = xr.concat(forecast_list, 'init').rename({'init': supervector_dim})
    eforecast, ereference = forecast, reference
    # very weak testing on shape
    assert eforecast.size == aforecast.size
    assert ereference.size == areference.size
    assert_equal(eforecast, aforecast)
    assert_equal(ereference, areference)

def test_combine_along_xy(self, tmpdir_factory, bout_xyt_example_files):
    path = bout_xyt_example_files(
        tmpdir_factory, nxpe=4, nype=3, nt=1, syn_data_type='stepped')
    actual = open_boutdataset(datapath=path, keep_xboundaries=False)

    bout_ds = create_bout_ds
    line1 = concat(
        [bout_ds(0), bout_ds(1), bout_ds(2), bout_ds(3)],
        dim='x', data_vars='minimal')
    line2 = concat(
        [bout_ds(4), bout_ds(5), bout_ds(6), bout_ds(7)],
        dim='x', data_vars='minimal')
    line3 = concat(
        [bout_ds(8), bout_ds(9), bout_ds(10), bout_ds(11)],
        dim='x', data_vars='minimal')
    expected = concat([line1, line2, line3], dim='y', data_vars='minimal')
    xrt.assert_equal(
        actual.load(),
        expected.drop(METADATA_VARS + _BOUT_PER_PROC_VARIABLES, errors='ignore'))

def test_raw_to_radiance_correctness(rad_expected, rad_computed):
    # Demosaicing is actually not interpolation on edges currently
    expected = rad_expected[c.radiance_data].isel(
        x=slice(1, -2), y=slice(1, -2)).transpose(*c.radiance_dims).compute()
    actual = rad_computed[c.radiance_data].isel(
        x=slice(1, -2), y=slice(1, -2)).transpose(*c.radiance_dims).compute()
    xrt.assert_equal(expected, actual)

def test_apply_along_one_axis(self):
    grid = create_2d_test_grid("lon", "lat")

    def diff_center_to_left(a):
        return a - np.roll(a, shift=-1, axis=-1)

    da = grid._ds.lat_c**2 + grid._ds.lon_c**2
    diffed = (da - da.roll(lon_c=-1, roll_coords=False)).data
    expected = xr.DataArray(
        diffed,
        dims=["lat_c", "lon_g"],
        coords={"lat_c": grid._ds.lat_c, "lon_g": grid._ds.lon_g},
    )

    # Test direct application
    result = apply_as_grid_ufunc(
        diff_center_to_left,
        da,
        axis=[("lon",)],
        grid=grid,
        signature="(X:center)->(X:left)",
    )
    assert_equal(result, expected)

    # Test decorator
    @as_grid_ufunc()
    def diff_center_to_left(
        a: Annotated[np.ndarray, "X:center"]
    ) -> Annotated[np.ndarray, "X:left"]:
        return a - np.roll(a, shift=-1, axis=-1)

    result = diff_center_to_left(grid, da, axis=[("lon",)])
    assert_equal(result, expected)

def test_cftime_datetime_mean_long_time_period():
    import cftime

    times = np.array(
        [
            [
                cftime.DatetimeNoLeap(400, 12, 31, 0, 0, 0, 0),
                cftime.DatetimeNoLeap(520, 12, 31, 0, 0, 0, 0),
            ],
            [
                cftime.DatetimeNoLeap(520, 12, 31, 0, 0, 0, 0),
                cftime.DatetimeNoLeap(640, 12, 31, 0, 0, 0, 0),
            ],
            [
                cftime.DatetimeNoLeap(640, 12, 31, 0, 0, 0, 0),
                cftime.DatetimeNoLeap(760, 12, 31, 0, 0, 0, 0),
            ],
        ]
    )

    da = DataArray(times, dims=["time", "d2"])
    result = da.mean("d2")

    expected = DataArray(
        [
            cftime.DatetimeNoLeap(460, 12, 31, 0, 0, 0, 0),
            cftime.DatetimeNoLeap(580, 12, 31, 0, 0, 0, 0),
            cftime.DatetimeNoLeap(700, 12, 31, 0, 0, 0, 0),
        ],
        dims=["time"],
    )
    assert_equal(result, expected)

def test_detrend_1D(
    array_dims,
    array_shape,
    detrend_dim,
    chunks,
    detrend_type,
    trend_amplitude,
    linear_error,
):
    da_original = noise(array_dims, array_shape)
    da_trend = da_original + trend_amplitude * da_original[detrend_dim]
    if chunks:
        da_trend = da_trend.chunk(chunks)

    # bail out if we are expecting an error
    if detrend_type == "linear" and linear_error:
        with pytest.raises(linear_error):
            detrend(da_trend, detrend_dim, detrend_type=detrend_type)
        return

    detrended = detrend(da_trend, detrend_dim, detrend_type=detrend_type)
    assert detrended.chunks == da_trend.chunks
    if detrend_type is None:
        xrt.assert_equal(detrended, da_trend)
    elif detrend_type == "constant":
        xrt.assert_allclose(detrended, da_trend - da_trend.mean(dim=detrend_dim))
    elif detrend_type == "linear":
        xrt.assert_allclose(detrended, da_original)

def test_concat_experiments(logger: LiveLogger, num_exps: int, data: st.DataObject):
    metrics = list(logger.train_metrics)
    assume(len(metrics) > 0)

    logger.set_train_batch(
        {k: data.draw(st.floats(-1e6, 1e6)) for k in metrics}, batch_size=1)
    batch_xarrays = [logger.to_xarray("train")[0]]

    for n in range(num_exps - 1):
        logger.set_train_batch(
            {k: data.draw(st.floats(-1e6, 1e6)) for k in metrics}, batch_size=1)
        batch_xarrays.append(logger.to_xarray("train")[0])

    out = concat_experiments(*batch_xarrays)
    assert list(out.coords["experiment"]) == list(range(num_exps))
    assert list(out.data_vars) == list(metrics)

    for n in range(num_exps):
        for metric in metrics:
            assert_equal(
                batch_xarrays[n].to_array(metric),
                out.isel(experiment=n)
                .drop_vars(names=["experiment"])
                .to_array(metric)
                .dropna(dim="iterations"),
            )

def test_cross_phase_1d(self, dask):
    N = 32
    x = np.linspace(0, 1, num=N, endpoint=False)
    f = 6
    phase_offset = np.pi/2
    signal1 = np.cos(2*np.pi*f*x)  # frequency = 1/(2*pi)
    signal2 = np.cos(2*np.pi*f*x - phase_offset)

    da1 = xr.DataArray(data=signal1, name='a', dims=['x'], coords={'x': x})
    da2 = xr.DataArray(data=signal2, name='b', dims=['x'], coords={'x': x})

    if dask:
        da1 = da1.chunk({'x': 32})
        da2 = da2.chunk({'x': 32})
    cp = xrft.cross_phase(da1, da2, dim=['x'])

    actual_phase_offset = cp.sel(freq_x=f).values
    npt.assert_almost_equal(actual_phase_offset, phase_offset)
    assert cp.name == 'a_b_phase'

    xrt.assert_equal(xrft.cross_phase(da1, da2), cp)

    with pytest.raises(ValueError):
        xrft.cross_phase(da1, da2.isel(x=0).drop('x'))

    with pytest.raises(ValueError):
        xrft.cross_phase(da1, da2.rename({'x': 'y'}))

def test_data_arrays_concat_on_tickers(self):
    ticker_1 = BloombergTicker("Example 1")
    ticker_2 = BloombergTicker("Example 2")
    fields = [PriceField.Open, PriceField.Close]
    index = date_range(start=str_to_date("2017-01-01"), periods=5, freq="D")

    index_1 = index[:3]
    data_1 = [[[4., 1.]],
              [[5., 2.]],
              [[6., 3.]]]
    data_array_1 = QFDataArray.create(index_1, [ticker_1], fields, data_1)
    self.assertEqual(np.dtype("float64"), data_array_1.dtype)

    index_2 = index[3:]
    data_2 = [[[np.nan, 10.]],
              [[np.nan, 14.]]]
    data_array_2 = QFDataArray.create(index_2, [ticker_2], fields, data_2)
    self.assertEqual(np.dtype("float64"), data_array_2.dtype)

    data = [[[4., 1.], [np.nan, np.nan]],
            [[5., 2.], [np.nan, np.nan]],
            [[6., 3.], [np.nan, np.nan]],
            [[np.nan, np.nan], [np.nan, 10.]],
            [[np.nan, np.nan], [np.nan, 14.]]]
    expected_data_array = QFDataArray.create(index, [ticker_1, ticker_2], fields, data)
    self.assertEqual(np.dtype("float64"), expected_data_array.dtype)

    concatenated_data_array = QFDataArray.concat(
        [data_array_1, data_array_2], dim=TICKERS)
    self.assertEqual(np.dtype("float64"), concatenated_data_array.dtype)

    assert_equal(expected_data_array, concatenated_data_array)

def test_multiple_inputs(self):
    @as_grid_ufunc(
        boundary_width=None,
        map_overlap=True,
        dask="allowed",
    )
    def multiply_left_right(
        a: Annotated[np.ndarray, "X:left"], b: Annotated[np.ndarray, "X:right"]
    ) -> Annotated[np.ndarray, "X:center"]:
        """Mocking up a function which can only act on in-memory arrays,
        and requires no padding"""
        if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
            return np.multiply(a, b)
        else:
            raise TypeError

    grid = create_1d_test_grid("depth")
    a = np.sin(grid._ds.depth_g * 2 * np.pi / 9).chunk(2)
    a.coords["depth_g"] = grid._ds.depth_g
    b = np.cos(grid._ds.depth_r * 2 * np.pi / 9).chunk(2)
    b.coords["depth_r"] = grid._ds.depth_r

    depth_c_coord = xr.DataArray(np.arange(1, 10), dims="depth_c")
    expected = xr.DataArray(
        np.multiply(a.data, b.data),
        dims=["depth_c"],
        coords={"depth_c": depth_c_coord},
    )

    result = multiply_left_right(grid, a, b, axis=[("depth",), ("depth",)])
    assert_equal(result, expected)

def test_combine_along_y(self, tmp_path_factory, bout_xyt_example_files):
    path = bout_xyt_example_files(
        tmp_path_factory,
        nxpe=1,
        nype=3,
        nt=1,
        syn_data_type="stepped",
        write_to_disk=True,
    )
    with pytest.warns(UserWarning):
        actual = open_boutdataset(datapath=path, keep_xboundaries=False)

    bout_ds = create_bout_ds
    expected = concat(
        [bout_ds(0), bout_ds(1), bout_ds(2)], dim="y", data_vars="minimal")
    expected = expected.set_coords(["t_array", "dx", "dy", "dz"]).rename(t_array="t")
    xrt.assert_equal(
        actual.drop_vars(["x", "y", "z"]).load(),
        expected.drop_vars(METADATA_VARS + _BOUT_PER_PROC_VARIABLES, errors="ignore"),
    )

    # check creation without writing to disk gives identical result
    fake_ds_list = bout_xyt_example_files(
        None, nxpe=1, nype=3, nt=1, syn_data_type="stepped")
    with pytest.warns(UserWarning):
        fake = open_boutdataset(datapath=fake_ds_list, keep_xboundaries=False)
    xrt.assert_identical(actual, fake)

def test_zero_width_boundary(self):
    def increment(x):
        """Mocking up a function which can only act on in-memory arrays,
        and requires no padding"""
        if isinstance(x, np.ndarray):
            return np.add(x, 1)
        else:
            raise TypeError

    grid = create_1d_test_grid("depth")
    a = np.sin(grid._ds.depth_g * 2 * np.pi / 9).chunk(2)
    a.coords["depth_g"] = grid._ds.depth_g

    expected = a + 1
    result = apply_as_grid_ufunc(
        increment,
        a,
        axis=[("depth",)],
        grid=grid,
        signature="(X:left)->(X:left)",
        boundary_width=None,
        dask="allowed",
        map_overlap=True,
    ).compute()
    assert_equal(result, expected)

    # in this case the result should be the same as using just map_blocks
    expected_data = dask.array.map_blocks(increment, a.data)
    np.testing.assert_equal(result.data, expected_data)

def test_cumsum(self):
    def cumsum_center_to_left(a):
        return np.cumsum(a, axis=-1)[..., :-1]

    grid = create_1d_test_grid("depth")
    da = grid._ds.depth_c**2
    da.coords["depth_c"] = grid._ds.depth_c

    cum = da.cumsum(dim="depth_c").roll(depth_c=1, roll_coords=False)
    cum[0] = 0
    expected = xr.DataArray(
        cum.data, dims=["depth_g"], coords={"depth_g": grid._ds.depth_g}
    )

    result = apply_as_grid_ufunc(
        cumsum_center_to_left,
        da,
        axis=[("depth",)],
        grid=grid,
        signature="(X:center)->(X:left)",
        boundary_width={"X": (1, 0)},
        boundary="fill",
        fill_value=0,
        pad_before_func=False,
    )
    assert_equal(result, expected)

def test_1d_padded_but_no_change_in_grid_position(self):
    def diff_center_to_center_second_order(a):
        b = a[..., 2:]
        c = a[..., :-2]
        return 0.5 * (b - c)

    grid = create_1d_test_grid("depth")
    da = grid._ds.depth_c**2
    da.coords["depth_c"] = grid._ds.depth_c

    diffed = 0.5 * (da - da.roll(depth_c=2, roll_coords=False)).data
    expected = xr.DataArray(
        diffed, dims=["depth_c"], coords={"depth_c": grid._ds.depth_c}
    )

    def pad_args(func, pad_width):
        def padding_version_of_func(*args):
            padded_args = [
                np.pad(a, pad_width=pad_width, mode="wrap") for a in args
            ]
            return func(*padded_args)

        return padding_version_of_func

    # Test direct application
    result = apply_as_grid_ufunc(
        pad_args(diff_center_to_center_second_order, pad_width=[(2, 0)]),
        da,
        axis=[("depth",)],
        grid=grid,
        signature="(X:center)->(X:center)",
        boundary_width=None,
    )
    assert_equal(result, expected)

def test_m2e(PM_da_initialized_1d):
    """Test many-to-ensemble-mean (m2e) comparison basic functionality.

    Clean comparison: Remove one member from ensemble to use as reference.
    Take the remaining members as forecasts."""
    ds = PM_da_initialized_1d
    aforecast, areference = __m2e.function(ds, metric=metric)

    reference_list = []
    forecast_list = []
    for m in ds.member.values:
        forecast = ds.drop_sel(member=m).mean("member")
        reference = ds.sel(member=m, drop=True)
        forecast, reference = xr.broadcast(forecast, reference)
        forecast_list.append(forecast)
        reference_list.append(reference)
    reference = xr.concat(reference_list, "member")
    forecast = xr.concat(forecast_list, "member")
    forecast["member"] = np.arange(forecast.member.size)
    reference["member"] = np.arange(reference.member.size)
    eforecast, ereference = forecast, reference
    # very weak testing on shape
    assert eforecast.size == aforecast.size
    assert ereference.size == areference.size
    assert_equal(eforecast, aforecast)
    assert_equal(ereference, areference)

def test_compound_s(func, meth, dtype, use_dask):
    x = INPUT.astype(dtype)
    c = S_COMPOUND_MATRIX
    expect = xarray.concat(
        [
            getattr(x.sel(s=["s3", "s2"]), meth)("s"),
            getattr(x.sel(s=["s1"]), meth)("s"),
        ],
        dim="s2",
    ).T.astype(dtype)
    expect.coords["s2"] = c.coords["s2"]

    if use_dask:
        x = x.chunk({"t": 2})
        expect = expect.chunk({"t": 2})
        c = c.chunk()

    actual = func(x, c, "s", "c")

    if use_dask:
        assert_equal(expect.compute(), actual.compute())
    else:
        assert_equal(expect, actual)
    assert expect.dtype == actual.dtype
    assert actual.chunks == expect.chunks

def test_generate_grid_ds():
    # simple case...just the dims
    axis_dims = {'X': 'lon', 'Y': 'lat', 'Z': 'z'}
    axis_coords = {'X': 'llon', 'Y': 'llat', 'Z': 'zz'}
    ds_old = ds_original.copy()
    ds_new = generate_grid_ds(
        ds_old,
        axis_dims,
        boundary_discontinuity={'lon': 360, 'lat': 180},
        pad={'z': 'auto'})
    assert_equal(ds_new, ds_out_left.drop(['llon_left', 'llat_left', 'zz_left']))
    # TODO why are they not identical ? assert identical fails
    ds_new = generate_grid_ds(
        ds_original,
        axis_dims,
        axis_coords,
        boundary_discontinuity={'lon': 360, 'lat': 180, 'llon': 360, 'llat': 180},
        pad={'z': 'auto', 'zz': 'auto'})
    assert_equal(ds_new, ds_out_left)

def test_availability_matrix_rastered(ref, raster):
    """
    Availability matrix with a non-zero raster must have less available area
    than the Indicator matrix.
    """
    shapes = gpd.GeoSeries(
        [box(X0 + 1, Y0 + 1, X1 - 1, Y0 / 2 + Y1 / 2),
         box(X0 + 1, Y0 / 2 + Y1 / 2, X1 - 1, Y1 - 1)],
        crs=ref.crs).rename_axis('shape')
    I = np.asarray(ref.indicatormatrix(shapes).todense())
    I = I.reshape(shapes.shape + ref.shape)
    I = xr.DataArray(I, coords=[('shape', shapes.index),
                                ref.coords['y'], ref.coords['x']])
    excluder = ExclusionContainer(ref.crs, res=0.01)
    excluder.add_raster(raster)
    ds = ref.availabilitymatrix(shapes, excluder)

    eligible_share = 1 - raster_clip
    assert isclose(I.sum() * eligible_share, ds.sum(), atol=5)
    assert_allclose(I.sum(['x', 'y']) * eligible_share, ds.sum(['x', 'y']), atol=5)

    # check parallel mode
    excluder = ExclusionContainer(ref.crs, res=0.01)
    excluder.add_raster(raster)
    assert_equal(ds, ref.availabilitymatrix(shapes, excluder, 2))

def test_isel(self, tmpdir_factory, bout_xyt_example_files):
    path = bout_xyt_example_files(tmpdir_factory, nxpe=1, nype=1, nt=1)
    bd = open_boutdataset(datapath=path, inputfilepath=None, keep_xboundaries=False)
    actual = bd.isel(x=slice(None, None, 2))
    expected = bd.bout.data.isel(x=slice(None, None, 2))
    xrt.assert_equal(actual, expected)

def test_start_experiment(get_many_defaults):
    dirs, dsim, expected = get_many_defaults
    ctrl = ControlField('ctrl', expected['mymodel'].sortby(['Z', 'YC']))
    obs = ControlField('obs', expected['obs_mask'].sortby(['Zobs', 'Yobs']))
    F = interp_operator_2d(
        [expected['mymodel']['Z'].sortby('Z'),
         expected['mymodel']['YC'].sortby('YC')],
        [expected['obs_mask']['Zobs'].sortby('Zobs'),
         expected['obs_mask']['Yobs'].sortby('Yobs')],
        pack_index_in=ctrl.wet_ind,
        pack_index_out=obs.wet_ind)

    testoid = OIDriver('test_start')
    assert testoid.experiment == 'test_start'

    testoid.start(dirs, dsim,
                  mymodel=expected['mymodel'],
                  obs_mask=expected['obs_mask'],
                  obs_std=expected['obs_std'],
                  startat=None)

    assert testoid.dirs == dirs
    assert testoid.dsim == dsim
    assert_equal(testoid.mymodel, expected['mymodel'])
    assert_equal(testoid.obs_std, expected['obs_std'])
    assert_equal(testoid.ctrl.mask, ctrl.mask)
    assert_equal(testoid.obs.mask, expected['obs_mask'])
    assert_equal(testoid.obs.mask, obs.mask)
    assert np.all(F == testoid.F)

def test_cftime_datetime_mean():
    times = cftime_range('2000', periods=4)
    da = DataArray(times, dims=['time'])

    assert da.isel(time=0).mean() == da.isel(time=0)

    expected = DataArray(times.date_type(2000, 1, 2, 12))
    result = da.mean()
    assert_equal(result, expected)

    da_2d = DataArray(times.values.reshape(2, 2))
    result = da_2d.mean()
    assert_equal(result, expected)

def test_ButterwothFilter(self):
    from xarray.testing import assert_equal

    b_filter = ButterworthFilter(
        time_series=self.base_eegs, freq_range=[58., 62.], filt_type='stop', order=4)
    base_eegs_filtered_1 = b_filter.filter()
    base_eegs_filtered_2 = self.base_eegs.filtered(
        freq_range=[58., 62.], filt_type='stop', order=4)

    assert_equal(base_eegs_filtered_1, base_eegs_filtered_2)

    with self.assertRaises(AssertionError):
        assert_equal(base_eegs_filtered_1, self.base_eegs)

def test_generate_grid_ds():
    # simple case...just the dims
    axis_dims = {'X': 'lon', 'Y': 'lat', 'Z': 'z'}
    axis_coords = {'X': 'llon', 'Y': 'llat', 'Z': 'zz'}
    ds_old = ds_original.copy()
    ds_new = generate_grid_ds(
        ds_old,
        axis_dims,
        boundary_discontinuity={'lon': 360, 'lat': 180},
        pad={'z': 'auto'})
    assert_equal(ds_new, ds_out_left.drop(['llon_left', 'llat_left', 'zz_left']))
    # TODO why are they not identical ? assert identical fails
    ds_new = generate_grid_ds(
        ds_original,
        axis_dims,
        axis_coords,
        boundary_discontinuity={'lon': 360, 'lat': 180, 'llon': 360, 'llat': 180},
        pad={'z': 'auto', 'zz': 'auto'})
    assert_equal(ds_new, ds_out_left)

def test_coarsen_staggered():
    x = xr.DataArray(np.arange(10), dims=['x'])
    y = coarsen_staggered(x, {'x': 5}, stagger_dim='x')
    np.testing.assert_equal(y.values, [2.5, 6.5])

    x, y = np.ogrid[:40, :40]
    data = x + y * 0
    x, y = [arr.ravel() for arr in [x, y]]
    xarr = xr.DataArray(data, coords=(('x', x), ('y', y)))

    blocks = {'x': 10, 'y': 10}
    ds = destagger(xarr, 'x', mode='wrap')
    c1 = coarsen_centered(ds, blocks)
    c2 = coarsen_staggered(xarr, blocks, stagger_dim='x', mode='wrap')
    assert_equal(c1.transpose(*c2.dims), c2)

    x = xr.DataArray(np.arange(10), dims=['x'])
    x = x.assign_coords(x=x)
    y = coarsen(x, {'x': 5})
    np.testing.assert_equal(y.coords['x'].values, [2.5, 7.5])

def test_time_units_with_timezone_roundtrip(calendar):
    # Regression test for GH 2649
    expected_units = 'days since 2000-01-01T00:00:00-05:00'
    expected_num_dates = np.array([1, 2, 3])
    dates = decode_cf_datetime(expected_num_dates, expected_units, calendar)

    # Check that dates were decoded to UTC; here the hours should all
    # equal 5.
    result_hours = DataArray(dates).dt.hour
    expected_hours = DataArray([5, 5, 5])
    assert_equal(result_hours, expected_hours)

    # Check that the encoded values are accurately roundtripped.
    result_num_dates, result_units, result_calendar = encode_cf_datetime(
        dates, expected_units, calendar)

    if calendar in _STANDARD_CALENDARS:
        np.testing.assert_array_equal(result_num_dates, expected_num_dates)
    else:
        # cftime datetime arithmetic is not quite exact.
        np.testing.assert_allclose(result_num_dates, expected_num_dates)

    assert result_units == expected_units
    assert result_calendar == calendar

def assertCoordinatesEqual(self, d1, d2):
    __tracebackhide__ = True  # noqa: F841
    assert_equal(d1, d2)

def assertVariableEqual(self, v1, v2):
    __tracebackhide__ = True  # noqa: F841
    assert_equal(v1, v2)

def assertDatasetEqual(self, d1, d2):
    __tracebackhide__ = True  # noqa: F841
    assert_equal(d1, d2)

def assertDataArrayEqual(self, ar1, ar2):
    __tracebackhide__ = True  # noqa: F841
    assert_equal(ar1, ar2)

def test_isnull_with_dask():
    da = construct_dataarray(2, np.float32, contains_nan=True, dask=True)
    assert isinstance(da.isnull().data, dask_array_type)
    assert_equal(da.isnull().load(), da.load().isnull())

def assertDataArrayEqual(self, ar1, ar2):
    assert_equal(ar1, ar2)

def assertCoordinatesEqual(self, d1, d2):
    assert_equal(d1, d2)

def assertDatasetEqual(self, d1, d2):
    assert_equal(d1, d2)

def assertVariableEqual(self, v1, v2):
    assert_equal(v1, v2)