def test_density(self):
    """Test density"""
    outfile = 'test_xarray.nc'
    analysis_file = 'test_xarray_analysis.nc'
    o = OceanDrift(loglevel=20)
    o.set_config('environment:fallback:land_binary_mask', 0)
    t1 = datetime.now()
    t2 = t1 + timedelta(hours=6)
    o.seed_elements(time=t1, lon=4, lat=60, number=100,
                    origin_marker=0)
    o.seed_elements(time=[t1, t2], lon=4.2, lat=60.2, number=100,
                    origin_marker=1)
    o.seed_elements(time=[t1, t2], lon=4.1, lat=60.1, number=100,
                    origin_marker=2)
    reader_x = reader_oscillating.Reader('x_sea_water_velocity',
                                         amplitude=1, zero_time=t1)
    reader_y = reader_oscillating.Reader('y_sea_water_velocity',
                                         amplitude=1, zero_time=t2)
    o.add_reader([reader_x, reader_y])
    o.set_config('drift:horizontal_diffusivity', 10)
    o.run(duration=timedelta(hours=12), time_step=1800, outfile=outfile)
    #o.plot(fast=True)
    density_pixelsize_m = 5000
    H, Hsub, Hsurf, lon_array, lat_array = o.get_density_array(
        pixelsize_m=density_pixelsize_m)
    ox = opendrift.open_xarray(outfile, analysis_file=analysis_file)
    Hx, lon_arrayx, lat_arrayx = ox.get_density_xarray(
        pixelsize_m=density_pixelsize_m)
    Hx = Hx[0]  # Presently only for origin_marker = 0
    self.assertAlmostEqual(lon_array[0], 3.94, 1)
    self.assertAlmostEqual(lon_array[-1], 4.76, 1)
    self.assertAlmostEqual(lon_arrayx[0], 3.90, 1)
    self.assertAlmostEqual(lon_arrayx[-1], 4.67, 1)
    self.assertEqual(Hx.shape, H.shape)
    Hsum = H.sum(axis=1).sum(axis=1)
    Hxsum = Hx.sum(axis=1).sum(axis=1)
    self.assertEqual(Hsum[0], 118)
    self.assertEqual(Hxsum[0], 118)
    self.assertEqual(Hsum[-1], 300)
    self.assertEqual(Hxsum[-1], 300)
    os.remove(outfile)
    os.remove(analysis_file)
reader_y = reader_oscillating.Reader('y_sea_water_velocity',
                                     amplitude=1, zero_time=t2)
o.add_reader([reader_x, reader_y])
o.set_config('drift:horizontal_diffusivity', 10)
o.run(duration=timedelta(hours=24), time_step=900, time_step_output=1800,
      outfile=outfile)

#%%
# Opening the output file lazily with Xarray.
# This will work even if the file is too large to fit in memory, as it
# will read and process data chunk-by-chunk directly from the file using Dask.
# (See also `example_river_runoff.py <https://opendrift.github.io/gallery/example_river_runoff.html>`_)
oa = opendrift.open_xarray(outfile)

#%%
# Calculating the histogram of element density.
# The histogram may be stored/cached to a netCDF file for later re-use,
# as the calculation may be time consuming for huge output files.
h = oa.get_histogram(pixelsize_m=500)

#%%
# Plot the cumulative coverage of the first seeding (origin_marker=0)
b = h.isel(origin_marker=0).sum(dim='time')
oa.plot(background=b.where(b > 0), fast=True, show_elements=False,
        vmin=0, vmax=1000,
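
#%%
# Hedged sketch (not part of the original example): the histogram returned by
# get_histogram is an ordinary xarray DataArray, so the caching mentioned above
# could be done with standard xarray I/O. The filename 'element_histogram.nc'
# is an arbitrary assumption for illustration.
import xarray as xr
h.to_netcdf('element_histogram.nc')  # cache the computed histogram on disk
h_cached = xr.open_dataarray('element_histogram.nc')  # later: re-open without recomputing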
reader_y = reader_oscillating.Reader('y_sea_water_velocity',
                                     amplitude=1, zero_time=t2)
o.add_reader([reader_x, reader_y])
o.set_config('drift:horizontal_diffusivity', 10)
o.run(duration=timedelta(hours=24), time_step=900, time_step_output=1800,
      outfile=outfile)

#%%
# Opening the output file lazily with Xarray.
# This will work even if the file is too large to fit in memory, as it
# will read and process data chunk-by-chunk directly from the file using Dask.
# Note that the analysis file will be re-used if it exists; thus it should be
# deleted after making any changes to the simulation above.
o = opendrift.open_xarray(outfile, analysis_file='simulation_density.nc')

#%%
# Making two animations, one for each of the two seedings / origin_markers.
# The calculated density fields will be stored/cached in the analysis file
# for later re-use, as their calculation may be time consuming
# for huge output files.
# Note that other analysis/plotting methods are not yet adapted
# to datasets opened lazily with open_xarray.
for om in [0, 1]:
    o.animation(density=True, density_pixelsize_m=500, fast=False,
                corners=[4.0, 6, 59.5, 61], origin_marker=om,
                show_elements=False,
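
#%%
# Hedged sketch (not part of the original example): the cached density fields
# are written to the analysis file named above, which is a regular netCDF file.
# It can therefore be inspected with xarray to verify what has been cached;
# the exact variable names inside it depend on the OpenDrift version.
import xarray as xr
with xr.open_dataset('simulation_density.nc') as cached:
    print(cached)  # list the cached density variables and their dimensions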
    amplitude=.5, zero_time=t2)
o.add_reader([reader_x, reader_y])
o.set_config('drift:horizontal_diffusivity', 300)
o.set_config('general:coastline_action', 'previous')
o.run(duration=timedelta(hours=48), time_step=1800, time_step_output=3600,
      outfile=outfile)

#%%
# Opening the output file lazily with Xarray.
# This will work even if the file is too large to fit in memory, as it
# will read and process data chunk-by-chunk directly from the file using Dask.
# Note that the analysis file will be re-used if it exists; thus it should be
# deleted after making any changes to the simulation above.
o = opendrift.open_xarray(outfile, analysis_file=analysis_file)

#%%
# Animation of the spatial density of river runoff water.
# Although the same number of elements is seeded from each river, the density
# plots are weighted with the actual runoff at the time of seeding. This
# weighting can be done/changed afterwards without needing to redo the
# simulation.
# The calculated density fields will be stored/cached in the analysis file
# for later re-use, as their calculation may be time consuming
# for huge output files.
# Note that other analysis/plotting methods are not yet adapted
# to datasets opened lazily with open_xarray.
runoff = np.abs(np.cos(np.arange(number * 2) * 2 * np.pi / (number)))  # Impose a temporal variation of runoff
runoff[0:number] *= .1  # Let River 1 have 10% of the runoff of River 2
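
#%%
# Hedged sketch (not part of the original example): get_histogram accepts
# per-element weights (cf. weights=None elsewhere in the gallery), so the
# runoff weighting described above could also be applied explicitly when
# computing a histogram, without re-running the simulation. The pixel size and
# the assumption of one weight per element are illustrative only.
h_weighted = o.get_histogram(pixelsize_m=1500, weights=runoff)
runoff_per_cell = h_weighted.sum(dim='origin_marker')  # total runoff-weighted density per cell and time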
#%%
# Seed elements at 5 different locations/longitudes
lons = [4, 4.2, 4.3, 4.32, 4.6]
t = datetime.now()

o = OceanDrift(loglevel=20)
for i, lon in enumerate(lons):
    o.seed_elements(lon=lon, lat=60, radius=3000, number=2000,
                    time=t, origin_marker_name='Lon %f' % lon)

o.set_config('environment:constant:y_sea_water_velocity', .1)
o.run(steps=15, outfile=of)

#%%
# Calculate spatial density of elements at 1500m grid spacing
oa = opendrift.open_xarray(of)
oa.ds = oa.ds.where(oa.ds.status == 0)
d = oa.get_histogram(pixelsize_m=1500, weights=None)
dom = d.argmax(dim='origin_marker', skipna=True)
dom = dom.where(d.sum(dim='origin_marker') > 0)
dom.name = 'Dominating source'

#%%
# Show which of the 5 sources are dominating within each grid cell
oa.animation(background=dom, show_elements=False, bgalpha=1,
             legend=oa.origin_marker, colorbar=False, vmin=0, vmax=4)

#%%
# .. image:: /gallery/animations/example_dominating_0.gif
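
#%%
# Hedged sketch (not part of the original example): the dominance map can also
# be summarised numerically, e.g. by counting how many grid cells each source
# dominates at the final output time. The time indexing and the mapping from
# origin_marker index to seeding longitude are assumptions for illustration.
for om, lon in enumerate(lons):
    n_cells = int((dom.isel(time=-1) == om).sum())
    print('Source %d (Lon %f) dominates %d grid cells' % (om, lon, n_cells))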