def test_fieldset_from_data_gridtypes(xdim=20, ydim=10, zdim=4):
    """Check FieldSet initialisation from data on all four grid types.

    The same depth-dependent zonal flow is built on rectilinear-Z,
    rectilinear-S, curvilinear-Z and curvilinear-S grids; trajectories
    must agree across all of them.
    """
    lon = np.linspace(0., 10., xdim, dtype=np.float32)
    lat = np.linspace(0., 10., ydim, dtype=np.float32)
    depth = np.linspace(0., 1., zdim, dtype=np.float32)
    depth_s = np.ones((zdim, ydim, xdim))
    U = np.ones((zdim, ydim, xdim))
    V = np.ones((zdim, ydim, xdim))
    dimensions = {'lat': lat, 'lon': lon, 'depth': depth}
    data = {'U': np.array(U, dtype=np.float32),
            'V': np.array(V, dtype=np.float32)}
    lonm, latm = np.meshgrid(lon, lat)
    for k in range(zdim):
        # zonal velocity u = (depth+1)*x + 0.1 on every level
        data['U'][k, :, :] = lonm * (depth[k] + 1) + .1
        depth_s[k, :, :] = depth[k]

    # Rectilinear Z grid
    fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
    pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, .4])
    pset.execute(AdvectionRK4, runtime=1, dt=.5)
    plon = [p.lon for p in pset]
    plat = [p.lat for p in pset]
    # sol of dx/dt = (init_depth+1)*x+0.1; x(0)=0
    assert np.allclose(plon, [0.17173462592827032, 0.2177736932123214])
    assert np.allclose(plat, [1, 1])

    # Rectilinear S grid
    dimensions['depth'] = depth_s
    fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
    pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, .4])
    pset.execute(AdvectionRK4, runtime=1, dt=.5)
    assert np.allclose(plon, [p.lon for p in pset])
    assert np.allclose(plat, [p.lat for p in pset])

    # Curvilinear Z grid
    dimensions['lon'] = lonm
    dimensions['lat'] = latm
    dimensions['depth'] = depth
    fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
    pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, .4])
    pset.execute(AdvectionRK4, runtime=1, dt=.5)
    assert np.allclose(plon, [p.lon for p in pset])
    assert np.allclose(plat, [p.lat for p in pset])

    # Curvilinear S grid
    dimensions['depth'] = depth_s
    fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
    pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, .4])
    pset.execute(AdvectionRK4, runtime=1, dt=.5)
    assert np.allclose(plon, [p.lon for p in pset])
    assert np.allclose(plat, [p.lat for p in pset])
def test_ofam_xarray_vs_netcdf(dt):
    """Advection on the OFAM fieldset must agree between the netCDF and xarray readers."""
    fieldsetNetcdf = set_ofam_fieldset(use_xarray=False)
    fieldsetxarray = set_ofam_fieldset(use_xarray=True)
    lonstart, latstart, runtime = (180, 10, delta(days=7))

    psetN = ParticleSet(fieldsetNetcdf, pclass=JITParticle, lon=lonstart, lat=latstart)
    psetN.execute(AdvectionRK4, runtime=runtime, dt=dt)

    psetX = ParticleSet(fieldsetxarray, pclass=JITParticle, lon=lonstart, lat=latstart)
    psetX.execute(AdvectionRK4, runtime=runtime, dt=dt)

    assert np.allclose(psetN[0].lon, psetX[0].lon)
    assert np.allclose(psetN[0].lat, psetX[0].lat)
def test_advection_3D_outofbounds(mode, direction, wErrorThroughSurface):
    """3D advection out of the vertical domain: particles either get deleted
    (out-of-bounds) or, when a through-surface recovery kernel is supplied,
    are pushed back to the surface and advected in 2D."""
    xdim = ydim = zdim = 2
    dimensions = {'lon': np.linspace(0., 1, xdim, dtype=np.float32),
                  'lat': np.linspace(0., 1, ydim, dtype=np.float32),
                  'depth': np.linspace(0., 1, zdim, dtype=np.float32)}
    wfac = -1. if direction == 'up' else 1.
    data = {'U': 0.01 * np.ones((xdim, ydim, zdim), dtype=np.float32),
            'V': np.zeros((xdim, ydim, zdim), dtype=np.float32),
            'W': wfac * np.ones((xdim, ydim, zdim), dtype=np.float32)}
    fieldset = FieldSet.from_data(data, dimensions, mesh='flat')

    def DeleteParticle(particle, fieldset, time):
        particle.delete()

    def SubmergeParticle(particle, fieldset, time):
        particle.depth = 0
        # perform a 2D advection because vertical flow will always push up in this case
        AdvectionRK4(particle, fieldset, time)
        particle.time = time + particle.dt  # to not trigger kernels again, otherwise infinite loop
        particle.set_state(StateCode.Success)

    recovery_dict = {ErrorCode.ErrorOutOfBounds: DeleteParticle}
    if wErrorThroughSurface:
        recovery_dict[ErrorCode.ErrorThroughSurface] = SubmergeParticle

    pset = ParticleSet(fieldset=fieldset, pclass=ptype[mode], lon=0.5, lat=0.5, depth=0.9)
    pset.execute(AdvectionRK4_3D, runtime=10., dt=1, recovery=recovery_dict)

    if direction == 'up' and wErrorThroughSurface:
        assert np.allclose(pset.lon[0], 0.6)
        assert np.allclose(pset.depth[0], 0)
    else:
        assert len(pset) == 0
def test_brownian_example(mode, npart=3000):
    """Statistical check of BrownianMotion2D against the analytical
    diffusion standard deviation sqrt(2*Kh*t)."""
    fieldset = zeros_fieldset()

    # Set diffusion constants.
    kh_zonal = 100
    kh_meridional = 100

    # Create field of Kh_zonal and Kh_meridional, using same grid as U
    grid = fieldset.U.grid
    fieldset.add_field(Field('Kh_zonal', kh_zonal * np.ones((2, 2)), grid=grid))
    fieldset.add_field(Field('Kh_meridional', kh_meridional * np.ones((2, 2)), grid=grid))

    runtime = delta(days=1)
    # Fix the random seed for reproducible statistics.
    # BUGFIX: the original seeded twice (123456, then 1234) with no draws in
    # between; only the last seed has any effect, so the first was dead code.
    random.seed(1234)
    pset = ParticleSet(fieldset=fieldset, pclass=ptype[mode],
                       lon=np.zeros(npart), lat=np.zeros(npart))
    pset.execute(pset.Kernel(BrownianMotion2D), runtime=runtime, dt=delta(hours=1))

    expected_std_x = np.sqrt(2 * kh_zonal * runtime.total_seconds())
    expected_std_y = np.sqrt(2 * kh_meridional * runtime.total_seconds())
    conversion = (1852 * 60)  # to convert from degrees to m
    ys = np.array([p.lat for p in pset]) * conversion
    # since near equator, we do not need to care about curvature effect
    xs = np.array([p.lon for p in pset]) * conversion

    tol = 200  # 200m tolerance
    assert np.allclose(np.std(xs), expected_std_x, atol=tol)
    assert np.allclose(np.std(ys), expected_std_y, atol=tol)
    assert np.allclose(np.mean(xs), 0, atol=tol)
    assert np.allclose(np.mean(ys), 0, atol=tol)
def test_nearest_neighbour_interpolation3D(mode, k_sample_p, npart=81):
    """Sample a 2x2x2 field with a single nonzero cell using nearest-neighbour
    interpolation and check which particles pick up the nonzero value."""
    dims = (2, 2, 2)
    dimensions = {'lon': np.linspace(0., 1., dims[0], dtype=np.float32),
                  'lat': np.linspace(0., 1., dims[1], dtype=np.float32),
                  'depth': np.linspace(0., 1., dims[2], dtype=np.float32)}
    data = {'U': np.zeros(dims, dtype=np.float32),
            'V': np.zeros(dims, dtype=np.float32),
            'P': np.zeros(dims, dtype=np.float32)}
    data['P'][0, 1, 1] = 1.
    fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
    fieldset.P.interp_method = 'nearest'

    side = int(np.sqrt(npart))
    xv, yv = np.meshgrid(np.linspace(0, 1.0, side), np.linspace(0, 1.0, side))
    # combine a pset at 0m with pset at 1m, as meshgrid does not do 3D
    pset = ParticleSet(fieldset, pclass=pclass(mode),
                       lon=xv.flatten(), lat=yv.flatten(), depth=np.zeros(npart))
    pset2 = ParticleSet(fieldset, pclass=pclass(mode),
                        lon=xv.flatten(), lat=yv.flatten(), depth=np.ones(npart))
    pset.add(pset2)
    pset.execute(k_sample_p, endtime=1, dt=1)

    assert np.allclose(pset.p[(pset.lon < 0.5) & (pset.lat > 0.5) & (pset.depth > 0.5)],
                       1.0, rtol=1e-5)
    # NOTE(review): `&` binds tighter than `|`, so this reads
    # lon>0.5 | (lat<0.5 & depth<0.5) — presumably intentional, but confirm.
    assert np.allclose(pset.p[(pset.lon > 0.5) | (pset.lat < 0.5) & (pset.depth < 0.5)],
                       0.0, rtol=1e-5)
def test_globcurrent_startparticles_between_time_arrays(mode, dt, with_starttime):
    """Add a P field with a shorter time range than U/V; starting a particle at
    the edge of the U time axis must raise TimeExtrapolationError when sampling P."""
    fieldset = set_globcurrent_fieldset()

    fnamesFeb = sorted(glob(path.join(path.dirname(__file__),
                                      'GlobCurrent_example_data', '200202*.nc')))
    fieldset.add_field(Field.from_netcdf(
        fnamesFeb, ('P', 'eastward_eulerian_current_velocity'),
        {'lat': 'lat', 'lon': 'lon', 'time': 'time'}))

    class MyParticle(ptype[mode]):
        sample_var = Variable('sample_var', initial=0.)

    def SampleP(particle, fieldset, time):
        particle.sample_var += fieldset.P[time, particle.depth, particle.lat, particle.lon]

    if with_starttime:
        # release at the first (forward) or last (backward) U time
        time = fieldset.U.grid.time[0] if dt > 0 else fieldset.U.grid.time[-1]
        pset = ParticleSet(fieldset, pclass=MyParticle, lon=[25], lat=[-35], time=time)
        with pytest.raises(TimeExtrapolationError):
            pset.execute(pset.Kernel(AdvectionRK4) + SampleP, runtime=delta(days=1), dt=dt)
    else:
        pset = ParticleSet(fieldset, pclass=MyParticle, lon=[25], lat=[-35])
        pset.execute(pset.Kernel(AdvectionRK4) + SampleP, runtime=delta(days=1), dt=dt)
def test_zonalflow_spherical(mode, k_sample_p, xdim=100, ydim=200):
    """Create uniform EASTWARD flow on spherical earth and advect particles.

    As flow is so simple, it can be directly compared to the analytical
    solution. Note that in this case the cosine conversion is needed.
    """
    maxvel = 1.
    p_fld = 10
    dimensions = {'lon': np.linspace(-180, 180, xdim, dtype=np.float32),
                  'lat': np.linspace(-90, 90, ydim, dtype=np.float32)}
    data = {'U': maxvel * np.ones([xdim, ydim]),
            'V': np.zeros([xdim, ydim]),
            'P': p_fld * np.ones([xdim, ydim])}
    fieldset = FieldSet.from_data(data, dimensions, mesh='spherical', transpose=True)

    lonstart = [0, 45]
    latstart = [0, 45]
    runtime = delta(hours=24)
    pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lonstart, lat=latstart)
    pset.execute(pset.Kernel(AdvectionRK4) + k_sample_p,
                 runtime=runtime, dt=delta(hours=1))

    for i in range(len(lonstart)):
        # analytical zonal displacement (degrees), corrected for latitude
        exp_lon = lonstart[i] + (runtime.total_seconds() * maxvel / 1852 / 60
                                 / cos(latstart[i] * pi / 180))
        # BUGFIX: the original asserts had no abs(), so any negative error
        # (computed value below the expectation) passed trivially.
        assert abs(pset.lat[i] - latstart[i]) < 1e-4
        assert abs(pset.lon[i] - exp_lon) < 1e-4
        assert abs(pset.p[i] - p_fld) < 1e-4
def test_randomexponential(mode, lambd, npart=1000):
    """Draw particle depths from random.expovariate and check the sample mean
    against the analytical mean 1/lambda."""
    fieldset = zeros_fieldset()

    # Rate parameter for random.expovariate
    fieldset.lambd = lambd

    # Set random seed
    random.seed(1234)

    pset = ParticleSet(fieldset=fieldset, pclass=ptype[mode],
                       lon=np.zeros(npart), lat=np.zeros(npart), depth=np.zeros(npart))

    def vertical_randomexponential(particle, fieldset, time):
        # Kernel for random exponential variable in depth direction
        particle.depth = random.expovariate(fieldset.lambd)

    pset.execute(vertical_randomexponential, runtime=1, dt=1)

    depth = np.array([particle.depth for particle in pset.particles])
    expected_mean = 1. / fieldset.lambd
    assert np.allclose(np.mean(depth), expected_mean, rtol=.1)
def test_reset_dt(fieldset, mode, tmpdir):
    # Assert that p.dt gets reset when a write_time is not a multiple of dt
    # for p.dt=0.02 to reach outputdt=0.05 and endtime=0.1, the steps should be
    # [0.02, 0.02, 0.01, 0.02, 0.02, 0.01], resulting in 6 kernel executions
    filepath = tmpdir.join("pfile_reset_dt.nc")

    def Update_lon(particle, fieldset, time):
        particle.lon += 0.1

    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0], lat=[0],
                       lonlatdepth_dtype=np.float64)
    ofile = pset.ParticleFile(name=filepath, outputdt=0.05)
    pset.execute(pset.Kernel(Update_lon), endtime=0.1, dt=0.02, output_file=ofile)

    # 6 kernel executions x 0.1 degree each
    assert np.allclose(pset.lon, .6)
def test_advection_periodic_zonal_meridional(mode, xdim=100, ydim=100):
    """Advect through a doubly-periodic halo and check the wrapped position."""
    fieldset = periodicfields(xdim, ydim, uvel=1., vvel=1.)
    fieldset.add_periodic_halo(zonal=True, meridional=True)

    # default halo size is 5 grid points on each side
    assert len(fieldset.U.lat) == ydim + 10
    assert len(fieldset.U.lon) == xdim + 10
    # halo spacing must match the interior grid spacing
    assert np.allclose(np.diff(fieldset.U.lat),
                       fieldset.U.lat[1] - fieldset.U.lat[0], rtol=0.001)
    assert np.allclose(np.diff(fieldset.U.lon),
                       fieldset.U.lon[1] - fieldset.U.lon[0], rtol=0.001)

    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.4], lat=[0.5])
    pset.execute(AdvectionRK4 + pset.Kernel(periodicBC),
                 runtime=delta(hours=20), dt=delta(seconds=30))
    assert abs(pset[0].lon - 0.05) < 0.1
    assert abs(pset[0].lat - 0.15) < 0.1
def test_variable_written_once(fieldset, mode, tmpdir, npart):
    """A Variable with to_write='once' must be written exactly once per particle,
    also for particles added later via repeatdt."""
    filepath = tmpdir.join("pfile_once_written_variables")

    def Update_v(particle, fieldset, time):
        particle.v_once += 1.
        particle.age += particle.dt

    class MyParticle(ptype[mode]):
        v_once = Variable('v_once', dtype=np.float32, initial=0., to_write='once')
        age = Variable('age', dtype=np.float32, initial=0.)

    lon = np.linspace(0, 1, npart, dtype=np.float32)
    lat = np.linspace(1, 0, npart, dtype=np.float32)
    pset = ParticleSet(fieldset, pclass=MyParticle, lon=lon, lat=lat, repeatdt=0.1)
    pset.execute(pset.Kernel(Update_v), endtime=1, dt=0.1,
                 output_file=pset.ParticleFile(name=filepath, outputdt=0.1))

    # v_once is incremented once per dt=0.1, so v_once == age*10
    assert np.allclose([p.v_once - p.age * 10 for p in pset], 0, atol=1e-5)

    ncfile = Dataset(filepath + ".nc", 'r', 'NETCDF4')
    vfile = ncfile.variables['v_once'][:]
    assert vfile.shape == (npart * 11,)
    assert [v == 0 for v in vfile]
def test_concatenate_interaction_kernels(fieldset, mode):
    """Concatenating two InteractionKernels must apply both before mutations land."""
    lons = [0.0, 0.1, 0.25, 0.44]
    lats = [0.0, 0.0, 0.0, 0.0]
    # Distance in meters R_earth*0.2 degrees
    interaction_distance = 6371000 * 0.2 * np.pi / 180
    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lons, lat=lats,
                       interaction_distance=interaction_distance)
    pset.execute(DoNothing,
                 pyfunc_inter=pset.InteractionKernel(DummyMoveNeighbor)
                 + pset.InteractionKernel(DummyMoveNeighbor),
                 endtime=1., dt=1.)
    # The kernel results are only applied after all interactionkernels
    # have been executed, so we expect the result to be double the
    # movement from executing the kernel once.
    assert np.allclose(pset.lat, [0.2, 0.4, 0.1, 0.0], rtol=1e-5)
def test_globcurrent_pset_fromfile(mode, dt, pid_offset, tmpdir):
    """A ParticleSet restored from a ParticleFile must continue identically to
    the original set."""
    filename = tmpdir.join("pset_fromparticlefile.nc")
    fieldset = set_globcurrent_fieldset()

    ptype[mode].setLastID(pid_offset)
    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=25, lat=-35)
    pfile = pset.ParticleFile(filename, outputdt=delta(hours=6))
    pset.execute(AdvectionRK4, runtime=delta(days=1), dt=dt, output_file=pfile)
    pfile.close()

    ptype[mode].setLastID(0)  # need to reset to zero
    pset_new = ParticleSet.from_particlefile(fieldset, pclass=ptype[mode], filename=filename)

    pset.execute(AdvectionRK4, runtime=delta(days=1), dt=dt)
    pset_new.execute(AdvectionRK4, runtime=delta(days=1), dt=dt)

    for var in ['lon', 'lat', 'depth', 'time', 'id']:
        assert np.allclose([getattr(p, var) for p in pset],
                           [getattr(p, var) for p in pset_new])
def test_advection_meridional(lon, lat, mode, npart=10):
    """Particles at high latitude move geographically faster due to the pole
    correction in `GeographicPolar`."""
    data = {'U': np.zeros((lon.size, lat.size), dtype=np.float32),
            'V': np.ones((lon.size, lat.size), dtype=np.float32)}
    dimensions = {'lon': lon, 'lat': lat}
    fieldset = FieldSet.from_data(data, dimensions, mesh='spherical')

    pset = ParticleSet(fieldset, pclass=ptype[mode],
                       lon=np.linspace(-60, 60, npart, dtype=np.float32),
                       lat=np.linspace(0, 30, npart, dtype=np.float32))
    delta_lat = np.diff(np.array([p.lat for p in pset]))
    pset.execute(AdvectionRK4, runtime=delta(hours=2), dt=delta(seconds=30))
    # uniform meridional flow: latitude spacing must be preserved
    assert np.allclose(np.diff(np.array([p.lat for p in pset])), delta_lat, rtol=1.e-4)
def test_ofam_particles(mode):
    """Advect a particle through the OFAM example data and check its end position."""
    fieldset = set_ofam_fieldset()

    lonstart = [180]
    latstart = [10]
    depstart = [2.5]  # the depth of the first layer in OFAM

    pset = ParticleSet(fieldset, pclass=ptype[mode],
                       lon=lonstart, lat=latstart, depth=depstart)
    pset.execute(AdvectionRK4, runtime=delta(days=10), dt=delta(minutes=5),
                 interval=delta(hours=6))

    assert abs(pset[0].lon - 173) < 1
    assert abs(pset[0].lat - 11) < 1
def test_globcurrent_netcdf_timestamps(dt):
    """A fieldset built with explicit timestamps must advect identically to one
    whose times are read from the netCDF files."""
    fieldsetNetcdf = set_globcurrent_fieldset()
    timestamps = fieldsetNetcdf.U.grid.timeslices
    fieldsetTimestamps = set_globcurrent_fieldset(timestamps=timestamps)

    lonstart, latstart, runtime = (25, -35, delta(days=7))

    psetN = ParticleSet(fieldsetNetcdf, pclass=JITParticle, lon=lonstart, lat=latstart)
    psetN.execute(AdvectionRK4, runtime=runtime, dt=dt)

    psetT = ParticleSet(fieldsetTimestamps, pclass=JITParticle, lon=lonstart, lat=latstart)
    psetT.execute(AdvectionRK4, runtime=runtime, dt=dt)

    assert np.allclose(psetN.lon[0], psetT.lon[0])
    assert np.allclose(psetN.lat[0], psetT.lat[0])
def test_analyticalAgrid(mode):
    """AdvectionAnalytical must raise NotImplementedError on an A-grid fieldset."""
    lon = np.arange(0, 15, dtype=np.float32)
    lat = np.arange(0, 15, dtype=np.float32)
    U = np.ones((lat.size, lon.size), dtype=np.float32)
    V = np.ones((lat.size, lon.size), dtype=np.float32)
    fieldset = FieldSet.from_data({'U': U, 'V': V}, {'lon': lon, 'lat': lat}, mesh='flat')
    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=1, lat=1)

    # Idiomatic pytest replacement for the original try/except-and-flag
    # pattern; matches the pytest.raises usage elsewhere in this module.
    with pytest.raises(NotImplementedError):
        pset.execute(AdvectionAnalytical, runtime=1)
def test_variable_init(fieldset, mode, npart=10):
    """Test that checks correct initialisation of custom variables"""
    class TestParticle(ptype[mode]):
        p_float = Variable('p_float', dtype=np.float32, initial=10.)
        p_double = Variable('p_double', dtype=np.float64, initial=11.)
        p_int = Variable('p_int', dtype=np.int32, initial=12.)

    pset = ParticleSet(fieldset, pclass=TestParticle,
                       lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart))

    def addOne(particle, fieldset, time):
        particle.p_float += 1.
        particle.p_double += 1.
        particle.p_int += 1

    pset.execute(pset.Kernel(AdvectionRK4) + addOne, runtime=1., dt=1.)

    # each initial value incremented exactly once
    assert np.allclose([p.p_float for p in pset], 11., rtol=1e-12)
    assert np.allclose([p.p_double for p in pset], 12., rtol=1e-12)
    assert np.allclose([p.p_int for p in pset], 13, rtol=1e-12)
def test_variable_write_double(fieldset, mode, tmpdir):
    """With lonlatdepth_dtype=np.float64, positions must be written as doubles."""
    filepath = tmpdir.join("pfile_variable_write_double.nc")

    def Update_lon(particle, fieldset, time):
        particle.lon += 0.1

    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0], lat=[0],
                       lonlatdepth_dtype=np.float64)
    ofile = pset.ParticleFile(name=filepath, outputdt=0.00001)
    pset.execute(pset.Kernel(Update_lon), endtime=0.001, dt=0.00001, output_file=ofile)

    ncfile = close_and_compare_netcdffiles(filepath, ofile)
    lons = ncfile.variables['lon'][:]
    assert isinstance(lons[0, 0], np.float64)
    ncfile.close()
def test_sampling_multiple_grid_sizes(mode, ugridfactor):
    """Advect on a FieldSet whose U and V live on grids of different resolution.

    NOTE(review): another test later in this module reuses this exact function
    name, so pytest only collects the last definition — confirm and rename one.
    """
    xdim, ydim = 10, 20
    U = Field('U', np.zeros((ydim * ugridfactor, xdim * ugridfactor), dtype=np.float32),
              lon=np.linspace(0., 1., xdim * ugridfactor, dtype=np.float32),
              lat=np.linspace(0., 1., ydim * ugridfactor, dtype=np.float32))
    V = Field('V', np.zeros((ydim, xdim), dtype=np.float32),
              lon=np.linspace(0., 1., xdim, dtype=np.float32),
              lat=np.linspace(0., 1., ydim, dtype=np.float32))
    fieldset = FieldSet(U, V)
    pset = ParticleSet(fieldset, pclass=pclass(mode), lon=[0.8], lat=[0.9])

    if ugridfactor > 1:
        assert fieldset.U.grid is not fieldset.V.grid
    else:
        assert fieldset.U.grid is fieldset.V.grid

    pset.execute(AdvectionRK4, runtime=10, dt=1)
    # zero flow: the particle must not move, and its grid index must stay valid
    assert np.isclose(pset.lon[0], 0.8)
    assert np.all((0 <= pset.xi) & (pset.xi < xdim * ugridfactor))
def test_pset_repeated_release_delayed_adding(fieldset, mode, repeatdt, npart=10):
    """Particles released via repeatdt must each accumulate the number of
    kernel executions since their own release."""
    class MyParticle(ptype[mode]):
        sample_var = Variable('sample_var', initial=0.)

    pset = ParticleSet(fieldset, lon=[0], lat=[0], pclass=MyParticle, repeatdt=repeatdt)

    # BUGFIX: kernel functions take (particle, fieldset, time); the extra
    # trailing `dt` parameter was the removed legacy signature and is
    # inconsistent with every other kernel in this module.
    def IncrLon(particle, fieldset, time):
        particle.sample_var += 1.

    for i in range(npart):
        # one new particle is added every repeatdt executions
        assert len(pset) == (i // repeatdt) + 1
        pset.execute(IncrLon, dt=1., runtime=1.)

    assert np.allclose([p.sample_var for p in pset], np.arange(npart, -1, -repeatdt))
def test_moving_eddy(fieldset_moving, mode, method, rtol, diffField, npart=1):
    """Advect particles in the moving-eddy fieldset and compare against the
    analytical truth_moving solution."""
    fieldset = fieldset_moving
    if diffField:
        # zero diffusivity fields so diffusion kernels can run without effect
        fieldset.add_field(Field('Kh_zonal', np.zeros(fieldset.U.data.shape),
                                 grid=fieldset.U.grid))
        fieldset.add_field(Field('Kh_meridional', np.zeros(fieldset.V.data.shape),
                                 grid=fieldset.V.grid))
        fieldset.add_constant('dres', 0.1)

    lon = np.linspace(12000, 21000, npart)
    lat = np.linspace(12500, 12500, npart)
    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lon, lat=lat)
    endtime = delta(hours=6).total_seconds()
    pset.execute(kernel[method], dt=delta(minutes=3), endtime=endtime)

    exp_lon = [truth_moving(x, y, endtime)[0] for x, y in zip(lon, lat)]
    exp_lat = [truth_moving(x, y, endtime)[1] for x, y in zip(lon, lat)]
    assert np.allclose(pset.lon, exp_lon, rtol=rtol)
    assert np.allclose(pset.lat, exp_lat, rtol=rtol)
def test_sampling_multiple_grid_sizes(mode):
    """Sampling test that tests for FieldSet with different grid sizes

    While this currently works fine in Scipy mode, it fails in JIT mode with
    an out_of_bounds_error because there is only one (xi, yi, zi) for each particle
    A solution would be to define xi, yi, zi for each field separately

    NOTE(review): this definition shadows an earlier test with the same name
    in this module, so only this one is collected — confirm and rename one.
    """
    xdim = 10
    ydim = 20
    gf = 10  # factor by which the resolution of U is higher than of V
    U = Field('U', np.zeros((ydim * gf, xdim * gf), dtype=np.float32),
              lon=np.linspace(0., 1., xdim * gf, dtype=np.float32),
              lat=np.linspace(0., 1., ydim * gf, dtype=np.float32))
    V = Field('V', np.zeros((ydim, xdim), dtype=np.float32),
              lon=np.linspace(0., 1., xdim, dtype=np.float32),
              lat=np.linspace(0., 1., ydim, dtype=np.float32))
    fieldset = FieldSet(U, V)
    pset = ParticleSet(fieldset, pclass=pclass(mode), lon=[0.8], lat=[0.9])

    pset.execute(AdvectionRK4, runtime=10, dt=1)
    # zero flow: particle must stay put
    assert np.isclose(pset[0].lon, 0.8)
def test_EOSseawaterproperties_kernels(mode):
    """Check the seawater equation-of-state kernels (potential temperature,
    in-situ temperature, pressure from latitude/depth) against known values."""
    fieldset = FieldSet.from_data(data={'U': 0, 'V': 0,
                                        'psu_salinity': 40,
                                        'temperature': 40,
                                        'potemperature': 36.89073},
                                  dimensions={'lat': 0, 'lon': 0, 'depth': 0})
    # BUGFIX: np.float was removed in NumPy 1.24; the builtin float is the
    # documented equivalent.
    fieldset.add_constant('refpressure', float(0))

    class PoTempParticle(ptype[mode]):
        potemp = Variable('potemp', dtype=np.float32)
        pressure = Variable('pressure', dtype=np.float32, initial=10000)
    pset = ParticleSet(fieldset, pclass=PoTempParticle, lon=5, lat=5, depth=1000)
    pset.execute(PtempFromTemp, runtime=0, dt=0)
    assert np.allclose(pset[0].potemp, 36.89073)

    class TempParticle(ptype[mode]):
        temp = Variable('temp', dtype=np.float32)
        pressure = Variable('pressure', dtype=np.float32, initial=10000)
    pset = ParticleSet(fieldset, pclass=TempParticle, lon=5, lat=5, depth=1000)
    pset.execute(TempFromPtemp, runtime=0, dt=0)
    assert np.allclose(pset[0].temp, 40)

    class TPressureParticle(ptype[mode]):
        pressure = Variable('pressure', dtype=np.float32)
    # BUGFIX: this ParticleSet previously used TempParticle, leaving the
    # just-defined TPressureParticle unused; the pressure kernel only needs
    # the `pressure` variable it computes.
    pset = ParticleSet(fieldset, pclass=TPressureParticle, lon=5, lat=30, depth=7321.45)
    pset.execute(PressureFromLatDepth, runtime=0, dt=0)
    assert np.allclose(pset[0].pressure, 7500, atol=1e-2)
def test_fieldset_defer_loading_function(zdim, scale_fac, tmpdir,
                                         filename='test_parcels_defer_loading'):
    """Deferred loading with a compute_on_defer callback: vertical weighted
    averaging must be applied to each loaded time chunk.

    NOTE(review): a later test in this module reuses this exact name, so
    pytest only collects the last definition — confirm and rename one.
    """
    filepath = tmpdir.join(filename)
    data0, dims0 = generate_fieldset(3, 3, zdim, 10)
    data0['U'][:, 0, :, :] = np.nan  # setting first layer to nan, which will be changed to zero (and all other layers to 1)
    dims0['time'] = np.arange(0, 10, 1) * 3600
    dims0['depth'] = np.arange(0, zdim, 1)
    fieldset_out = FieldSet.from_data(data0, dims0)
    fieldset_out.write(filepath)
    fieldset = FieldSet.from_parcels(filepath)

    # testing for combination of deferred-loaded and numpy Fields
    fieldset.add_field(Field('numpyfield', np.zeros((10, zdim, 3, 3)),
                             grid=fieldset.U.grid))
    # testing for scaling factors
    fieldset.U.set_scaling_factor(scale_fac)

    dFdx, dFdy = fieldset.V.gradient()
    dz = np.gradient(fieldset.U.depth)
    DZ = np.moveaxis(np.tile(dz, (fieldset.U.grid.ydim, fieldset.U.grid.xdim, 1)),
                     [0, 1, 2], [1, 2, 0])

    def compute(fieldset):
        # Calculating vertical weighted average
        for f in [fieldset.U, fieldset.V]:
            for tind in f.loaded_time_indices:
                f.data[tind, :] = np.sum(f.data[tind, :] * DZ, axis=0) / sum(dz)

    fieldset.compute_on_defer = compute
    fieldset.computeTimeChunk(1, 1)
    assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.) / zdim)
    assert np.allclose(dFdx.data, 0)

    pset = ParticleSet(fieldset, JITParticle, 0, 0)

    def DoNothing(particle, fieldset, time):
        return ErrorCode.Success

    pset.execute(DoNothing, dt=3600)
    assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.) / zdim)
    assert np.allclose(dFdx.data, 0)
def test_pset_create_fromparticlefile(fieldset, mode, restart, tmpdir):
    """Recreate a ParticleSet from its own output file (with and without
    restart) and check state, ids, and repeatdt behaviour."""
    filename = tmpdir.join("pset_fromparticlefile.nc")
    lon = np.linspace(0, 1, 10, dtype=np.float32)
    lat = np.linspace(1, 0, 10, dtype=np.float32)

    class TestParticle(ptype[mode]):
        p = Variable('p', np.float32, initial=0.33)
        p2 = Variable('p2', np.float32, initial=1, to_write=False)
        p3 = Variable('p3', np.float32, to_write='once')

    pset = ParticleSet(fieldset, lon=lon, lat=lat, depth=[4] * len(lon),
                       pclass=TestParticle, p3=np.arange(len(lon)))
    pfile = pset.ParticleFile(filename, outputdt=1)

    def Kernel(particle, fieldset, time):
        particle.p = 2.
        if particle.lon == 1.:
            particle.delete()

    pset.execute(Kernel, runtime=2, dt=1, output_file=pfile)
    pfile.close()

    pset_new = ParticleSet.from_particlefile(fieldset, pclass=TestParticle,
                                             filename=filename,
                                             restart=restart, repeatdt=1)

    for var in ['lon', 'lat', 'depth', 'time', 'p', 'p2', 'p3']:
        assert np.allclose([getattr(p, var) for p in pset],
                           [getattr(p, var) for p in pset_new])

    if restart:
        assert np.allclose([p.id for p in pset], [p.id for p in pset_new])
    pset_new.execute(Kernel, runtime=2, dt=1)
    # repeatdt=1 over runtime=2 releases two more copies of the set
    assert len(pset_new) == 3 * len(pset)
def test_fieldKh_Brownian(mesh, mode, xdim=200, ydim=100, kh_zonal=100, kh_meridional=50):
    """DiffusionUniformKh statistics must match the analytical std on both
    flat and spherical meshes."""
    mesh_conversion = 1 / 1852. / 60 if mesh == 'spherical' else 1
    fieldset = zeros_fieldset(mesh=mesh, xdim=xdim, ydim=ydim,
                              mesh_conversion=mesh_conversion)
    fieldset.add_constant_field("Kh_zonal", kh_zonal, mesh=mesh)
    fieldset.add_constant_field("Kh_meridional", kh_meridional, mesh=mesh)

    npart = 1000
    runtime = delta(days=1)
    ParcelsRandom.seed(1234)
    pset = ParticleSet(fieldset=fieldset, pclass=ptype[mode],
                       lon=np.zeros(npart), lat=np.zeros(npart))
    pset.execute(pset.Kernel(DiffusionUniformKh), runtime=runtime, dt=delta(hours=1))

    expected_std_lon = np.sqrt(2 * kh_zonal * mesh_conversion**2 * runtime.total_seconds())
    expected_std_lat = np.sqrt(2 * kh_meridional * mesh_conversion**2 * runtime.total_seconds())

    lats = pset.lat
    lons = pset.lon
    tol = 200 * mesh_conversion  # effectively 200 m errors
    assert np.allclose(np.std(lats), expected_std_lat, atol=tol)
    assert np.allclose(np.std(lons), expected_std_lon, atol=tol)
    assert np.allclose(np.mean(lons), 0, atol=tol)
    assert np.allclose(np.mean(lats), 0, atol=tol)
def test_fieldset_defer_loading_function(zdim, scale_fac, tmpdir,
                                         filename='test_parcels_defer_loading'):
    """Deferred loading with dask chunking and a compute_on_defer callback that
    rebuilds each time chunk as a vertical weighted average.

    NOTE(review): this definition shadows an earlier test with the same name
    in this module, so only this one is collected — confirm and rename one.
    """
    filepath = tmpdir.join(filename)
    data0, dims0 = generate_fieldset(3, 3, zdim, 10)
    data0['U'][:, 0, :, :] = np.nan  # setting first layer to nan, which will be changed to zero (and all other layers to 1)
    dims0['time'] = np.arange(0, 10, 1) * 3600
    dims0['depth'] = np.arange(0, zdim, 1)
    fieldset_out = FieldSet.from_data(data0, dims0)
    fieldset_out.write(filepath)
    fieldset = FieldSet.from_parcels(filepath,
                                     chunksize={'time': ('time_counter', 1),
                                                'depth': ('depthu', 1),
                                                'lat': ('y', 2),
                                                'lon': ('x', 2)})

    # testing for combination of deferred-loaded and numpy Fields
    with pytest.raises(ValueError):
        fieldset.add_field(Field('numpyfield', np.zeros((10, zdim, 3, 3)),
                                 grid=fieldset.U.grid))

    # testing for scaling factors
    fieldset.U.set_scaling_factor(scale_fac)

    dz = np.gradient(fieldset.U.depth)
    DZ = np.moveaxis(np.tile(dz, (fieldset.U.grid.ydim, fieldset.U.grid.xdim, 1)),
                     [0, 1, 2], [1, 2, 0])

    def compute(fieldset):
        # Calculating vertical weighted average
        for f in [fieldset.U, fieldset.V]:
            for tind in f.loaded_time_indices:
                data = da.sum(f.data[tind, :] * DZ, axis=0) / sum(dz)
                data = da.broadcast_to(data, (1, f.grid.zdim, f.grid.ydim, f.grid.xdim))
                f.data = f.data_concatenate(f.data, data, tind)

    fieldset.compute_on_defer = compute
    fieldset.computeTimeChunk(1, 1)
    assert isinstance(fieldset.U.data, da.core.Array)
    assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.) / zdim)

    pset = ParticleSet(fieldset, JITParticle, 0, 0)

    def DoNothing(particle, fieldset, time):
        return ErrorCode.Success

    pset.execute(DoNothing, dt=3600)
    assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.) / zdim)
def test_c_kernel(fieldset, mode, c_inc):
    """A custom C function (inline string or header file) called from a JIT
    kernel must match the pure-Python equivalent."""
    coord_type = np.float32 if c_inc == 'str' else np.float64
    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0],
                       lonlatdepth_dtype=coord_type)

    def func(U, lon, dt):
        u = U.data[0, 2, 1]
        return lon + u * dt

    if c_inc == 'str':
        c_include = """
                 static inline StatusCode func(CField *f, float *lon, double *dt)
                 {
                   float data2D[2][2][2];
                   StatusCode status = getCell2D(f, 1, 2, 0, data2D, 1); CHECKSTATUS(status);
                   float u = data2D[0][0][0];
                   *lon += u * *dt;
                   return SUCCESS;
                 }
                 """
    else:
        c_include = path.join(path.dirname(__file__), 'customed_header.h')

    def ckernel(particle, fieldset, time):
        func('parcels_customed_Cfunc_pointer_args', fieldset.U, particle.lon, particle.dt)

    def pykernel(particle, fieldset, time):
        particle.lon = func(fieldset.U, particle.lon, particle.dt)

    if mode == 'scipy':
        kernel = pset.Kernel(pykernel)
    else:
        kernel = pset.Kernel(ckernel, c_include=c_include)

    pset.execute(kernel, endtime=3., dt=3.)
    assert np.allclose(pset.lon[0], 0.81578948)
def test_fieldset_sample_geographic(fieldset_geometric, mode, k_sample_uv, npart=120):
    """Sample a fieldset with conversion to geographic units (degrees)."""
    fieldset = fieldset_geometric
    lon = np.linspace(-170, 170, npart)
    lat = np.linspace(-80, 80, npart)

    # sweep longitude at fixed latitude 70
    pset = ParticleSet(fieldset, pclass=pclass(mode),
                       lon=lon, lat=np.zeros(npart) + 70.)
    pset.execute(pset.Kernel(k_sample_uv), endtime=1., dt=1.)
    assert np.allclose(pset.v, lon, rtol=1e-6)

    # sweep latitude at fixed longitude -45
    pset = ParticleSet(fieldset, pclass=pclass(mode),
                       lat=lat, lon=np.zeros(npart) - 45.)
    pset.execute(pset.Kernel(k_sample_uv), endtime=1., dt=1.)
    assert np.allclose(pset.u, lat, rtol=1e-6)