def test_get_move(self):
    """
    Test that get_move(...) on the WindMover matches the expected delta.
    """
    for step in range(2):
        curr_time = sec_to_date(date_to_sec(self.model_time) +
                                self.time_step * step)
        self.wm.prepare_for_model_step(self.sc, self.time_step, curr_time)

        delta = self.wm.get_move(self.sc, self.time_step, curr_time)
        actual = self._expected_move()

        # the results should be independent of model time
        tol = 1e-8
        msg = ('{0} is not within a tolerance of '
               '{1}'.format('WindMover.get_move()', tol))
        np.testing.assert_allclose(delta, actual, tol, tol, msg, 0)

        assert self.wm.active

        ts = date_to_sec(curr_time) - date_to_sec(self.model_time)
        print ('Time step [sec]:\t{0}'
               'C++ delta-move:\n{1}'
               'Expected delta-move:\n{2}'
               ''.format(ts, delta, actual))

        self.wm.model_step_is_done()
def test_variable_wind_after_model_time(self):
    '''
    test to make sure the wind mover is behaving properly with
    out-of-bounds winds.
    A variable wind should not extrapolate if it is out of bounds,
    so prepare_for_model_step() should fail with an exception
    in this case.
    '''
    wind_time = datetime(2012, 8, 21, 13)  # one day after model time

    # build a 3-point constant-valued series starting at wind_time
    series = (np.zeros((3, ), dtype=datetime_value_2d)
              .view(dtype=np.recarray))
    series.time = [sec_to_date(date_to_sec(wind_time) + self.time_step * i)
                   for i in range(3)]
    series.value = np.array(((2., 25.), (2., 25.), (2., 25.)))

    wind = Wind(timeseries=series.reshape(3), units='meter per second')

    wm = WindMover(wind)
    wm.prepare_for_model_run()

    for step in range(2):
        curr_time = sec_to_date(date_to_sec(self.model_time) +
                                self.time_step * step)

        # the wind data starts a full day later, so stepping must fail
        with raises(RuntimeError):
            wm.prepare_for_model_step(self.sc, self.time_step, curr_time)
def write_output(self, step_num, islast_step=False):
    """
    Generate ice thickness/concentration images for one model step.

    :param step_num: the model step to render
    :param islast_step: flag forwarded to the base class bookkeeping

    :returns: dict of image data and metadata for this step, or None if
              the outputter is off, not scheduled to write this step,
              or has no ice movers.
    """
    # I don't think we need this for this outputter:
    # - it does stuff with cache initialization
    super(IceImageOutput, self).write_output(step_num, islast_step)

    if (self.on is False or
            not self._write_step or
            len(self.ice_movers) == 0):
        return None

    # fixme -- doing all this cache stuff just to get the timestep..
    # maybe timestep should be passed in.
    for sc in self.cache.load_timestep(step_num).items():
        model_time = date_to_sec(sc.current_time_stamp)
        iso_time = sc.current_time_stamp.isoformat()

        thick_image, conc_image, bb = self.render_images(model_time)

        # removed unused 'web_mercator' (EPSG:3857) local -- only the
        # equirectangular projection is ever reported
        equirectangular = 'EPSG:32662'

        # info to return to the caller
        # NOTE(review): we return from inside the loop, so only the
        # first spill container is ever used -- presumably there is
        # only one; verify against the cache contents.
        output_dict = {'step_num': step_num,
                       'time_stamp': iso_time,
                       'thickness_image': thick_image,
                       'concentration_image': conc_image,
                       'bounding_box': bb,
                       'projection': equirectangular,
                       }

        return output_dict
def write_output(self, step_num, islast_step=False): """ Generate image from data """ # I don't think we need this for this outputter: # - it does stuff with cache initialization super(IceImageOutput, self).write_output(step_num, islast_step) if self.on is False or not self._write_step or self.ice_mover is None: return None ## fixme -- doing all this cache stuff just to get the timestep.. ## maybe timestep should be passed in. for sc in self.cache.load_timestep(step_num).items(): pass model_time = date_to_sec(sc.current_time_stamp) thick_image, conc_image = self.render_images(model_time) ## fixme: Can we really loop through the movers? ## or should there be one IceImage outputter for each Ice Mover. ## here is where we render.... # do something with self.get_coverage_fc(ice_coverage, mover_triangles)) # do somethign with self.get_thickness_fc(ice_thickness, mover_triangles)) # info to return to the caller output_dict = {'step_num': step_num, 'time_stamp': sc.current_time_stamp.isoformat(), 'thickness_image': thick_image, 'concentration_image': conc_image, 'bounding_box': ((-85.0, 20.0),(-55.0, 45.0)), 'projection': ("EPSG:3857"), } return output_dict
def check_time(self, wind, model_time):
    '''
    Clamp model_time into the time range covered by the wind series.

    Should have an option to extrapolate but for now we do by default

    TODO, FIXME: This function does not appear to be used by anything.
                 Removing it does not break any of the unit tests.
                 If it is not used, it should probably go away.
    '''
    # nothing to clamp against, or no time given -- pass through
    if wind is None or model_time is None:
        return model_time

    start_time = wind.get_start_time()
    end_time = wind.get_end_time()

    # degenerate (single-point) series: leave the time alone
    if end_time == start_time:
        return model_time

    timeval = date_to_sec(model_time)

    if timeval < start_time:
        return sec_to_datetime(start_time)

    if timeval > end_time:
        return sec_to_datetime(end_time)

    return model_time
def test_move_gridcur_series(self):
    """
    test move for a gridCur file series (first time in first file)
    """
    time = datetime.datetime(2002, 1, 30, 1)
    self.cm.model_time = time_utils.date_to_sec(time)

    time_grid_file = testdata['GridCurrentMover']['series_gridCur']
    topology_file = r""  # no pre-built topology for this grid

    self.gcm.text_read(time_grid_file, topology_file)

    self.cm.ref[:]['long'] = -119.933264  # for gridCur test
    self.cm.ref[:]['lat'] = 34.138736
    self.check_move()

    # expected deltas -- regression values for this data file
    actual = np.empty((self.cm.num_le, ), dtype=world_point)
    actual[:]['lat'] = -0.0034527536849574456
    actual[:]['long'] = 0.005182449331779978
    actual[:]['z'] = 0.

    tol = 1e-5
    msg = r"{0} move is not within a tolerance of {1}"
    np.testing.assert_allclose(self.cm.delta['lat'], actual['lat'],
                               tol, tol,
                               msg.format('gridcur series', tol), 0)
    np.testing.assert_allclose(self.cm.delta['long'], actual['long'],
                               tol, tol,
                               msg.format('gridcur series', tol), 0)

    # np.testing.assert_equal(self.cm.delta, actual,
    #                         "test_move_gridcur_series() failed", 0)

    # previously the np.all() result was discarded, making the z check
    # a no-op -- actually assert it now
    assert np.all(self.cm.delta['z'] == 0)
def test_move_ptcur_extrapolate(self): """ test move for a ptCur grid (first time in file) """ # time before first time in file time = datetime.datetime(2000, 2, 14, 8) self.cm.model_time = time_utils.date_to_sec(time) time_grid_file = testdata['GridCurrentMover']['ptCur'] self.gcm.text_read(time_grid_file) # result of move should be same as first step for ptCur test self.gcm.extrapolate_in_time(True) self.cm.ref[:]['long'] = -124.686928 self.cm.ref[:]['lat'] = 48.401124 self.check_move() actual = np.empty((self.cm.num_le, ), dtype=world_point) actual[:]['lat'] = .0161987 actual[:]['long'] = -.02439887 tol = 1e-5 msg = r"{0} move is not within a tolerance of {1}" np.testing.assert_allclose(self.cm.delta['lat'], actual['lat'], tol, tol, msg.format('ptcur', tol), 0) np.testing.assert_allclose(self.cm.delta['long'], actual['long'], tol, tol, msg.format('ptcur', tol), 0)
def test_move_curv_series(self): """ Test a curvilinear file series - time in first file - time in second file """ # time = datetime.datetime(2009, 8, 2, 0) # first file time = datetime.datetime(2009, 8, 9, 0) # second file self.cm.model_time = time_utils.date_to_sec(time) time_grid_file = testdata['GridCurrentMover']['series_curv'] topology_file = testdata['GridCurrentMover']['series_top'] self.gcm.text_read(time_grid_file, topology_file) self.cm.ref[:]['long'] = -157.795728 # for HiROMS self.cm.ref[:]['lat'] = 21.069288 self.check_move() actual = np.empty((self.cm.num_le, ), dtype=world_point) # actual[:]['lat'] = -.003850193 # file 2 # actual[:]['long'] = .000152012 # updated to new curvilinear algorithm actual[:]['lat'] = .00292 # file 2 actual[:]['long'] = .00051458 tol = 1e-5 msg = r"{0} move is not within a tolerance of {1}" np.testing.assert_allclose(self.cm.delta['lat'], actual['lat'], tol, tol, msg.format('HiROMS', tol), 0) np.testing.assert_allclose(self.cm.delta['long'], actual['long'], tol, tol, msg.format('HiROMS', tol), 0)
def test_move_curv_no_top(self):
    """
    test move for a curvilinear grid (first time in file)

    Reads the grid without a topology file, then exports the generated
    topology as a side check.
    """
    time = datetime.datetime(2008, 1, 29, 17)
    self.cm.model_time = time_utils.date_to_sec(time)

    time_grid_file = testdata['GridCurrentMover']['curr_curv']

    # read with no topology -- the mover builds it from the grid file
    self.gcm.text_read(time_grid_file, topology_file=None)

    topology_file2 = os.path.join(
        os.path.split(time_grid_file)[0], 'NYTopologyNew.dat')
    self.gcm.export_topology(topology_file2)

    self.cm.ref[:]['long'] = -74.03988  # for NY
    self.cm.ref[:]['lat'] = 40.536092
    self.check_move()

    # expected deltas -- regression values for this data file
    actual = np.empty((self.cm.num_le, ), dtype=world_point)
    actual[:]['lat'] = .000911
    actual[:]['long'] = -.001288

    tol = 1e-5
    msg = r"{0} move is not within a tolerance of {1}"
    np.testing.assert_allclose(self.cm.delta['lat'], actual['lat'],
                               tol, tol, msg.format('ny_cg.nc', tol), 0)
    np.testing.assert_allclose(self.cm.delta['long'], actual['long'],
                               tol, tol, msg.format('ny_cg.nc', tol), 0)
def test_move_reg(self):
    """
    test move for a regular grid (first time in file)
    """
    time = datetime.datetime(1999, 11, 29, 21)
    self.cm.model_time = time_utils.date_to_sec(time)

    time_grid_file = testdata['GridCurrentMover']['curr_reg']

    self.gcm.text_read(time_grid_file)

    self.cm.ref[:]['long'] = 3.104588  # for simple example
    self.cm.ref[:]['lat'] = 52.016468
    self.check_move()

    # expected deltas -- regression values for this data file
    actual = np.empty((self.cm.num_le, ), dtype=world_point)
    actual[:]['lat'] = .003354610952486354
    actual[:]['long'] = .0010056182923228838
    actual[:]['z'] = 0.

    tol = 1e-5
    msg = r"{0} move is not within a tolerance of {1}"
    np.testing.assert_allclose(self.cm.delta['lat'], actual['lat'],
                               tol, tol, msg.format('test.cdf', tol), 0)
    np.testing.assert_allclose(self.cm.delta['long'], actual['long'],
                               tol, tol, msg.format('test.cdf', tol), 0)

    # np.testing.assert_equal(self.cm.delta['z'], actual['z'],
    #                         "test_move_reg() failed", 0)

    # previously the np.all() result was discarded, making the z check
    # a no-op -- actually assert it now
    assert np.all(self.cm.delta['z'] == 0)
def write_output(self, step_num, islast_step=False):
    """
    Dump rounded current-velocity magnitude/direction as JSON.

    :param step_num: model step to report
    :param islast_step: forwarded to the base class bookkeeping

    :returns: dict mapping mover id to {'magnitude', 'direction'} lists,
              or None when off / not writing this step.
    """
    super(CurrentJsonOutput, self).write_output(step_num, islast_step)

    if self.on is False or not self._write_step:
        return None

    for sc in self.cache.load_timestep(step_num).items():
        model_time = date_to_sec(sc.current_time_stamp)
        # removed unused 'iso_time' local -- the timestamp is not part
        # of this outputter's payload

        json_ = {}
        for cm in self.current_movers:
            velocities = cm.get_scaled_velocities(model_time)
            velocities = self.get_rounded_velocities(velocities)

            x = velocities[:, 0]
            y = velocities[:, 1]

            # rotate so that 0 rad points "up" (compass-style direction)
            direction = np.round(np.arctan2(y, x) - np.pi / 2, 2)
            magnitude = np.round(np.sqrt(x ** 2 + y ** 2), 2)

            json_[cm.id] = {'magnitude': magnitude.tolist(),
                            'direction': direction.tolist()}

        # NOTE(review): returns on the first spill container
        return json_
def test_constant_wind_after_model_time(self):
    '''
    test to make sure the wind mover is behaving properly with
    out-of-bounds winds.
    A constant wind should extrapolate if it is out of bounds,
    so prepare_for_model_step() should not fail.

    We are testing that the wind extrapolates properly, so the
    windages should be updated in the same way as the in-bounds test
    '''
    wind_time = datetime(2012, 8, 21, 13)  # one day after model time

    wind = Wind(timeseries=np.array((wind_time, (2., 25.)),
                                    dtype=datetime_value_2d).reshape(1),
                units='meter per second')

    wm = WindMover(wind)
    wm.prepare_for_model_run()

    for ix in range(2):
        curr_time = sec_to_date(date_to_sec(self.model_time) +
                                self.time_step * ix)
        print 'curr_time = ', curr_time

        old_windages = np.copy(self.sc['windages'])
        wm.prepare_for_model_step(self.sc, self.time_step, curr_time)

        # elements with infinite persistence keep their windage...
        mask = self.sc['windage_persist'] == -1
        assert np.all(self.sc['windages'][mask] == old_windages[mask])

        # ...while elements with finite persistence are resampled
        mask = self.sc['windage_persist'] > 0
        assert np.all(self.sc['windages'][mask] != old_windages[mask])
def write_output(self, step_num, islast_step=False):
    """
    dump data in geojson format

    :param step_num: model step to report
    :param islast_step: forwarded to the base class bookkeeping

    :returns: dict with an ISO time stamp and per-mover feature
              collections, or None when off / not writing this step.
    """
    super(IceGeoJsonOutput, self).write_output(step_num, islast_step)

    if self.on is False or not self._write_step:
        return None

    # drain the iterator: 'sc' is left bound to the last spill
    # container for this step -- presumably only one; verify
    for sc in self.cache.load_timestep(step_num).items():
        pass

    model_time = date_to_sec(sc.current_time_stamp)

    geojson = {}
    for mover in self.ice_movers:
        grid_data = mover.get_grid_data()
        ice_coverage, ice_thickness = mover.get_ice_fields(model_time)

        # one feature collection each for coverage and thickness
        geojson[mover.id] = []
        geojson[mover.id].append(
            self.get_coverage_fc(ice_coverage, grid_data))
        geojson[mover.id].append(
            self.get_thickness_fc(ice_thickness, grid_data))

    # default geojson should not output data to file
    output_info = {
        'time_stamp': sc.current_time_stamp.isoformat(),
        'feature_collections': geojson
    }

    return output_info
def _convert(x):
    """
    helper method for the next 4 tests -- round-trips a datetime
    through seconds and back.
    """
    return time_utils.sec_to_date(time_utils.date_to_sec(x))
def test_move_curv_no_top(self):
    """
    test move for a curvilinear grid (first time in file)

    Reads the grid without a topology file, then exports the generated
    topology as a side check.
    """
    time = datetime.datetime(2008, 1, 29, 17)
    self.cm.model_time = time_utils.date_to_sec(time)

    time_grid_file = testdata['GridCurrentMover']['curr_curv']

    # read with no topology -- the mover builds it from the grid file
    self.gcm.text_read(time_grid_file, topology_file=None)

    topology_file2 = os.path.join(os.path.split(time_grid_file)[0],
                                  'NYTopologyNew.dat')
    self.gcm.export_topology(topology_file2)

    self.cm.ref[:]['long'] = -74.03988  # for NY
    self.cm.ref[:]['lat'] = 40.536092
    self.check_move()

    # expected deltas -- regression values for this data file
    actual = np.empty((self.cm.num_le, ), dtype=world_point)
    actual[:]['lat'] = .000911
    actual[:]['long'] = -.001288

    tol = 1e-5
    msg = r"{0} move is not within a tolerance of {1}"
    np.testing.assert_allclose(self.cm.delta['lat'], actual['lat'],
                               tol, tol, msg.format('ny_cg.nc', tol), 0)
    np.testing.assert_allclose(self.cm.delta['long'], actual['long'],
                               tol, tol, msg.format('ny_cg.nc', tol), 0)
def test_grid_wind_curv():
    # curvilinear grid -- value at a known time/point must be nonzero
    curv = Grid(wind_file, topology_file, grid_type=2)

    time = date_to_sec(datetime(2006, 3, 31, 21))
    vel = curv.get_value(time, (-122.934656, 38.27594))

    print "Curv grid - vel: {0}\n".format(vel)

    assert vel.item() != 0
def test_get_move_exceptions(self):
    """
    get_move() requires the 'windages' data array -- removing it
    must raise a KeyError.
    """
    curr_time = sec_to_date(date_to_sec(self.model_time) + self.time_step)

    # temporarily drop the required array, then restore it afterwards
    tmp_windages = self.sc._data_arrays.pop('windages')

    with pytest.raises(KeyError):
        self.wm.get_move(self.sc, self.time_step, curr_time)

    self.sc._data_arrays['windages'] = tmp_windages
def test_prepare_for_model_step():
    """
    explicitly test to make sure windages are being updated for persistence
    != 0 and windages are not being changed for persistance == -1
    """
    time_step = 15 * 60  # seconds
    model_time = datetime(2012, 8, 20, 13)  # yyyy/month/day/hr/min/sec

    sc = sample_sc_release(5, (3., 6., 0.), model_time)
    sc['windage_persist'][:2] = -1

    wind = Wind(timeseries=np.array((model_time, (2., 25.)),
                                    dtype=datetime_value_2d).reshape(1),
                units='meter per second')

    wm = WindMover(wind)
    wm.prepare_for_model_run()

    for ix in range(2):
        curr_time = sec_to_date(date_to_sec(model_time) + time_step * ix)

        old_windages = np.copy(sc['windages'])
        wm.prepare_for_model_step(sc, time_step, curr_time)

        # use bare boolean masks: wrapping them in a list ([mask]) is
        # deprecated numpy indexing and adds a spurious leading dimension
        mask = sc['windage_persist'] == -1
        assert np.all(sc['windages'][mask] == old_windages[mask])

        mask = sc['windage_persist'] > 0
        assert np.all(sc['windages'][mask] != old_windages[mask])
def _convert(x):
    """
    helper method for the next 4 tests -- round-trips a datetime
    through seconds and back.
    """
    seconds = date_to_sec(x)
    return sec_to_date(seconds)
def write_output(self, step_num, islast_step=False):
    """
    dump data in geojson format

    :returns: dict with an ISO time stamp and per-mover feature
              collections, or None when off / not writing this step.
    """
    super(IceGeoJsonOutput, self).write_output(step_num, islast_step)

    if self.on is False or not self._write_step:
        return None

    # drain the iterator: 'sc' is left bound to the last spill
    # container for this step -- presumably only one; verify
    for sc in self.cache.load_timestep(step_num).items():
        pass

    model_time = date_to_sec(sc.current_time_stamp)

    geojson = {}
    for mover in self.ice_movers:
        mover_triangles = self.get_triangles(mover)
        ice_coverage, ice_thickness = mover.get_ice_fields(model_time)

        # one feature collection each for coverage and thickness
        geojson[mover.id] = []
        geojson[mover.id].append(self.get_coverage_fc(ice_coverage,
                                                      mover_triangles))
        geojson[mover.id].append(self.get_thickness_fc(ice_thickness,
                                                       mover_triangles))

    # default geojson should not output data to file
    output_info = {'time_stamp': sc.current_time_stamp.isoformat(),
                   'feature_collections': geojson
                   }

    return output_info
def write_output(self, step_num, islast_step=False):
    """
    dump data in geojson format

    Returns a dict with an ISO time stamp and per-mover ice thickness
    and concentration lists, or None when off / not writing this step.
    """
    super(IceJsonOutput, self).write_output(step_num, islast_step)

    if self.on is False or not self._write_step:
        return None

    # drain the iterator; 'sc' keeps the last spill container
    for sc in self.cache.load_timestep(step_num).items():
        pass

    model_time = date_to_sec(sc.current_time_stamp)

    raw_json = {}
    for mover in self.ice_movers:
        ice_coverage, ice_thickness = mover.get_ice_fields(model_time)

        raw_json[mover.id] = {"thickness": ice_thickness.tolist(),
                              "concentration": ice_coverage.tolist()}

    return {'time_stamp': sc.current_time_stamp.isoformat(),
            'data': raw_json}
def write_output(self, step_num, islast_step=False):
    """
    Dump rounded current-velocity magnitude/direction as JSON.

    :param step_num: model step to report
    :param islast_step: forwarded to the base class bookkeeping

    :returns: dict mapping mover id to {'magnitude', 'direction'} lists,
              or None when off / not writing this step.
    """
    super(CurrentJsonOutput, self).write_output(step_num, islast_step)

    if self.on is False or not self._write_step:
        return None

    for sc in self.cache.load_timestep(step_num).items():
        model_time = date_to_sec(sc.current_time_stamp)
        # removed unused 'iso_time' local -- the timestamp is not part
        # of this outputter's payload

        json_ = {}
        for cm in self.current_movers:
            velocities = cm.get_scaled_velocities(model_time)
            velocities = self.get_rounded_velocities(velocities)

            x = velocities[:, 0]
            y = velocities[:, 1]

            # rotate so that 0 rad points "up" (compass-style direction)
            direction = np.arctan2(y, x) - np.pi / 2
            magnitude = np.sqrt(x ** 2 + y ** 2)

            direction = np.round(direction, 2)
            magnitude = np.round(magnitude, 2)

            json_[cm.id] = {'magnitude': magnitude.tolist(),
                            'direction': direction.tolist()}

        # NOTE(review): returns on the first spill container
        return json_
def datetime_to_seconds(self, model_time):
    """
    Convert a datetime to seconds via time_utils.

    Put the time conversion call here - in case we decide to change
    it, it only updates here

    :param model_time: datetime to convert
    :returns: time in seconds as produced by time_utils.date_to_sec
    """
    return time_utils.date_to_sec(model_time)
def get_timeseries(self, datetime=None, format='uv'):
    """
    Returns the timeseries in requested format. If datetime=None,
    then the original timeseries that was entered is returned.
    If datetime is a list containing datetime objects, then the value
    for each of those date times is determined by the underlying
    C++ object and the timeseries is returned.

    The output format is defined by the strings 'r-theta', 'uv'

    :param datetime: [optional] datetime object or list of datetime
                     objects for which the value is desired
    :type datetime: datetime object
    :param format: output format for the times series:
                   either 'r-theta' or 'uv'
    :type format: either string or integer value defined by
                  basic_types.ts_format.* (see cy_basic_types.pyx)

    :returns: numpy array containing dtype=basic_types.datetime_value_2d.
              Contains user specified datetime and the corresponding
              values in user specified ts_format
    """
    if datetime is None:
        # no times requested -- hand back the stored series as entered
        datetimeval = to_datetime_value_2d(self.ossm.timeseries, format)
    else:
        # interpolate the underlying C++ series at the requested times
        datetime = np.asarray(datetime, dtype='datetime64[s]').reshape(-1)
        timeval = np.zeros((len(datetime), ),
                           dtype=basic_types.time_value_pair)
        timeval['time'] = date_to_sec(datetime)
        timeval['value'] = self.ossm.get_time_value(timeval['time'])
        datetimeval = to_datetime_value_2d(timeval, format)

    return datetimeval
def write_output(self, step_num, islast_step=False):
    """
    Generate ice thickness/concentration images for one model step.

    :param step_num: the model step to render
    :param islast_step: flag forwarded to the base class bookkeeping

    :returns: dict of image data and metadata for this step, or None if
              the outputter is off, not scheduled to write this step,
              or has no ice movers.
    """
    # I don't think we need this for this outputter:
    # - it does stuff with cache initialization
    super(IceImageOutput, self).write_output(step_num, islast_step)

    if (self.on is False or
            not self._write_step or
            len(self.ice_movers) == 0):
        return None

    # fixme -- doing all this cache stuff just to get the timestep..
    # maybe timestep should be passed in.
    for sc in self.cache.load_timestep(step_num).items():
        model_time = date_to_sec(sc.current_time_stamp)
        iso_time = sc.current_time_stamp.isoformat()

        thick_image, conc_image, bb = self.render_images(model_time)

        # removed unused 'web_mercator' (EPSG:3857) local -- only the
        # equirectangular projection is ever reported
        equirectangular = 'EPSG:32662'

        # info to return to the caller
        # NOTE(review): we return from inside the loop, so only the
        # first spill container is ever used.
        output_dict = {
            'step_num': step_num,
            'time_stamp': iso_time,
            'thickness_image': thick_image,
            'concentration_image': conc_image,
            'bounding_box': bb,
            'projection': equirectangular,
        }

        return output_dict
def write_output(self, step_num, islast_step=False):
    """
    dump data in geojson format

    :returns: dict with an ISO time stamp and per-mover thickness and
              concentration lists, or None when off / not writing.
    """
    super(IceJsonOutput, self).write_output(step_num, islast_step)

    if self.on is False or not self._write_step:
        return None

    # drain the iterator; 'sc' keeps the last spill container --
    # presumably only one; verify against the cache contents
    for sc in self.cache.load_timestep(step_num).items():
        pass

    model_time = date_to_sec(sc.current_time_stamp)

    raw_json = {}
    for mover in self.ice_movers:
        ice_coverage, ice_thickness = mover.get_ice_fields(model_time)

        raw_json[mover.id] = {"thickness": [],
                              "concentration": []}

        raw_json[mover.id]["thickness"] = ice_thickness.tolist()
        raw_json[mover.id]["concentration"] = ice_coverage.tolist()

    output_info = {
        'time_stamp': sc.current_time_stamp.isoformat(),
        'data': raw_json
    }

    return output_info
def write_output(self, step_num, islast_step=False):
    """
    dump data in geojson format

    :returns: dict with an ISO time stamp, a FeatureCollection of
              multipoint features (one per status code), and optionally
              the output filename; or None when not writing this step.
    """
    super(TrajectoryGeoJsonOutput, self).write_output(step_num,
                                                      islast_step)
    if not self._write_step:
        return None

    # one feature per element client; replaced with multipoint
    # because client performance is much more stable with one
    # feature per step rather than (n) features per step.features = []
    features = []
    for sc in self.cache.load_timestep(step_num).items():
        # NOTE(review): 'time' appears to be unused below -- verify
        time = date_to_sec(sc.current_time_stamp)

        position = self._dataarray_p_types(sc['positions'])
        status = self._dataarray_p_types(sc['status_codes'])

        sc_type = 'uncertain' if sc.uncertain else 'forecast'

        # break elements into multipoint features based on their status code
        #   evaporated : 10
        #   in_water : 2
        #   not_released : 0
        #   off_maps : 7
        #   on_land : 3
        #   to_be_removed : 12
        points = {}
        for ix, pos in enumerate(position):
            st_code = status[ix]

            if st_code not in points:
                points[st_code] = []

            points[st_code].append(pos[:2])

        for k in points:
            feature = Feature(geometry=MultiPoint(points[k]),
                              id="1",
                              properties={
                                  'sc_type': sc_type,
                                  'status_code': k,
                              })
            # uncertain features first so forecast renders on top
            if sc.uncertain:
                features.insert(0, feature)
            else:
                features.append(feature)

    geojson = FeatureCollection(features)
    # default geojson should not output data to file
    # read data from file and send it to web client
    output_info = {
        'time_stamp': sc.current_time_stamp.isoformat(),
        'feature_collection': geojson
    }

    if self.output_dir:
        output_filename = self.output_to_file(geojson, step_num)
        output_info.update({'output_filename': output_filename})

    return output_info
def test_get_time_value():
    'make sure get_time_value goes to correct C++ derived class function'
    shio = CyShioTime(shio_file)

    base = time_utils.date_to_sec(datetime(2012, 8, 20, 13))
    hours = [base + 3600. * offset for offset in range(10)]

    vel_rec = shio.get_time_value(hours)

    # all 'u' components non-zero, all 'v' components zero
    assert all(vel_rec['u'] != 0)
    assert all(vel_rec['v'] == 0)
def test_grid_wind_curv():
    # curvilinear grid -- value at a known time/point must be nonzero
    curv = CyTimeGridWindCurv(testdata['GridWindMover']['wind_curv'],
                              testdata['GridWindMover']['top_curv'])

    time = date_to_sec(datetime(2006, 3, 31, 21))
    vel = curv.get_value(time, (-122.934656, 38.27594))

    print "Curv grid - vel: {0}\n".format(vel)

    assert vel.item() != 0
class TestRandomMover:
    """
    gnome.RandomMover() test

    Exercises construction, property assignment, and
    prepare_for_model_step() of a RandomMover.
    """
    num_le = 5  # number of elements in the test spill

    # start_pos = np.zeros((num_le,3), dtype=basic_types.world_point_type)
    start_pos = (0., 0., 0.)
    rel_time = datetime.datetime(2012, 8, 20, 13)  # yyyy/month/day/hr/min/sec

    # model time is one second after the release time
    model_time = sec_to_date(date_to_sec(rel_time) + 1)
    time_step = 15 * 60  # seconds

    mover = RandomMover()

    def reset_pos(self):
        # put all elements back at the origin
        self.pSpill['positions'] = (0., 0., 0.)
        print self.pSpill['positions']

    def test_string_representation_matches_repr_method(self):
        """
        Just print repr and str
        """
        print
        print repr(self.mover)
        print str(self.mover)
        assert True

    def test_id_matches_builtin_id(self):
        # It is not a good assumption that the obj.id property
        # will always contain the id(obj) value.  For example it could
        # have been overloaded with, say, a uuid1() generator.
        # assert id(self.mover) == self.mover.id
        pass

    def test_change_diffusion_coef(self):
        # setter/getter round-trip for diffusion_coef
        self.mover.diffusion_coef = 200000
        assert self.mover.diffusion_coef == 200000

    def test_change_uncertain_factor(self):
        # setter/getter round-trip for uncertain_factor
        self.mover.uncertain_factor = 3
        assert self.mover.uncertain_factor == 3

    def test_prepare_for_model_step(self):
        """
        Simply tests the method executes without exceptions
        """
        pSpill = sample_sc_release(self.num_le, self.start_pos)
        self.mover.prepare_for_model_step(pSpill, self.time_step,
                                          self.model_time)
        assert True
def write_output(self, step_num, islast_step=False):
    """
    dump data in geojson format

    :returns: dict with an ISO time stamp, a FeatureCollection of
              multipoint features (one per status code), and optionally
              the output filename; or None when not writing this step.
    """
    super(TrajectoryGeoJsonOutput, self).write_output(step_num,
                                                      islast_step)
    if not self._write_step:
        return None

    # one feature per element client; replaced with multipoint
    # because client performance is much more stable with one
    # feature per step rather than (n) features per step.features = []
    features = []
    for sc in self.cache.load_timestep(step_num).items():
        # NOTE(review): 'time' appears to be unused below -- verify
        time = date_to_sec(sc.current_time_stamp)

        position = self._dataarray_p_types(sc['positions'])
        status = self._dataarray_p_types(sc['status_codes'])

        sc_type = 'uncertain' if sc.uncertain else 'forecast'

        # break elements into multipoint features based on their
        # status code
        #   evaporated : 10
        #   in_water : 2
        #   not_released : 0
        #   off_maps : 7
        #   on_land : 3
        #   to_be_removed : 12
        points = {}
        for ix, pos in enumerate(position):
            st_code = status[ix]

            if st_code not in points:
                points[st_code] = []

            points[st_code].append(pos[:2])

        for k in points:
            feature = Feature(geometry=MultiPoint(points[k]),
                              id="1",
                              properties={
                                  'sc_type': sc_type,
                                  'status_code': k,
                              })
            # uncertain features first so forecast renders on top
            if sc.uncertain:
                features.insert(0, feature)
            else:
                features.append(feature)

    geojson = FeatureCollection(features)
    # default geojson should not output data to file
    # read data from file and send it to web client
    output_info = {'time_stamp': sc.current_time_stamp.isoformat(),
                   'feature_collection': geojson
                   }

    if self.output_dir:
        output_filename = self.output_to_file(geojson, step_num)
        output_info.update({'output_filename': output_filename})

    return output_info
def test_get_move_exceptions(self):
    # get_move() requires the 'windages' data array -- removing it
    # should raise a KeyError
    curr_time = sec_to_date(date_to_sec(self.model_time) + self.time_step)

    tmp_windages = self.sc._data_arrays['windages']
    del self.sc._data_arrays['windages']

    with raises(KeyError):
        self.wm.get_move(self.sc, self.time_step, curr_time)

    # restore the array so later tests see an intact spill container
    self.sc._data_arrays['windages'] = tmp_windages
def test_move_tri_tide(self):
    """
    test move for a triangular grid (first time in file)

    Also checks certain vs. uncertain moves with and without an
    uncertainty time delay.
    """
    time = datetime.datetime(2014, 6, 9, 0)
    self.cm.model_time = time_utils.date_to_sec(time)
    self.cm.uncertain = True

    time_grid_file = get_datafile(os.path.join(cur_dir, 'PQBayCur.nc4'))
    topology_file = get_datafile(os.path.join(cur_dir,
                                              'PassamaquoddyTOP.dat'))
    tide_file = get_datafile(os.path.join(tide_dir, 'EstesHead.txt'))

    yeardata_path = os.path.join(os.path.dirname(gnome.__file__),
                                 'data/yeardata/')

    # wire the tide into the current mover before reading the grid
    self.shio = cy_shio_time.CyShioTime(tide_file)
    self.ccm.set_shio(self.shio)
    self.ccm.text_read(time_grid_file, topology_file)
    self.shio.set_shio_yeardata_path(yeardata_path)

    self.cm.ref[:]['long'] = -66.991344  # for Passamaquoddy
    self.cm.ref[:]['lat'] = 45.059316

    # self.check_move()
    self.check_move_certain_uncertain(self.ccm.uncertain_time_delay)

    # expected deltas -- regression values for this data file
    actual = np.empty((self.cm.num_le, ), dtype=world_point)
    actual[:]['lat'] = -.000440779
    actual[:]['long'] = .00016611

    tol = 1e-5
    msg = r"{0} move is not within a tolerance of {1}"
    np.testing.assert_allclose(
        self.cm.delta['lat'],
        actual['lat'],
        tol,
        tol,
        msg.format('ches_bay', tol),
        0,
    )
    np.testing.assert_allclose(
        self.cm.delta['long'],
        actual['long'],
        tol,
        tol,
        msg.format('ches_bay', tol),
        0,
    )

    # check that certain and uncertain are the same
    # if uncertainty is time delayed
    # self.ccm.uncertain_time_delay = 3
    # cython expects time_delay in seconds
    self.ccm.uncertain_time_delay = 10800
    self.check_move_certain_uncertain(self.ccm.uncertain_time_delay)
def get_timeseries(self, datetime=None, units=None, format='r-theta'):
    """
    Returns the timeseries in the requested format. If datetime=None,
    then the original timeseries that was entered is returned.
    If datetime is a list containing datetime objects, then the wind
    value for each of those date times is determined by the underlying
    CyOSSMTime object and the timeseries is returned.

    The output format is defined by the strings 'r-theta', 'uv'

    :param datetime: [optional] datetime object or list of datetime
                     objects for which the value is desired
    :type datetime: datetime object
    :param units: [optional] outputs data in these units. Default is to
                  output data in units
    :type units: string. Uses the hazpy.unit_conversion module.
                 hazpy.unit_conversion throws error for invalid units
    :param format: output format for the times series: either 'r-theta'
                   or 'uv'
    :type format: either string or integer value defined by
                  basic_types.ts_format.* (see cy_basic_types.pyx)

    :returns: numpy array containing dtype=basic_types.datetime_value_2d.
              Contains user specified datetime and the corresponding
              values in user specified ts_format
    """
    if datetime is None:
        # no times requested -- hand back the stored series as entered
        datetimeval = \
            convert.to_datetime_value_2d(self.ossm.timeseries, format)
    else:
        # interpolate the underlying CyOSSMTime series at request times
        datetime = np.asarray(datetime,
                              dtype='datetime64[s]').reshape(-1)
        timeval = np.zeros((len(datetime), ),
                           dtype=basic_types.time_value_pair)
        timeval['time'] = time_utils.date_to_sec(datetime)
        timeval['value'] = self.ossm.get_time_value(timeval['time'])
        datetimeval = convert.to_datetime_value_2d(timeval, format)

    # stored values are meters/second; convert to the requested units,
    # or this object's own units when none are given
    if units is not None:
        datetimeval['value'] = \
            self._convert_units(datetimeval['value'], format,
                                'meter per second', units)
    else:
        datetimeval['value'] = \
            self._convert_units(datetimeval['value'], format,
                                'meter per second', self.units)

    return datetimeval
def prepare_for_model_step(self, model_time):
    """
    Make sure we are up to date with the referenced time series
    """
    seconds = date_to_sec(model_time)

    # only rebuild the running average when the current series does
    # not already cover this time
    if not self.ossm.check_time_in_range(seconds):
        self.create_running_average_timeseries(self._past_hours_to_average,
                                               seconds)
def __init__(self):
    """
    Set up the fixture: a fixed model time plus initialized
    reference-point and status arrays.
    """
    time = datetime.datetime(2012, 8, 20, 13)
    self.model_time = time_utils.date_to_sec(time)

    # ###############
    # init. arrays #
    # ###############

    # NOTE(review): self.ref / self.status are presumably allocated
    # elsewhere (base class or class attributes) -- not visible here
    self.ref[:] = 1.
    self.ref[:]['z'] = 0  # on surface by default

    self.status[:] = oil_status.in_water
def prepare_for_model_run(self, model_time):
    """
    Make sure we are up to date with the referenced time series

    :raises ReferencedObjectNotSet: when no wind object is attached
    """
    if self.wind is None:
        raise ReferencedObjectNotSet("wind object not defined for WindMover")

    self.create_running_average_timeseries(self._past_hours_to_average,
                                           date_to_sec(model_time))
def to_time_value_pair(datetime_value, in_ts_format=None):
    """
    converts a numpy array containing basic_types.datetime_value_2d in
    user specified basic_types.ts_format into a time_value_pair array or
    it takes a basic_types.datetime_value_1d array and converts it to a
    time_value_pair array -- for 1d data, assume the ['value'] contains
    the 'u' component and set the 'v' component to 0.0

    :param datetime_value: numpy array of type basic_types.datetime_value_2d
        or basic_types.datetime_value_1d
    :param in_ts_format=None: format of the datetime_value_2d array - not
        required when converting from datetime_value_1d.  Can be defined
        by a string 'r-theta', 'uv' or by an integer defined by one of
        the options given in basic_types.ts_format.
    """
    if (datetime_value.dtype != basic_types.datetime_value_2d and
            datetime_value.dtype != basic_types.datetime_value_1d):
        raise ValueError('Method expects a numpy array containing '
                         'basic_types.datetime_value_2d or '
                         'basic_types.datetime_value_1d')

    # convert datetime_value_2d to time_value_pair
    time_value_pair = np.zeros((len(datetime_value), ),
                               dtype=basic_types.time_value_pair)
    time_value_pair['time'] = time_utils.date_to_sec(datetime_value['time'])

    if datetime_value.dtype == basic_types.datetime_value_1d:
        # bugfix: 1d data has a flat 'value' column -- indexing it with
        # [:, 0] raised IndexError; copy the whole 1-D column into 'u'
        # ('v' stays 0.0 from np.zeros)
        time_value_pair['value']['u'] = datetime_value['value'][:]
    else:
        if in_ts_format is None:
            raise ValueError("for datetime_value_2d data conversion, the "
                             "format defined by 'in_ts_format', cannot "
                             "be None ")

        if isinstance(in_ts_format, basestring):
            # accept a string spec and normalize to the enum value
            in_ts_format = tsformat(in_ts_format)

        if in_ts_format == basic_types.ts_format.magnitude_direction:
            uv = transforms.r_theta_to_uv_wind(datetime_value['value'])
            time_value_pair['value']['u'] = uv[:, 0]
            time_value_pair['value']['v'] = uv[:, 1]
        elif in_ts_format == basic_types.ts_format.uv:
            time_value_pair['value']['u'] = datetime_value['value'][:, 0]
            time_value_pair['value']['v'] = datetime_value['value'][:, 1]
        else:
            raise ValueError('in_ts_format is not one of the two supported '
                             'types: '
                             'basic_types.ts_format.magnitude_direction, '
                             'basic_types.ts_format.uv')

    return time_value_pair
def to_time_value_pair(datetime_value, in_ts_format=None):
    """
    Convert a numpy array of basic_types.datetime_value_2d (in the user
    specified basic_types.ts_format) or of basic_types.datetime_value_1d
    into a time_value_pair array.

    For 1-D input, the ['value'] field supplies the 'u' component and the
    'v' component is left at 0.0.

    :param datetime_value: numpy array of type basic_types.datetime_value_2d
        or basic_types.datetime_value_1d
    :param in_ts_format=None: format of the datetime_value_2d array - not
        required when converting from datetime_value_1d.  Can be defined by
        a string 'r-theta', 'uv' or by an integer defined by one of the
        options given in basic_types.ts_format.
    :raises ValueError: if the dtype or format is not supported.
    """
    dtype = datetime_value.dtype
    if dtype not in (basic_types.datetime_value_2d,
                     basic_types.datetime_value_1d):
        raise ValueError('Method expects a numpy array containing '
                         'basic_types.datetime_value_2d or '
                         'basic_types.datetime_value_1d')

    # output starts zeroed, so untouched components remain 0.0
    result = np.zeros((len(datetime_value), ),
                      dtype=basic_types.time_value_pair)
    result['time'] = time_utils.date_to_sec(datetime_value['time'])

    if dtype == basic_types.datetime_value_1d:
        # 1-D data: single component goes into 'u'; 'v' stays 0.0
        result['value']['u'] = datetime_value['value'][:]
        return result

    # 2-D data: a format must be supplied
    if in_ts_format is None:
        raise ValueError("for datetime_value_2d data conversion, "
                         "the format defined by 'in_ts_format' "
                         "cannot be None ")

    if isinstance(in_ts_format, basestring):
        in_ts_format = tsformat(in_ts_format)

    if in_ts_format == basic_types.ts_format.magnitude_direction:
        u_v = transforms.r_theta_to_uv_wind(datetime_value['value'])
    elif in_ts_format == basic_types.ts_format.uv:
        u_v = datetime_value['value']
    else:
        raise ValueError('in_ts_format is not one of the two supported '
                         'types: '
                         'basic_types.ts_format.magnitude_direction, '
                         'basic_types.ts_format.uv')

    result['value']['u'] = u_v[:, 0]
    result['value']['v'] = u_v[:, 1]
    return result
def test_move_curv(self):
    """
    Test move for a curvilinear grid (first time in file).

    Runs a certain + uncertain move, checks the deltas against known-good
    values, then repeats with an uncertainty time delay and verifies there
    is no vertical motion.
    """
    time = datetime.datetime(2006, 3, 31, 21)
    self.cm.model_time = date_to_sec(time)
    self.cm.uncertain = True

    time_grid_file = get_datafile(os.path.join(winds_dir,
                                               'WindSpeedDirSubset.nc'))
    topology_file = get_datafile(os.path.join(winds_dir,
                                              'WindSpeedDirSubsetTop.dat'))
    self.gcm.text_read(time_grid_file, topology_file)

    self.cm.ref[:]['long'] = -122.934656  # for NWS off CA
    self.cm.ref[:]['lat'] = 38.27594

    self.check_move_certain_uncertain(self.gcm.uncertain_time_delay)

    # known-good deltas for this reference point / file
    actual = np.empty((self.cm.num_le, ), dtype=world_point)
    actual[:]['lat'] = 0.0009890068148185598
    actual[:]['long'] = 0.0012165959734995123
    actual[:]['z'] = 0.

    tol = 1e-5
    msg = '{0} move is not within a tolerance of {1}'
    np.testing.assert_allclose(self.cm.delta['lat'], actual['lat'],
                               tol, tol,
                               msg.format('WindSpeedDirSubset.nc', tol), 0)
    np.testing.assert_allclose(self.cm.delta['long'], actual['long'],
                               tol, tol,
                               msg.format('WindSpeedDirSubset.nc', tol), 0)

    # check that certain and uncertain are the same if uncertainty
    # is time delayed
    self.gcm.uncertain_time_delay = 10800  # cython expects time_delay in seconds
    self.check_move_certain_uncertain(self.gcm.uncertain_time_delay)

    # BUG FIX: the np.all() result was previously discarded, so the
    # no-vertical-motion check never actually failed; assert it.
    assert np.all(self.cm.delta['z'] == 0)
def to_time_value_pair(datetime_value_2d, in_ts_format):
    """
    Convert a numpy array of basic_types.datetime_value_2d, in the user
    specified basic_types.ts_format, into a time_value_pair array.

    :param datetime_value_2d: numpy array of type
        basic_types.datetime_value_2d
    :param in_ts_format: format of the array.  Can be defined by a string
        'r-theta', 'uv' or by an integer defined by one of the options
        given in basic_types.ts_format
    :raises ValueError: if the dtype or the format is not supported
    """
    if datetime_value_2d.dtype != basic_types.datetime_value_2d:
        raise ValueError("Method expects a numpy array containing "
                         "basic_types.datetime_value_2d")

    time_value_pair = np.zeros((len(datetime_value_2d),),
                               dtype=basic_types.time_value_pair)
    # the time column is identical for both formats -- set it once
    time_value_pair["time"] = \
        time_utils.date_to_sec(datetime_value_2d["time"])

    # isinstance instead of exact type() comparison: accepts str subclasses
    if isinstance(in_ts_format, str):
        in_ts_format = tsformat(in_ts_format)

    if in_ts_format == basic_types.ts_format.magnitude_direction:
        uv = transforms.r_theta_to_uv_wind(datetime_value_2d["value"])
        time_value_pair["value"]["u"] = uv[:, 0]
        time_value_pair["value"]["v"] = uv[:, 1]
    elif in_ts_format == basic_types.ts_format.uv:
        time_value_pair["value"]["u"] = datetime_value_2d["value"][:, 0]
        time_value_pair["value"]["v"] = datetime_value_2d["value"][:, 1]
    else:
        raise ValueError(
            "in_ts_format is not one of the two supported types: "
            "basic_types.ts_format.magnitude_direction, "
            "basic_types.ts_format.uv"
        )

    return time_value_pair
def test_variable_wind_after_model_time_with_extrapolation(self):
    '''
    Verify the wind mover behaves properly with out-of-bounds winds when
    extrapolation is enabled.

    A variable wind can extrapolate if it is configured to do so, so
    prepare_for_model_step() should succeed in this case, and the windages
    should be updated in the same way as the in-bounds test.
    '''
    # wind series starts one day after model time
    wind_time = datetime(2012, 8, 21, 13)

    series = (np.zeros((3, ), dtype=datetime_value_2d)
              .view(dtype=np.recarray))
    wind_sec = date_to_sec(wind_time)
    series.time = [sec_to_date(wind_sec + self.time_step * step)
                   for step in range(3)]
    series.value = np.array(((2., 25.), (2., 25.), (2., 25.)))

    mover = WindMover(Wind(timeseries=series.reshape(3),
                           extrapolation_is_allowed=True,
                           units='meter per second'))
    mover.prepare_for_model_run()

    model_sec = date_to_sec(self.model_time)
    for step in range(2):
        curr_time = sec_to_date(model_sec + self.time_step * step)
        before = np.copy(self.sc['windages'])

        mover.prepare_for_model_step(self.sc, self.time_step, curr_time)

        # infinite-persistence elements keep their windages
        infinite = self.sc['windage_persist'] == -1
        assert np.all(self.sc['windages'][infinite] == before[infinite])

        # finite-persistence elements get fresh windages
        finite = self.sc['windage_persist'] > 0
        assert np.all(self.sc['windages'][finite] != before[finite])
def prepare_for_model_step(self, model_time):
    """
    Make sure we are up to date with the referenced time series.

    :param model_time: current model time (datetime); converted to
        seconds before the range checks.
    """
    model_time = date_to_sec(model_time)

    if self.ossm.check_time_in_range(model_time):
        # the current running average already covers this time
        return

    # NOTE(review): the original called create_running_average_timeseries()
    # twice when the wind time series covered model_time (once inside the
    # wind check and once right after it); the second call rebuilt the same
    # average, so a single unconditional rebuild is equivalent -- confirm
    # create_running_average_timeseries() is deterministic for a given time.
    self.create_running_average_timeseries(self._past_hours_to_average,
                                           model_time)
def test_move_curv(self):
    """
    Test move for a curvilinear grid (first time in file).

    Same check as the subset-file variant but reading the grid and
    topology paths from the testdata dict.
    """
    time = datetime.datetime(2006, 3, 31, 21)
    self.cm.model_time = date_to_sec(time)
    self.cm.uncertain = True

    self.gcm.text_read(testdata['GridWindMover']['wind_curv'],
                       testdata['GridWindMover']['top_curv'])

    self.cm.ref[:]['long'] = -122.934656  # for NWS off CA
    self.cm.ref[:]['lat'] = 38.27594

    self.check_move_certain_uncertain(self.gcm.uncertain_time_delay)

    # known-good deltas for this reference point / file
    actual = np.empty((self.cm.num_le, ), dtype=world_point)
    actual[:]['lat'] = 0.0009890068148185598
    actual[:]['long'] = 0.0012165959734995123
    actual[:]['z'] = 0.

    tol = 1e-5
    msg = '{0} move is not within a tolerance of {1}'
    np.testing.assert_allclose(self.cm.delta['lat'], actual['lat'],
                               tol, tol,
                               msg.format('WindSpeedDirSubset.nc', tol), 0)
    np.testing.assert_allclose(self.cm.delta['long'], actual['long'],
                               tol, tol,
                               msg.format('WindSpeedDirSubset.nc', tol), 0)

    # check that certain and uncertain are the same if uncertainty
    # is time delayed
    self.gcm.uncertain_time_delay = 10800  # cython expects time_delay in seconds
    self.check_move_certain_uncertain(self.gcm.uncertain_time_delay)

    # BUG FIX: the np.all() result was previously discarded, so the
    # no-vertical-motion check never actually failed; assert it.
    assert np.all(self.cm.delta['z'] == 0)