def test_date_time_range_mismatch(self):
    """Mismatched date-time ranges between products must raise ValueError.

    The mocked water-level response is missing its first two entries and
    the mocked predictions response is missing its last two, so the two
    series cannot be aligned.
    """
    cache_dir = os.path.join(self.temp_dir,
                             self.test_date_time_range_mismatch.__name__)

    # missing first two entries
    water_level_response = \
        ('Date Time, Water Level, Sigma, O or I (for verified), F, R, L, Quality\n'
         '2000-10-30 12:12,1.003,0.003,0,0,0,0,v\n'
         '2000-10-30 12:18,1.004,0.004,0,0,0,0,v\n'
         '2000-10-30 12:24,1.005,0.005,0,0,0,0,v\n')

    # missing last two entries
    predictions_response = ('Date Time, Prediction\n'
                            '2000-10-30 12:00,1.101\n'
                            '2000-10-30 12:06,1.102\n'
                            '2000-10-30 12:12,1.103\n')

    # monkey patch urllib to return mock data
    def mock_read_response(url):
        if 'product=water_level' in url:
            return water_level_response
        if 'product=predictions' in url:
            return predictions_response
        raise AssertionError

    self._monkey_patch_urlopen(mock_read_response)

    # should raise ValueError -- wrap the call so the expected exception is
    # asserted rather than propagating and erroring the test
    with self.assertRaises(ValueError):
        fetch_noaa_tide_data(self.station, self.begin_date, self.end_date,
                             cache_dir=cache_dir)
def test_date_time_range_mismatch(self):
    """Mismatched date-time ranges between products must raise ValueError.

    The mocked water-level response is missing its first two entries and
    the mocked predictions response is missing its last two, so the two
    series cannot be aligned.
    """
    cache_dir = os.path.join(self.temp_dir,
                             self.test_date_time_range_mismatch.__name__)

    # missing first two entries
    water_level_response = \
        ('Date Time, Water Level, Sigma, O, F, R, L, Quality\n'
         '2000-10-30 12:12,1.003,0.003,0,0,0,0,v\n'
         '2000-10-30 12:18,1.004,0.004,0,0,0,0,v\n'
         '2000-10-30 12:24,1.005,0.005,0,0,0,0,v\n')

    # missing last two entries
    predictions_response = ('Date Time, Prediction\n'
                            '2000-10-30 12:00,1.101\n'
                            '2000-10-30 12:06,1.102\n'
                            '2000-10-30 12:12,1.103\n')

    # monkey patch urllib to return mock data
    def mock_read_response(url):
        if 'product=water_level' in url:
            return water_level_response
        if 'product=predictions' in url:
            return predictions_response
        raise AssertionError

    self._monkey_patch_urlopen(mock_read_response)

    # should raise ValueError -- wrap the call so the expected exception is
    # asserted rather than propagating and erroring the test
    with self.assertRaises(ValueError):
        fetch_noaa_tide_data(self.station, self.begin_date, self.end_date,
                             cache_dir=cache_dir)
def test_api_error(self):
    """An API error body (non-CSV text) must raise ValueError."""
    cache_dir = os.path.join(self.temp_dir, self.test_api_error.__name__)

    # configure endpoint to return an error
    self._monkey_patch_urlopen(lambda url: 'Something went wrong')

    # should raise ValueError -- wrap the call so the expected exception is
    # asserted rather than propagating and erroring the test
    with self.assertRaises(ValueError):
        fetch_noaa_tide_data(self.station, self.begin_date, self.end_date,
                             cache_dir=cache_dir)
def gauge_afteraxes(cd):
    """Overlay de-tided NOAA observations on the simulated gauge plot
    and format the axes for days relative to landfall."""
    axes = plt.gca()
    surgeplot.plot_landfall_gauge(cd.gaugesoln, axes)

    # NOAA station ids corresponding to gauges 1-6
    noaaArr = ["8557380", "8639348", "8662245", "2695540", "8531680",
               "8510560"]

    gaugeNumber = cd.gaugeno
    # only looking at gauge 1-6 because rest of data not from NOAA Gauges
    if gaugeNumber < 7:
        realData = geoutil.fetch_noaa_tide_data(
            noaaArr[gaugeNumber - 1],
            datetime.datetime(2015, 9, 30, hour=12),
            datetime.datetime(2015, 10, 6, hour=6))
        # de-tide: observed water level minus predicted tide
        values = realData[1] - realData[2]
        # timestamps -> days relative to landfall; the /1440 assumes the
        # datetime64 differences are in minutes -- TODO confirm resolution
        landfall = numpy.datetime64("2015-10-02T12:00")
        times = [(stamp - landfall).astype(float) / 1440
                 for stamp in realData[0]]
        plt.plot(times, values, color="g", label="real")

    # Fix up plot - in particular fix time labels
    axes.set_title('Station %s' % cd.gaugeno)
    axes.set_xlabel('Days relative to landfall')
    axes.set_ylabel('Surface (m)')
    axes.set_xlim([-2, 3.75])
    axes.set_ylim([0, 4])
    axes.set_xticks([-2, -1, 0, 1, 2, 3])
    axes.set_xticklabels([r"$-2$", r"$-1$", r"$0$", r"$1$", r"$2$", r"$3$"])
    axes.grid(True)
def _fetch_and_assert(self, date_time_expected, water_level_expected,
                      prediction_expected, cache_dir):
    """Fetch tide data for the configured station/date range and verify
    each returned array matches the corresponding expected array."""
    # fetch data
    actual = fetch_noaa_tide_data(self.station, self.begin_date,
                                  self.end_date, cache_dir=cache_dir)
    expected = (date_time_expected, water_level_expected,
                prediction_expected)

    # make sure data was correctly retrieved
    for got, want in zip(actual, expected):
        np.testing.assert_equal(got, want)
def get_actual_water_levels(station_id):
    """Return (seconds relative to landfall, de-tided water level) for the
    given NOAA station, using the module-level begin/end/landfall times."""
    # Fetch water levels and tide predictions for given station
    date_time, water_level, tide = fetch_noaa_tide_data(
        station_id, begin_date, end_date)

    # Subtract tide predictions from measured water levels
    water_level -= tide

    # Calculate times relative to landfall
    one_second = np.timedelta64(1, 's')
    seconds_rel_landfall = (date_time - landfall_time) / one_second

    return seconds_rel_landfall, water_level
def get_actual_water_levels(station_id):
    """Return (seconds relative to landfall, de-tided water level) for the
    given NOAA station, using the module-level begin/end/landfall times."""
    # Fetch water levels and tide predictions for given station
    date_time, water_level, tide = fetch_noaa_tide_data(
        station_id, begin_date, end_date)

    # Subtract tide predictions from measured water levels
    water_level -= tide

    # Calculate times relative to landfall
    one_second = np.timedelta64(1, 's')
    secs_rel_landfall = (date_time - landfall_time) / one_second

    return secs_rel_landfall, water_level
def test_api_error(self):
    """An API error body (non-CSV text) must yield None for every product.

    The original assertion used ``d == None``; PEP 8 requires identity
    comparison with ``None``, and ``==`` against a numpy array (the normal
    return type here) would produce an ambiguous element-wise result.
    The water-level and prediction values are now checked as well.
    """
    cache_dir = os.path.join(self.temp_dir, self.test_api_error.__name__)

    # configure endpoint to return an error
    self._monkey_patch_urlopen(lambda url: 'Something went wrong')

    # should return None
    d, w, p = fetch_noaa_tide_data(self.station, self.begin_date,
                                   self.end_date, cache_dir=cache_dir)
    assert d is None, '*** expected d == None'
    assert w is None, '*** expected w == None'
    assert p is None, '*** expected p == None'
def gauge_afteraxes(cd):
    """Overlay de-tided NOAA observations on the simulated gauge plot
    and format the axes for days relative to landfall."""
    axes = plt.gca()
    surgeplot.plot_landfall_gauge(cd.gaugesoln, axes)

    # NOAA station ids and display titles for gauges 1-5
    gauge_id = ['8770822', '8768094', '8764227', '8761305', '8760922']
    gauge_title = ['Texas Point, Sabine Pass, TX',
                   'Calcasieu Pass, LA',
                   'LAWMA, Amerada Pass, LA',
                   'Shell Beach, LA',
                   'Pilots Station East, S.W. Pass, LA']

    if cd.gaugeno < 6:
        realData = util.fetch_noaa_tide_data(
            gauge_id[cd.gaugeno - 1],
            datetime.datetime(2019, 7, 10, hour=12),
            datetime.datetime(2019, 7, 16, hour=12),
            datum='MLLW')
        # de-tide: observed water level minus predicted tide
        values = realData[1] - realData[2]
        # timestamps -> days relative to landfall; the /1440 assumes the
        # datetime64 differences are in minutes -- TODO confirm resolution
        landfall = numpy.datetime64("2019-07-13T15:00")
        times = [(stamp - landfall).astype(float) / 1440
                 for stamp in realData[0]]
        plt.plot(times, values, color='orange', label='real')

    axes.set_title('Gauge %s: %s' % (cd.gaugeno,
                                     gauge_title[cd.gaugeno - 1]))
    axes.set_xlabel('Days relative to landfall')
    axes.set_ylabel('Surface (m)')
    axes.set_xlim([-2, 1])
    axes.set_ylim([-1.0, 2.5])
    axes.set_xticks([-2, -1, 0, 1])
    axes.set_xticklabels([r"$-2$", r"$-1$", r"$0$", r"$1$"])
    axes.grid(True)