def _prep_plot_data(grid_T_hr, timezone, tidal_predictions):
    results_t_start, results_t_end = nc_tools.timestamp(grid_T_hr, (0, -1))
    ttide = shared.get_tides("Point Atkinson", tidal_predictions)
    ttide.time = ttide.time.dt.tz_convert(pytz.timezone(timezone))
    plot_data = namedtuple("PlotData", "results_t_start, results_t_end, ttide")
    return plot_data(
        results_t_start.to(timezone), results_t_end.to(timezone), ttide
    )

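# Hedged usage sketch for the helper above, not part of the original module.
# The dataset file name, timezone name, and tidal predictions path below are
# illustrative assumptions.
#
#   grid_T_hr = nc.Dataset("SalishSea_1h_20151213_20151213_grid_T.nc")  # assumed path
#   plot_data = _prep_plot_data(
#       grid_T_hr, "Canada/Pacific", "/results/tidal_predictions/")     # assumed args
#   # plot_data.results_t_start and plot_data.results_t_end are the run start/end
#   # timestamps localized to the requested timezone; plot_data.ttide holds the
#   # Point Atkinson tidal predictions table.
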
def _prep_plot_data(grids_15m, tidal_predictions, weather_path):
    max_ssh, max_ssh_time, risk_levels = {}, {}, {}
    u_wind_4h_avg, v_wind_4h_avg, max_wind_avg = {}, {}, {}
    for name in places.TIDE_GAUGE_SITES:
        ssh_ts = nc_tools.ssh_timeseries_at_point(
            grids_15m[name], 0, 0, datetimes=True
        )
        ttide = shared.get_tides(name, tidal_predictions)
        max_ssh[name], max_ssh_time[name] = shared.find_ssh_max(name, ssh_ts, ttide)
        risk_levels[name] = stormtools.storm_surge_risk_level(
            name, max_ssh[name], ttide
        )
        wind_avg = wind_tools.calc_wind_avg_at_point(
            arrow.get(max_ssh_time[name]),
            weather_path,
            places.PLACES[name]["wind grid ji"],
            avg_hrs=-4,
        )
        u_wind_4h_avg[name], v_wind_4h_avg[name] = wind_avg
        max_wind_avg[name], _ = wind_tools.wind_speed_dir(
            u_wind_4h_avg[name], v_wind_4h_avg[name]
        )
    plot_data = namedtuple(
        "PlotData",
        "ssh_ts, max_ssh, max_ssh_time, risk_levels, "
        "u_wind_4h_avg, v_wind_4h_avg, max_wind_avg",
    )
    # Note: ssh_ts here is the time series from the last site visited in the
    # loop above; the per-site results are carried in the dicts.
    return plot_data(
        ssh_ts,
        max_ssh,
        max_ssh_time,
        risk_levels,
        u_wind_4h_avg,
        v_wind_4h_avg,
        max_wind_avg,
    )

def plot_wlev_residual_NOAA(t_orig, elements, figsize=(20, 6)):
    """Plots the water level residual as calculated by the function
    calculate_wlev_residual_NOAA and has the option to also plot the
    observed water levels and predicted tides over the course of one day.

    :arg t_orig: The beginning of the date range of interest.
    :type t_orig: datetime object

    :arg elements: Elements included in figure.
                   'residual' for residual only and 'all' for residual,
                   observed water level, and predicted tides.
    :type elements: string

    :arg figsize: Figure size (width, height) in inches.
    :type figsize: 2-tuple

    :returns: fig
    """
    tides = shared.get_tides("Neah Bay", path=paths["tides"])
    residual, obs = obs_residual_ssh_NOAA("Neah Bay", tides, t_orig, t_orig)

    # Figure
    fig, ax = plt.subplots(1, 1, figsize=figsize)

    # Plot residual
    ax.plot(
        obs.time,
        residual,
        colours["residual"],
        label="Observed Residual",
        linewidth=2.5,
    )
    if elements == "all":
        ax.plot(
            obs.time,
            obs.wlev,
            colours["observed"],
            label="Observed Water Level",
            lw=2.5,
        )
        ax.plot(
            tides.time,
            tides.pred[tides.time == obs.time],
            colours["predicted"],
            label="Tidal Predictions",
            linewidth=2.5,
        )
    ax.set_title(
        "Residual of the observed water levels at"
        " Neah Bay: {t:%d-%b-%Y}".format(t=t_orig)
    )
    ax.set_ylim([-3.0, 3.0])
    ax.set_xlabel("[hrs]")
    hfmt = mdates.DateFormatter("%m/%d %H:%M")
    ax.xaxis.set_major_formatter(hfmt)
    ax.legend(loc=2, ncol=3)
    ax.grid()

    return fig

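# Hedged usage sketch, not part of the original module; the date and output
# file name are illustrative assumptions.
#
#   t_orig = datetime.datetime(2015, 12, 13)
#   fig = plot_wlev_residual_NOAA(t_orig, elements="all")
#   fig.savefig("NeahBay_residual.png")  # assumed output path
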
def _prep_plot_data(
    place,
    grid_T_hr,
    grids_15m,
    bathy,
    timezone,
    weather_path,
    tidal_predictions,
):
    ssh_hr = grid_T_hr.variables['sossheig']
    time_ssh_hr = nc_tools.timestamp(
        grid_T_hr, range(grid_T_hr.variables['time_counter'].size))
    try:
        j, i = places.PLACES[place]['NEMO grid ji']
    except KeyError as e:
        raise KeyError(
            f'place name or info key not found in '
            f'salishsea_tools.places.PLACES: {e}')
    itime_max_ssh = np.argmax(ssh_hr[:, j, i])
    time_max_ssh_hr = time_ssh_hr[itime_max_ssh]
    ssh_15m_ts = nc_tools.ssh_timeseries_at_point(
        grids_15m[place], 0, 0, datetimes=True)
    ttide = shared.get_tides(place, tidal_predictions)
    ssh_corr = shared.correct_model_ssh(ssh_15m_ts.ssh, ssh_15m_ts.time, ttide)
    max_ssh_15m, time_max_ssh_15m = shared.find_ssh_max(
        place, ssh_15m_ts, ttide)
    tides_15m = shared.interp_to_model_time(
        ssh_15m_ts.time, ttide.pred_all, ttide.time)
    residual = ssh_corr - tides_15m
    max_ssh_residual = residual[ssh_15m_ts.time == time_max_ssh_15m][0]
    wind_4h_avg = wind_tools.calc_wind_avg_at_point(
        arrow.get(time_max_ssh_15m), weather_path,
        places.PLACES[place]['wind grid ji'], avg_hrs=-4)
    wind_4h_avg = wind_tools.wind_speed_dir(*wind_4h_avg)
    plot_data = namedtuple(
        'PlotData',
        'ssh_max_field, time_max_ssh_hr, ssh_15m_ts, ssh_corr, '
        'max_ssh_15m, time_max_ssh_15m, residual, max_ssh_residual, '
        'wind_4h_avg, '
        'ttide, bathy')
    return plot_data(
        ssh_max_field=ssh_hr[itime_max_ssh],
        time_max_ssh_hr=time_max_ssh_hr.to(timezone),
        ssh_15m_ts=ssh_15m_ts,
        ssh_corr=ssh_corr,
        max_ssh_15m=max_ssh_15m - places.PLACES[place]['mean sea lvl'],
        time_max_ssh_15m=arrow.get(time_max_ssh_15m).to(timezone),
        residual=residual,
        max_ssh_residual=max_ssh_residual,
        wind_4h_avg=wind_4h_avg,
        ttide=ttide,
        bathy=bathy,
    )

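# Hedged usage sketch for the helper above, not part of the original module.
# The place name, dataset handles, and paths are illustrative assumptions.
#
#   plot_data = _prep_plot_data(
#       'Point Atkinson', grid_T_hr, grids_15m, bathy, 'Canada/Pacific',
#       '/results/forcing/atmospheric/',     # assumed weather path
#       '/results/tidal_predictions/')       # assumed predictions path
#   # plot_data.max_ssh_15m is the maximum 15-minute sea surface height
#   # referenced to the site's mean sea level, and plot_data.wind_4h_avg is the
#   # (speed, direction) of the 4-hour average wind preceding that maximum.
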
def plot_residual_forcing(ax, runs_list, t_orig):
    """Plots the observed water level residual at Neah Bay against
    forced residuals from existing ssh*.txt files for Neah Bay.
    Function may produce none, any, or all (nowcast, forecast, forecast 2)
    forced residuals depending on availability for specified date (runs_list).

    :arg ax: The axis where the residuals are plotted.
    :type ax: axis object

    :arg runs_list: Runs that are verified as complete.
    :type runs_list: list

    :arg t_orig: Date being considered.
    :type t_orig: datetime object
    """
    # truncation times
    sdt = t_orig.replace(tzinfo=tz.tzutc())
    edt = sdt + datetime.timedelta(days=1)

    # retrieve observations, tides and residual
    tides = shared.get_tides("Neah Bay", path=paths["tides"])
    res_obs, obs = obs_residual_ssh_NOAA("Neah Bay", tides, sdt, sdt)
    # truncate and plot
    res_obs_trun, time_trun = analyze.truncate_data(
        np.array(res_obs), np.array(obs.time), sdt, edt
    )
    ax.plot(time_trun, res_obs_trun, colours["observed"], label="observed", lw=2.5)

    # plot forcing for each simulation
    for mode in runs_list:
        filename_NB, run_date = analyze.create_path(mode, t_orig, "ssh*.txt")
        if filename_NB:
            dates, surge, fflag = NeahBay_forcing_anom(
                filename_NB, run_date, paths["tides"]
            )
            surge_t, dates_t = analyze.truncate_data(
                np.array(surge), np.array(dates), sdt, edt
            )
            ax.plot(dates_t, surge_t, label=mode, lw=2.5, color=colours[mode])
    ax.set_title(
        "Comparison of observed and forced sea surface"
        " height residuals at Neah Bay:"
        " {t_forcing:%d-%b-%Y}".format(t_forcing=t_orig)
    )

def get_error_forcing(runs_list, t_orig):
    """Sets up the calculation for the forcing residual error.

    :arg runs_list: Runs that have been verified as complete.
    :type runs_list: list

    :arg t_orig: Date being considered.
    :type t_orig: datetime object

    :returns: error_frc_dict, t_frc_dict
    """
    # truncation times
    sdt = t_orig.replace(tzinfo=tz.tzutc())
    edt = sdt + datetime.timedelta(days=1)

    # retrieve observed residual
    tides = shared.get_tides("Neah Bay", path=paths["tides"])
    res_obs, obs = obs_residual_ssh_NOAA("Neah Bay", tides, sdt, sdt)
    res_obs_trun, time_trun = analyze.truncate_data(
        np.array(res_obs), np.array(obs.time), sdt, edt
    )

    # calculate forcing error
    error_frc_dict = {}
    t_frc_dict = {}
    for mode in runs_list:
        filename_NB, run_date = analyze.create_path(mode, t_orig, "ssh*.txt")
        if filename_NB:
            dates, surge, fflag = NeahBay_forcing_anom(
                filename_NB, run_date, paths["tides"]
            )
            surge_t, dates_t = analyze.truncate_data(
                np.array(surge), np.array(dates), sdt, edt
            )
            error_frc = analyze.calculate_error(
                surge_t, dates_t, res_obs_trun, obs.time
            )
            error_frc_dict[mode] = error_frc
            t_frc_dict[mode] = dates_t

    return error_frc_dict, t_frc_dict

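# Hedged usage sketch, not part of the original module; the run modes and date
# are illustrative assumptions.
#
#   runs_list = ["nowcast", "forecast"]
#   t_orig = datetime.datetime(2015, 12, 13)
#   error_frc, t_frc = get_error_forcing(runs_list, t_orig)
#   # error_frc["nowcast"] is the forcing residual error series evaluated
#   # against the Neah Bay observations, aligned with the times in
#   # t_frc["nowcast"].
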
def compare_errors(name, mode, start, end, grid_B, figsize=(20, 12)):
    """Compares the model and forcing error at a station between dates
    start and end for a simulation mode.
    """
    # array of dates for iteration
    numdays = (end - start).days
    dates = [start + datetime.timedelta(days=num) for num in range(0, numdays + 1)]
    dates.sort()

    # initialize figure and arrays
    fig, axs = plt.subplots(3, 1, figsize=figsize)
    force, model, time, daily_time = combine_errors(name, mode, dates, grid_B)
    ttide = shared.get_tides(name, path=paths["tides"])

    # Plotting time series
    ax = axs[0]
    ax.plot(time, force["error"], "b", label="Forcing error", lw=2)
    ax.plot(time, model["error"], "g", lw=2, label="Model error")
    ax.set_title("Comparison of {mode} error at {name}".format(mode=mode, name=name))
    ax.set_ylim([-0.4, 0.4])

    # Plotting daily mean
    ax = axs[1]
    ax.plot(daily_time, force["daily"], "b", label="Forcing daily mean error", lw=2)
    ax.plot(
        [time[0], time[-1]],
        [np.nanmean(force["error"]), np.nanmean(force["error"])],
        "--b",
        label="Mean forcing error",
        lw=2,
    )
    ax.plot(daily_time, model["daily"], "g", lw=2, label="Model daily mean error")
    ax.plot(
        [time[0], time[-1]],
        [np.nanmean(model["error"]), np.nanmean(model["error"])],
        "--g",
        label="Mean model error",
        lw=2,
    )
    ax.set_title(
        "Comparison of {mode} daily mean error at {name}".format(mode=mode, name=name)
    )
    ax.set_ylim([-0.4, 0.4])

    # Plot tides
    ax = axs[2]
    ax.plot(ttide.time, ttide.pred_all, "k", lw=2, label="tides")
    ax.set_title("Tidal predictions")
    ax.set_ylim([-3, 3])

    # format axes
    hfmt = mdates.DateFormatter("%m/%d %H:%M")
    for ax in axs:
        ax.xaxis.set_major_formatter(hfmt)
        ax.legend(loc=2, ncol=4)
        ax.grid()
        ax.set_xlim([start, end + datetime.timedelta(days=1)])
        ax.set_ylabel("[m]")

    return fig

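# Hedged usage sketch, not part of the original module; the station name,
# dates, and bathymetry file are illustrative assumptions.
#
#   grid_B = nc.Dataset("bathy_meter_SalishSea2.nc")  # assumed bathymetry file
#   fig = compare_errors(
#       "Point Atkinson", "nowcast",
#       datetime.datetime(2015, 12, 1), datetime.datetime(2015, 12, 7),
#       grid_B)
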
def get_error_model(names, runs_list, grid_B, t_orig):
    """Sets up the calculation for the model residual error.

    :arg names: Names of stations.
    :type names: list of strings

    :arg runs_list: Runs that have been verified as complete.
    :type runs_list: list

    :arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
    :type grid_B: :class:`netCDF4.Dataset`

    :arg t_orig: Date being considered.
    :type t_orig: datetime object

    :returns: error_mod_dict, t_mod_dict
    """
    bathy, X, Y = tidetools.get_bathy_data(grid_B)
    t_orig_obs = t_orig + datetime.timedelta(days=-1)
    t_final_obs = t_orig + datetime.timedelta(days=1)

    # truncation times
    sdt = t_orig.replace(tzinfo=tz.tzutc())
    edt = sdt + datetime.timedelta(days=1)

    error_mod_dict = {}
    t_mod_dict = {}
    for name in names:
        error_mod_dict[name] = {}
        t_mod_dict[name] = {}
        # Look up model grid
        lat = SITES[name]["lat"]
        lon = SITES[name]["lon"]
        j, i = geo_tools.find_closest_model_point(
            lon, lat, X, Y, land_mask=bathy.mask
        )
        # Observed residuals and water levels and tides
        ttide = shared.get_tides(name, path=paths["tides"])
        res_obs, wlev_meas = obs_residual_ssh(name, ttide, t_orig_obs, t_final_obs)
        res_obs_trun, time_obs_trun = analyze.truncate_data(
            np.array(res_obs), np.array(wlev_meas.time), sdt, edt
        )
        for mode in runs_list:
            filename, run_date = analyze.create_path(
                mode, t_orig, "SalishSea_1h_*_grid_T.nc"
            )
            grid_T = nc.Dataset(filename)
            res_mod, t_model, ssh_corr, ssh_mod = model_residual_ssh(
                grid_T, j, i, ttide
            )
            # Truncate
            res_mod_trun, t_mod_trun = analyze.truncate_data(
                res_mod, t_model, sdt, edt
            )
            # Error
            error_mod = analyze.calculate_error(
                res_mod_trun, t_mod_trun, res_obs_trun, time_obs_trun
            )
            error_mod_dict[name][mode] = error_mod
            t_mod_dict[name][mode] = t_mod_trun

    return error_mod_dict, t_mod_dict

def plot_residual_model(axs, names, runs_list, grid_B, t_orig):
    """Plots the observed sea surface height residual against the
    sea surface height model residual (calculate_residual) at
    specified stations. Function may produce none, any, or all
    (nowcast, forecast, forecast 2) model residuals depending on
    availability for specified date (runs_list).

    :arg axs: The axes where the residuals are plotted.
    :type axs: list of axes

    :arg names: Names of stations.
    :type names: list of strings

    :arg runs_list: Runs that have been verified as complete.
    :type runs_list: list

    :arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
    :type grid_B: :class:`netCDF4.Dataset`

    :arg t_orig: Date being considered.
    :type t_orig: datetime object
    """
    bathy, X, Y = tidetools.get_bathy_data(grid_B)
    t_orig_obs = t_orig + datetime.timedelta(days=-1)
    t_final_obs = t_orig + datetime.timedelta(days=1)

    # truncation times
    sdt = t_orig.replace(tzinfo=tz.tzutc())
    edt = sdt + datetime.timedelta(days=1)

    for ax, name in zip(axs, names):
        # Identify model grid point
        lat = SITES[name]["lat"]
        lon = SITES[name]["lon"]
        j, i = geo_tools.find_closest_model_point(
            lon, lat, X, Y, land_mask=bathy.mask
        )
        # Observed residuals and water levels and tides
        ttide = shared.get_tides(name, path=paths["tides"])
        res_obs, wlev_meas = obs_residual_ssh(name, ttide, t_orig_obs, t_final_obs)
        # truncate and plot
        res_obs_trun, time_obs_trun = analyze.truncate_data(
            np.array(res_obs), np.array(wlev_meas.time), sdt, edt
        )
        ax.plot(
            time_obs_trun,
            res_obs_trun,
            c=colours["observed"],
            lw=2.5,
            label="observed",
        )
        for mode in runs_list:
            filename, run_date = analyze.create_path(
                mode, t_orig, "SalishSea_1h_*_grid_T.nc"
            )
            grid_T = nc.Dataset(filename)
            res_mod, t_model, ssh_corr, ssh_mod = model_residual_ssh(
                grid_T, j, i, ttide
            )
            # truncate and plot
            res_mod_trun, t_mod_trun = analyze.truncate_data(
                res_mod, t_model, sdt, edt
            )
            ax.plot(t_mod_trun, res_mod_trun, label=mode, c=colours[mode], lw=2.5)
        ax.set_title(
            "Comparison of modelled sea surface height residuals at"
            " {station}: {t:%d-%b-%Y}".format(station=name, t=t_orig)
        )

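# Hedged usage sketch, not part of the original module; the station names,
# run modes, and date are illustrative assumptions.
#
#   fig, axs = plt.subplots(3, 1, figsize=(20, 12))
#   plot_residual_model(
#       axs, ["Point Atkinson", "Victoria", "Campbell River"],
#       ["nowcast", "forecast"], grid_B, datetime.datetime(2015, 12, 13))
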
def _prep_plot_data(
    place,
    ssh_fcst_dataset_url_tmpl,
    tidal_predictions,
    forecast_hrs,
    weather_path,
    bathy,
    grid_T_hr_path,
):
    # NEMO sea surface height forecast dataset
    ssh_forecast = _get_ssh_forecast(place, ssh_fcst_dataset_url_tmpl)
    # CHS water level observations dataset
    try:
        obs_1min = (
            data_tools.get_chs_tides(
                "obs",
                place,
                arrow.get(str(ssh_forecast.time.values[0]))
                - timedelta(seconds=5 * 60),
                arrow.get(str(ssh_forecast.time.values[-1])),
            )
            .to_xarray()
            .rename({"index": "time"})
        )
        obs_10min_avg = obs_1min.resample(time="10min", loffset="5min").mean()
        obs = obs_10min_avg.to_dataset(name="water_level")
    except (AttributeError, KeyError):
        # No observations available
        obs = None
    shared.localize_time(ssh_forecast)
    try:
        shared.localize_time(obs)
    except (IndexError, AttributeError):
        # No observations available
        obs = None
    model_ssh_period = slice(
        str(ssh_forecast.time.values[0]), str(ssh_forecast.time.values[-1])
    )
    forecast_period = slice(
        str(ssh_forecast.time.values[-forecast_hrs * 6]),
        str(ssh_forecast.time.values[-1]),
    )
    try:
        obs_period = slice(str(obs.time.values[0]), str(obs.time.values[-1]))
    except AttributeError:
        # No observations available
        obs_period = None
    # Predicted tide water levels dataset from ttide
    ttide = shared.get_tides(place, tidal_predictions)
    ttide.rename(columns={" pred_noshallow ": "pred_noshallow"}, inplace=True)
    ttide.index = pandas.to_datetime(ttide.time.values, format="%Y-%m-%d %H:%M:%S")
    ttide_ds = ttide.to_xarray().drop_vars(["time"]).rename({"index": "time"})
    # Localize ttide dataset timezone to ssh_forecast times because ttide
    # extends well beyond ends of ssh_forecast period
    shared.localize_time(
        ttide_ds,
        local_datetime=arrow.get(str(ssh_forecast.time.values[0])).to("local"),
    )
    # NEMO sea surface height dataset corrected to include unmodeled tide constituents
    ssh_correction = ttide_ds.pred_noshallow.sel(
        time=model_ssh_period
    ) - ttide_ds.pred_8.sel(time=model_ssh_period)
    ssh_corrected = ssh_forecast + ssh_correction
    # Mean sea level and extreme water levels
    msl = PLACES[place]["mean sea lvl"]
    extreme_ssh = PLACES[place]["hist max sea lvl"]
    max_tides = ttide.pred_all.max() + msl
    mid_tides = 0.5 * (extreme_ssh - max_tides) + max_tides
    thresholds = (max_tides, mid_tides, extreme_ssh)
    max_ssh = ssh_corrected.ssh.sel(time=forecast_period)
    max_ssh = max_ssh.where(max_ssh == max_ssh.max(), drop=True).squeeze()
    # Residual differences between corrected model and observations and predicted tides
    model_residual = ssh_corrected - ttide_ds.pred_all.sel(time=model_ssh_period)
    model_residual.attrs["tz_name"] = ssh_forecast.attrs["tz_name"]
    max_model_residual = model_residual.max()
    try:
        obs_residual = obs - ttide_ds.pred_all.sel(time=obs_period) - msl
        obs_residual.attrs["tz_name"] = obs.attrs["tz_name"]
    except KeyError:
        # No observations available
        obs_residual = None
    # Wind at NEMO model time of max sea surface height
    wind_4h_avg = wind_tools.calc_wind_avg_at_point(
        arrow.get(str(max_ssh.time.values)),
        weather_path,
        PLACES[place]["wind grid ji"],
        avg_hrs=-4,
    )
    wind_4h_avg = wind_tools.wind_speed_dir(*wind_4h_avg)
    # Model sea surface height field for contour map
    tracers_ds = xarray.open_dataset(grid_T_hr_path)
    max_ssh_time_utc = (
        arrow.get(str(max_ssh.time.values))
        .replace(tzinfo=ssh_forecast.attrs["tz_name"])
        .to("utc")
    )
    return SimpleNamespace(
        ssh_forecast=ssh_forecast,
        obs=obs,
        ttide=ttide_ds,
        ssh_corrected=ssh_corrected,
        msl=msl,
        thresholds=thresholds,
        max_ssh=max_ssh,
        model_residual=model_residual,
        max_model_residual=max_model_residual,
        obs_residual=obs_residual,
        wind_4h_avg=wind_4h_avg,
        bathy=bathy,
        max_ssh_field=tracers_ds.sossheig.sel(
            time_counter=max_ssh_time_utc.naive, method="nearest"
        ),
    )

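# Hedged worked example of the alert threshold arithmetic in the helper above;
# the numbers are illustrative assumptions, not values from PLACES.
#
#   msl = 3.10 m, extreme_ssh = 5.61 m, ttide.pred_all.max() = 2.21 m
#   max_tides = 2.21 + 3.10 = 5.31 m
#   mid_tides = 0.5 * (5.61 - 5.31) + 5.31 = 5.46 m
#   thresholds = (5.31, 5.46, 5.61)
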