def compare_database_precip():
    ''' Compare 'gold' and 'test' precip. '''

    value = 'precip_z'
    result = True

    gold = collect(gold_cnx, plotorder, basins, start_date, end_date, value,
                   run_name_gold, edges, 'sum')
    test = collect(test_cnx, plotorder_test, basins_test, start_date, end_date,
                   value, run_name_test, edges, 'sum')

    test_result = gold[plotorder].values - test[plotorder_test].values

    if np.sum(test_result) != 0:
        result = False

    gold = collect(gold_cnx, plotorder, basins, start_date, end_date, value,
                   run_name_gold, 'total', 'sum')
    test = collect(test_cnx, plotorder_test, basins_test, start_date, end_date,
                   value, run_name_test, 'total', 'sum')

    test_result = gold[plotorder].values - test[plotorder_test].values

    if np.sum(test_result) != 0:
        result = False

    return result
def compare_database_depth():
    ''' Compare 'gold' and 'test' depth. '''

    value = 'depth'
    result = True

    gold = collect(gold_cnx, plotorder, basins, start_date, end_date, value,
                   run_name_gold, edges, 'end')
    test = collect(test_cnx, plotorder_test, basins_test, start_date, end_date,
                   value, run_name_test, edges, 'end')

    test_result = gold[plotorder].values - test[plotorder_test].values

    if np.sum(test_result) != 0:
        result = False
        print('\nFailed depth\ngold:\n{}\ntest:\n{}\n'.format(gold, test))

    gold = collect(gold_cnx, plotorder, basins, start_date, end_date, value,
                   run_name_gold, 'total', 'end')
    test = collect(test_cnx, plotorder_test, basins_test, start_date, end_date,
                   value, run_name_test, 'total', 'end')

    test_result = gold[plotorder].values - test[plotorder_test].values

    if np.sum(test_result) != 0:
        result = False
        print('\nFailed depth total\ngold:\n{}\ntest:\n{}\n'.format(gold, test))

    return result
def compare_database_unavail():
    ''' Compare 'gold' and 'test' swe_unavail. '''

    value = 'swe_unavail'
    result = True

    gold = collect(gold_cnx, plotorder, basins, start_date, end_date, value,
                   run_name_gold, edges, 'end')
    test = collect(test_cnx, plotorder_test, basins_test, start_date, end_date,
                   value, run_name_test, edges, 'end')

    test_result = gold[plotorder].values - test[plotorder_test].values

    if np.sum(test_result) != 0:
        result = False

    return result
def check_gold_results():
    ''' Check database 'gold' values. '''

    value = 'swe_vol'
    gold_values = [3.075, 13.63, 9.543, 0.494]

    gold = collect(gold_cnx, plotorder, basins, start_date, end_date, value,
                   run_name_gold, edges, 'end')

    result = True
    for ix, edge in enumerate(edges):
        if (gold.iloc[ix, 0] - gold_values[ix]) != 0.0:
            result = False

    return result
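# A minimal sketch of how these database checks might be driven from a test
# harness. The wrapper name test_database_values is hypothetical, and the
# module-level names the checks rely on (gold_cnx, test_cnx, plotorder, basins,
# edges, run names, dates) are assumed to be set up by surrounding fixtures.
def test_database_values():
    assert compare_database_precip()
    assert compare_database_depth()
    assert compare_database_unavail()
    assert check_gold_results()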
def figures(cfg, process, db):
    """ Set up and call snowav figures. See CoreConfig.ini and README.md for
    more on config options and use.

    swe_volume() must be called before cold_content() if you want to use the
    same ylims for each.

    Args
    ------
    cfg: config class
    process: process class
    db: database class
    """

    args = {'report_start': cfg.report_start.date().strftime("%Y-%-m-%-d"),
            'report_date': cfg.report_date.date().strftime("%Y-%-m-%-d"),
            'run_name': cfg.run_name,
            'start_date': cfg.start_date,
            'end_date': cfg.end_date,
            'directory': cfg.directory,
            'figs_path': cfg.figs_path,
            'edges': cfg.edges,
            'plotorder': cfg.plotorder,
            'labels': cfg.labels,
            'lims': plotlims(cfg.plotorder),
            'masks': cfg.masks,
            'figsize': cfg.figsize,
            'dpi': cfg.dpi,
            'depthlbl': cfg.depthlbl,
            'vollbl': cfg.vollbl,
            'elevlbl': cfg.elevlbl,
            'dplcs': cfg.dplcs,
            'barcolors': cfg.barcolors,
            'xlims': cfg.xlims,
            'depth_clip': cfg.depth_clip,
            'percent_min': cfg.clims_percent[0],
            'percent_max': cfg.clims_percent[1],
            'basins': cfg.basins,
            'wy': cfg.wy,
            'flag': False,
            'flt_flag': cfg.flt_flag}

    if cfg.flt_flag:
        args['flight_dates'] = cfg.flight_diff_dates

    fig_names = {}
    connector = cfg.connector
    edges_str = [str(x) for x in list(cfg.edges)]  # + ['total']

    ##########################################################################
    #   For each figure, collect 2D array image, by-elevation DataFrame,    #
    #   and set any figure-specific args inputs                             #
    ##########################################################################

    if cfg.flt_flag:
        names, df = flt_image_change(cfg.update_file, cfg.update_numbers,
                                     cfg.end_date, cfg.flight_outputs,
                                     cfg.pre_flight_outputs, cfg.masks,
                                     cfg.flt_image_change_clims, cfg.barcolors,
                                     cfg.edges, cfg.connector, cfg.plotorder,
                                     cfg.wy, cfg.depth_factor, cfg.basins,
                                     cfg.run_name, cfg.figsize, cfg.depthlbl,
                                     cfg.elevlbl, cfg.vollbl, cfg.dplcs,
                                     cfg.figs_path, dpi=cfg.dpi,
                                     logger=cfg._logger)

        if len(names) == 0:
            cfg.flt_flag = False
        else:
            cfg.assign_vars({'flight_diff_fig_names': names})
            cfg.assign_vars({'flight_delta_vol_df': df})

    if cfg.swi_flag:
        image = np.zeros_like(cfg.outputs['swi_z'][0])
        for n in range(cfg.ixs, cfg.ixe):
            image = image + cfg.outputs['swi_z'][n] * cfg.depth_factor

        params = {Results: [('date_time', 'ge', cfg.start_date),
                            ('date_time', 'le', cfg.end_date),
                            ('variable', 'eq', 'swi_vol'),
                            ('run_id', 'eq', cfg.run_id),
                            ('value', 'ge', 0),
                            ('elevation', 'gt', '0')]}

        df = db.query(params)
        df = df.groupby('elevation').sum()
        df.rename(columns={'value': cfg.plotorder[0]}, inplace=True)
        df = df[cfg.plotorder].reindex(edges_str)

        title = 'Accumulated SWI\n{} to {}'.format(args['report_start'],
                                                   args['report_date'])

        swi(cfg.masks, image, df, cfg.plotorder, plotlims(cfg.plotorder),
            cfg.edges, cfg.labels, cfg.barcolors, cfg.vollbl, cfg.elevlbl,
            cfg.depthlbl, cfg.clims_percent, title, cfg.figsize, cfg.figs_path,
            cfg.swi_volume_fig_name, cfg.dplcs, cfg.xlims, dpi=200, logger=None)

    if cfg.image_change_flag:
        image = (cfg.outputs['swe_z'][cfg.ixe] -
                 cfg.outputs['swe_z'][cfg.ixs]) * cfg.depth_factor

        start = collect(connector, args['plotorder'], args['basins'],
                        args['start_date'], args['start_date'], 'swe_vol',
                        args['run_name'], args['edges'], 'end')
        end = collect(connector, args['plotorder'], args['basins'],
                      args['start_date'], args['end_date'], 'swe_vol',
                      args['run_name'], args['edges'], 'end')
        df = end - start

        title = 'Change in SWE Depth\n{} to {}'.format(args['report_start'],
                                                       args['report_date'])

        image_change(cfg.masks, image, df, cfg.plotorder,
                     plotlims(cfg.plotorder), cfg.edges, cfg.labels,
                     cfg.barcolors, cfg.clims_percent, cfg.vollbl, cfg.elevlbl,
                     cfg.depthlbl, cfg.xlims, cfg.figsize, title, cfg.dplcs,
                     cfg.figs_path, cfg.volume_change_fig_name, dpi=cfg.dpi,
                     logger=cfg._logger)

    if cfg.swe_volume_flag:
        df = collect(connector, cfg.plotorder, cfg.basins, cfg.start_date,
                     cfg.end_date, 'swe_vol', cfg.run_name, cfg.edges, 'end')

        image = cfg.outputs['swe_z'][cfg.ixe] * cfg.depth_factor
        args['image'] = image

        title = 'SWE {}'.format(args['report_date'])

        swe_ylims = swe_volume(cfg.masks, image, df, cfg.plotorder,
                               plotlims(cfg.plotorder), cfg.edges, cfg.labels,
                               cfg.barcolors, cfg.clims_percent, title,
                               cfg.depthlbl, cfg.vollbl, cfg.elevlbl, cfg.xlims,
                               cfg.dplcs, cfg.figs_path,
                               cfg.swe_volume_fig_name, cfg.figsize,
                               dpi=cfg.dpi, logger=cfg._logger)
    else:
        swe_ylims = None

    if cfg.cold_content_flag:
        swe = cfg.outputs['swe_z'][cfg.ixe]
        image = cfg.outputs['coldcont'][cfg.ixe] * 0.000001

        title = 'Cold Content {}'.format(args['report_date'])

        df = collect(connector, args['plotorder'], args['basins'],
                     args['start_date'], args['end_date'], 'swe_unavail',
                     args['run_name'], args['edges'], 'end')

        cold_content(cfg.masks, swe, image, df, cfg.plotorder,
                     plotlims(cfg.plotorder), cfg.edges, cfg.labels,
                     cfg.barcolors, title, cfg.vollbl, cfg.elevlbl, cfg.figsize,
                     cfg.dplcs, cfg.xlims, cfg.figs_path,
                     cfg.cold_content_fig_name, ylims=swe_ylims, dpi=cfg.dpi,
                     logger=cfg._logger)

    if cfg.density_flag:
        image = cfg.outputs['density'][cfg.ixe]

        title = 'Density {}'.format(args['report_date'])

        density(cfg.masks, image, process.density, cfg.plotorder,
                plotlims(cfg.plotorder), cfg.edges, cfg.barcolors,
                cfg.clims_percent, title, cfg.xlims, cfg.figsize, cfg.figs_path,
                cfg.density_fig_name, dpi=cfg.dpi, logger=cfg._logger)

    if cfg.basin_total_flag:
        wy_start = datetime(cfg.wy - 1, 10, 1)
        swi_summary = collect(connector, cfg.plotorder, cfg.basins, wy_start,
                              cfg.end_date, 'swi_vol', cfg.run_name, 'total',
                              'daily')
        df_swe = collect(connector, cfg.plotorder, cfg.basins, wy_start,
                         cfg.end_date, 'swe_vol', cfg.run_name, 'total',
                         'daily')
        df_swi = swi_summary.cumsum()

        basin_total(cfg.plotorder, cfg.labels, cfg.end_date, cfg.barcolors,
                    df_swi, df_swe, cfg.wy, cfg.vollbl, cfg.figsize,
                    cfg.figs_path, cfg.basin_total_fig_name, dpi=cfg.dpi,
                    flight_dates=cfg.flight_diff_dates, logger=cfg._logger,
                    flight_flag=cfg.flt_flag)

    if cfg.precip_depth_flag:
        swi_image = np.zeros_like(cfg.outputs['swi_z'][0])
        for n in range(cfg.ixs, cfg.ixe):
            swi_image = swi_image + cfg.outputs['swi_z'][n] * cfg.depth_factor

        swi_df = collect(connector, args['plotorder'], args['basins'],
                         args['start_date'], args['end_date'], 'swi_z',
                         args['run_name'], args['edges'], 'sum')
        precip_df = collect(connector, args['plotorder'], args['basins'],
                            args['start_date'], args['end_date'], 'precip_z',
                            args['run_name'], args['edges'], 'sum')
        rain_df = collect(connector, args['plotorder'], args['basins'],
                          args['start_date'], args['end_date'], 'rain_z',
                          args['run_name'], args['edges'], 'sum')

        precip_image = process.precip_total * cfg.depth_factor
        rain_image = process.rain_total * cfg.depth_factor

        title = 'Depth of SWI, Precipitation, and Rain\n{} to {}'.format(
            args['report_start'], args['report_date'])

        precip_depth(swi_image, precip_image, rain_image, swi_df, precip_df,
                     rain_df, cfg.plotorder, cfg.barcolors,
                     plotlims(cfg.plotorder), cfg.masks, cfg.edges, cfg.labels,
                     cfg.clims_percent, cfg.depthlbl, cfg.elevlbl, title,
                     cfg.xlims, cfg.figs_path, cfg.precip_fig_name,
                     logger=cfg._logger)

    if cfg.diagnostics_flag:
        wy_start = datetime(cfg.wy - 1, 10, 1)

        precip = collect(connector, args['plotorder'], args['basins'],
                         wy_start, args['end_date'], 'precip_z',
                         args['run_name'], args['edges'], 'daily')
        precip_per = collect(connector, args['plotorder'], args['basins'],
                             args['start_date'], args['end_date'], 'precip_z',
                             args['run_name'], args['edges'], 'daily')
        swe = collect(connector, args['plotorder'], args['basins'], wy_start,
                      args['end_date'], 'swe_z', args['run_name'],
                      args['edges'], 'daily')
        swe_per = collect(connector, args['plotorder'], args['basins'],
                          args['start_date'], args['end_date'], 'swe_z',
                          args['run_name'], args['edges'], 'daily')
        rho = collect(connector, args['plotorder'], args['basins'], wy_start,
                      args['end_date'], 'density', args['run_name'],
                      args['edges'], 'daily')
        rho_per = collect(connector, args['plotorder'], args['basins'],
                          args['start_date'], args['end_date'], 'density',
                          args['run_name'], args['edges'], 'daily')
        snow_line = collect(connector, args['plotorder'], args['basins'],
                            wy_start, args['end_date'], 'snow_line',
                            args['run_name'], args['edges'], 'daily')
        snow_line_per = collect(connector, args['plotorder'], args['basins'],
                                args['start_date'], args['end_date'],
                                'snow_line', args['run_name'], args['edges'],
                                'daily')
        evap_z = collect(connector, args['plotorder'], args['basins'],
                         wy_start, args['end_date'], 'evap_z',
                         args['run_name'], args['edges'], 'daily')
        evap_z_per = collect(connector, args['plotorder'], args['basins'],
                             args['start_date'], args['end_date'], 'evap_z',
                             args['run_name'], args['edges'], 'daily')

        snow_line_per = snow_line_per.fillna(0)
        first_row = snow_line_per.iloc[[0]].values[0]
        snow_line_per = snow_line_per.apply(lambda row: row - first_row, axis=1)

        args['snow_line'] = snow_line
        args['snow_line_per'] = snow_line_per

        swe = swe.fillna(0)
        swe_per = swe_per.fillna(0)
        first_row = swe_per.iloc[[0]].values[0]
        swe_per = swe_per.apply(lambda row: row - first_row, axis=1)

        evap_z = evap_z.fillna(0)
        evap_z_per = evap_z_per.fillna(0)
        first_row = evap_z_per.iloc[[0]].values[0]
        evap_z_per = evap_z_per.apply(lambda row: row - first_row, axis=1)

        rho = rho.fillna(0)
        rho_per = rho_per.fillna(0)
        first_row = rho_per.iloc[[0]].values[0]
        rho_per = rho_per.apply(lambda row: row - first_row, axis=1)

        precip = precip.fillna(0)
        precip_per = precip_per.fillna(0)
        precip = precip.cumsum()
        precip_per = precip_per.cumsum()
        first_row = precip_per.iloc[[0]].values[0]
        precip_per = precip_per.apply(lambda row: row - first_row, axis=1)

        if cfg.diag_basins is None:
            args['dbasins'] = copy.deepcopy(cfg.plotorder)
        else:
            args['dbasins'] = cfg.diag_basins

        args['precip'] = precip
        args['precip_per'] = precip_per
        args['swe'] = swe
        args['swe_per'] = swe_per
        args['evap_z'] = evap_z
        args['evap_z_per'] = evap_z_per
        args['density'] = rho
        args['density_per'] = rho_per
        args['elevlbl'] = cfg.elevlbl

        diagnostics(args, cfg._logger)

    if cfg.stn_validate_flag:
        px = (1, 1, 1, 0, 0, 0, -1, -1, -1)
        py = (1, 0, -1, 1, 0, -1, 1, 0, -1)
        login = {'user': cfg.wxdb_user,
                 'password': cfg.wxdb_password,
                 'host': cfg.wxdb_host,
                 'port': cfg.wxdb_port}

        flag = stn_validate(cfg.all_dirs, cfg.val_lbls, cfg.val_client,
                            args['end_date'], args['wy'], cfg.snow_x,
                            cfg.snow_y, cfg.val_stns, px, py, login,
                            args['figs_path'], cfg.stn_validate_fig_name,
                            cfg.dem, logger=cfg._logger, elevlbl=cfg.elevlbl,
                            nash_sut_flag=cfg.nash_sut_flag)

        if not flag:
            cfg.stn_validate_flag = False
    else:
        # assign fig name to cfg for use in report.py
        cfg.assign_vars({'stn_validate_fig_name': ''})

    if cfg.point_values:
        cfg._logger.debug(" Beginning point values processing for "
                          "{}".format(cfg.point_values_csv))

        flag = True
        xy = (cfg.snow_x, cfg.snow_y)
        headings = ['name', 'latitude', 'longitude', cfg.point_values_heading]
        end_date_str = cfg.end_date.date().strftime("%Y-%m-%d")
        course_date = cfg.point_values_date.date().strftime('%Y-%m-%d')
        basin_name = cfg.plotorder[0].split(" ")[0].lower()
        pv_date = cfg.point_values_date
        nsubplots = (cfg.point_values_settings[3] *
                     cfg.point_values_settings[4] - 1) - 1

        while flag:
            df = pd.read_csv(cfg.point_values_csv)

            for head in headings:
                if head not in df.columns.tolist():
                    cfg._logger.warn(' Required csv column "{}" not found, '
                                     'exiting point values'.format(head))
                    if head == cfg.point_values_heading:
                        cfg._logger.warning(' User specified [validate] '
                                            'point_values_heading: {} not '
                                            'found'.format(head))
                    flag = False

            if pv_date is None:
                cfg._logger.info(' Value in [validate] point_values_date '
                                 'being assigned to {}'.format(end_date_str))
                pv_date = cfg.end_date

            if pv_date < cfg.start_date or pv_date > cfg.end_date:
                cfg._logger.info(' Value in [validate] point_values_date '
                                 'outside of range in [run] start_date - '
                                 'end_date, point_values_date being assigned '
                                 'to: {}'.format(end_date_str))
                idx = -1
            else:
                x = np.abs([date - pv_date for date in cfg.outputs['dates']])
                idx = x.argmin(0)

            model_date = cfg.outputs['dates'][idx].date().strftime('%Y-%m-%d')

            for value in cfg.point_values_properties:
                filename = '{}_pixel_{}_{}.csv'.format(basin_name, value,
                                                       end_date_str)
                csv_name = os.path.abspath(os.path.join(cfg.figs_path,
                                                        filename))

                if len(df.name.unique()) > nsubplots:
                    cfg._logger.warn(' Number of subplots in point_values() '
                                     'may not fit well with given settings, '
                                     'consider changing nrows and/or ncols in '
                                     '[validate] point_values_settings')
                    flag = False

                if value == 'swe_z':
                    factor = cfg.depth_factor
                elif value == 'depth':
                    factor = 39.37
                else:
                    factor = 1

                array = cfg.outputs[value][idx] * factor

                df_res = point_values_csv(array, value, df, xy, csv_name,
                                          model_date, cfg.plotorder[0],
                                          cfg.point_values_heading,
                                          cfg._logger)

                if cfg.point_values_flag:
                    head = cfg.point_values_heading
                    if head in df.columns.tolist():
                        point_values_figures(array, value, df_res, cfg.dem,
                                             cfg.figs_path, cfg.veg_type,
                                             model_date, course_date,
                                             cfg.point_values_settings,
                                             cfg.pixel, head, cfg._logger)
                    else:
                        cfg._logger.warn(' [validate] point_values_heading: '
                                         '{} not in csv, skipping figures '
                                         ''.format(cfg.point_values_heading))
                        cfg.point_values_flag = False

            # if everything is successful, set to False at the end
            flag = False

    if cfg.compare_runs_flag:
        args['variables'] = ['swe_vol', 'swi_vol']

        if cfg.flt_flag:
            args['flag'] = True
        else:
            args['flag'] = False

        dict = {}
        for var in args['variables']:
            dict[var] = {}
            for wy, run in zip(cfg.compare_run_wys, cfg.compare_run_names):
                wy_start = datetime(wy - 1, 10, 1)
                df = collect(connector, args['plotorder'][0], args['basins'],
                             wy_start, args['end_date'], var, run, 'total',
                             'daily')

                if wy != cfg.wy:
                    adj = cfg.wy - wy
                    df.index = df.index + timedelta(days=365 * adj)

                if var == 'swi_vol':
                    df = df.cumsum()

                dict[var][run] = df

        args['dict'] = dict

        compare_runs(args, cfg._logger)

    if cfg.inflow_flag:
        wy_start = datetime(cfg.wy - 1, 10, 1)
        swi_summary = collect(connector, args['plotorder'], args['basins'],
                              wy_start, args['end_date'], 'swi_vol',
                              args['run_name'], 'total', 'daily')
        df_swi = swi_summary.cumsum()
        args['swi_summary'] = df_swi

        if cfg.inflow_data is None:
            raw = pd.read_csv(cfg.summary_csv, skiprows=1, parse_dates=[0],
                              index_col=0)
            args['inflow_summary'] = raw[cfg.basin_headings]
        else:
            args['inflow_summary'] = pd.read_csv(cfg.summary_csv,
                                                 parse_dates=[0], index_col=0)

        args['inflow_headings'] = cfg.inflow_headings
        args['basin_headings'] = cfg.basin_headings

        inflow(args, cfg._logger)

    if cfg.write_properties is not None:
        write_properties(args['end_date'], cfg.connector, args['plotorder'],
                         args['basins'], datetime(cfg.wy - 1, 10, 1),
                         args['run_name'], args['figs_path'],
                         cfg.write_properties, vollbl=args['vollbl'],
                         logger=cfg._logger)

    if cfg.inputs_fig_flag:
        if cfg.mysql is not None:
            dbs = 'sql'
        else:
            dbs = 'sqlite'

        df = get_existing_records(connector, dbs)
        df = df.set_index('date_time')
        df.sort_index(inplace=True)

        ivalue = {}
        p = []

        for var in cfg.plots_inputs_variables:
            ivalue[var] = {}

            for basin in cfg.inputs_basins:
                bid = args['basins'][basin]['basin_id']
                ivalue[var][basin] = {}

                for func in cfg.inputs_methods:
                    if 'percentile' in func:
                        nfunc = '{}_{}'.format(func,
                                               str(cfg.inputs_percentiles[0]))
                        if ((var == cfg.plots_inputs_variables[0]) and
                                (basin == cfg.inputs_basins[0])):
                            p.append(nfunc)

                        ivalue[var][basin][nfunc] = df[
                            (df['function'] == nfunc) &
                            (df['variable'] == var) &
                            (df['basin_id'] == int(bid)) &
                            (df['run_name'] == args['run_name'])]

                        nfunc = '{}_{}'.format(func,
                                               str(cfg.inputs_percentiles[1]))
                        if ((var == cfg.plots_inputs_variables[0]) and
                                (basin == cfg.inputs_basins[0])):
                            p.append(nfunc)

                        ivalue[var][basin][nfunc] = df[
                            (df['function'] == nfunc) &
                            (df['variable'] == var) &
                            (df['basin_id'] == int(bid)) &
                            (df['run_name'] == args['run_name'])]

                    else:
                        ivalue[var][basin][func] = df[
                            (df['function'] == func) &
                            (df['variable'] == var) &
                            (df['basin_id'] == int(bid)) &
                            (df['run_name'] == args['run_name'])]

                        if ((var == cfg.plots_inputs_variables[0]) and
                                (basin == cfg.inputs_basins[0])):
                            p.append(func)

        args['inputs'] = ivalue
        args['inputs_methods'] = p
        args['var_list'] = cfg.plots_inputs_variables
        args['inputs_basins'] = cfg.inputs_basins

        inputs(args, cfg._logger)

    cfg.fig_names = fig_names
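# For reference, the collect() calls in figures() all share one shape:
# collect(connector, plotorder, basins, start_date, end_date, value, run_name,
# edges-or-'total', method). A minimal sketch of the two common call forms,
# assuming the cfg attributes used throughout this module; the helper name
# example_collect_calls is hypothetical and exists only for illustration.
def example_collect_calls(cfg):
    # by-elevation SWE volume on the last day of the period
    swe_by_elev = collect(cfg.connector, cfg.plotorder, cfg.basins,
                          cfg.start_date, cfg.end_date, 'swe_vol',
                          cfg.run_name, cfg.edges, 'end')
    # basin-total SWI volume summed over the period
    swi_total = collect(cfg.connector, cfg.plotorder, cfg.basins,
                        cfg.start_date, cfg.end_date, 'swi_vol', cfg.run_name,
                        'total', 'sum')
    return swe_by_elev, swi_total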
def write_properties(end_date, cnx, plotorder, basins, wy_start, run_name,
                     figs_path, values, vollbl='TAF', logger=None):
    """ Write daily total snowpack properties to csv.

    Args
    ------
    end_date {str}: end_date
    cnx {str}: database connector
    plotorder {list}: basins list
    basins {dict}: basins dict
    wy_start {str}: YYYY1001
    run_name {str}: snowav run_name
    figs_path {str}: path to save files
    values {list}: list of snowav values
    vollbl {str}: volume label
    logger {class}: logger
    """

    datestr = end_date.strftime("%Y%m%d")
    now_str = datetime.now().date().strftime("%Y-%m-%d")
    date_col = 'Date generated: {}'.format(now_str)
    unit = vollbl

    for value in values:
        out = collect(cnx, plotorder, basins, wy_start, end_date, value,
                      run_name, 'total', 'daily')

        # setting index to date strips the index name
        out.index = out.index.date
        out.index.name = 'date'

        if value.lower() == 'swe_vol':
            value_line = ('Snow Water Equivalent (SWE) volume in thousands '
                          'of acre-feet (TAF)')
        elif value.lower() == 'swi_vol':
            value_line = ('Surface Water Input (SWI) volume in thousands '
                          'of acre-feet (TAF)')
        else:
            if logger is not None:
                logger.warning(" Value types other than swe, swi, and "
                               "their derivatives are not supported")
            return

        headers = ['USDA Agricultural Research Service Snowpack Summary Data',
                   value_line,
                   'Data provided are daily model results from the iSnobal model',
                   'First column is the date of model result',
                   'Second column is the total basin volume',
                   'Additional columns are the subbasins in the watershed',
                   date_col,
                   'Valid until next reports are generated',
                   'Contact: Scott Havens <*****@*****.**>'
                   '\n']

        s = ''.join(e for e in unit.lower() if e.isalnum())
        filename = '{}_daily_{}_{}.csv'.format(value, datestr, s)
        path = os.path.join(os.path.abspath(figs_path), filename)

        if os.path.isfile(path):
            os.remove(path)

        with open(path, mode='w', encoding='utf-8') as f:
            f.write('\n'.join(headers))

        out.to_csv(path, encoding='utf-8', mode='a')

        if logger is not None:
            logger.info(' Saved: {}'.format(path))
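# A minimal usage sketch for write_properties(), mirroring the call made in
# figures() above; cfg is the snowav config class instance assumed elsewhere
# in this module and is not defined here.
#
#   write_properties(cfg.end_date, cfg.connector, cfg.plotorder, cfg.basins,
#                    datetime(cfg.wy - 1, 10, 1), cfg.run_name, cfg.figs_path,
#                    ['swe_vol', 'swi_vol'], vollbl=cfg.vollbl,
#                    logger=cfg._logger)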
def report(cfg):
    """ Create the pdf report.

    The latex formats for figures, tables, and summary paragraphs are created
    by templates in snowav/report/. Data is pulled from the snowav database.

    To add a figure to the report:
    - create the plot (of course)
    - add to variables dict in this file, with the same naming convention
        variables['PDEP_FIG'] = 'precip_depth%s.png'%(cfg.directory)
    - add figure template to snowav_report.tex
        \VAR{PDEP_FIG_TPL}
    - add to section_dict
    - add template to snowav/report/figs/
    - add to snowav/config/CoreConfig.py [report] exclude_figs
    - if the figure may not exist (such as flt_diff, or those that require
      process() to run), address that with some form of exception before
      creating the pdf

    Args
    ------
    cfg {class}: config class
    """

    # bid = cfg.basins[cfg.plotorder[0]]['basin_id']
    basins = cfg.basins
    wy_start = datetime(cfg.wy - 1, 10, 1)
    start_date = cfg.start_date
    end_date = cfg.end_date
    run_name = cfg.run_name
    # edges = cfg.edges
    plotorder = cfg.plotorder
    dpts = int(cfg.dplcs)
    ddpts = int(cfg.rep_dplcs)
    cnx = cfg.connector

    # variables to pass to latex file
    variables = {}

    dbval = collect(cnx, plotorder[0], basins, wy_start, end_date, 'swi_vol',
                    run_name, 'total', 'sum')
    variables['TOTAL_SWI'] = dbval.sum().values.round(decimals=dpts)[0]

    dbval = collect(cnx, plotorder[0], basins, start_date, end_date, 'swi_vol',
                    run_name, 'total', 'sum')
    variables['PER_SWI'] = dbval.sum().values.round(decimals=dpts)[0]

    dbval = collect(cnx, plotorder[0], basins, start_date, end_date, 'swe_vol',
                    run_name, 'total', 'end')
    variables['TOTAL_SWE'] = dbval.sum().values.round(decimals=dpts)[0]

    dbval = collect(cnx, plotorder[0], basins, start_date, end_date,
                    'swe_avail', run_name, 'total', 'end')
    variables['TOTAL_SAV'] = dbval.sum().values.round(decimals=dpts)[0]

    dbval = collect(cnx, plotorder[0], basins, start_date, end_date, 'swe_z',
                    run_name, 'total', 'end')
    variables['TOTAL_PM'] = dbval.sum().values.round(decimals=ddpts)[0]

    dbval = collect(cnx, plotorder[0], basins, wy_start, end_date, 'precip_z',
                    run_name, 'total', 'sum')
    variables['TOTALPRE_PM'] = dbval.sum().values.round(decimals=ddpts)[0]

    s = collect(cnx, plotorder[0], basins, start_date, start_date, 'swe_vol',
                run_name, 'total', 'end')
    e = collect(cnx, plotorder[0], basins, start_date, end_date, 'swe_vol',
                run_name, 'total', 'end')
    start_swe = s.sum().values.round(decimals=dpts)[0]
    end_swe = e.sum().values.round(decimals=dpts)[0]
    diff = end_swe - start_swe

    variables['TOTAL_SDEL'] = diff
    variables['TOTAL_PVOL'] = '100'

    if float(end_swe) - float(start_swe) > 0:
        variables['SIGN'] = r'$+$'
    if float(end_swe) - float(start_swe) == 0.0:
        variables['SIGN'] = ''
    if float(end_swe) - float(start_swe) < 0.0:
        variables['SIGN'] = r'-'

    dbval = collect(cnx, plotorder[0], basins, wy_start, end_date, 'rain_z',
                    run_name, 'total', 'sum')
    total_rain = dbval.sum().values.round(decimals=ddpts)[0]

    if variables['TOTALPRE_PM'] != 0:
        variables['TOTAL_RAT'] = str(int((total_rain /
                                          variables['TOTALPRE_PM']) * 100))
    else:
        variables['TOTAL_RAT'] = '-'
        variables['TOTALPRE_PM'] = '-'

    report_time = datetime.now().strftime("%Y-%-m-%-d %H:%M")

    numsubs = range(1, len(cfg.plotorder))

    for n, sub in zip(numsubs, cfg.plotorder[1:]):
        swiind = 'SUB' + str(n) + '_SWI'
        perswiind = 'SUB' + str(n) + '_PERSWI'
        sweind = 'SUB' + str(n) + '_SWE'
        avsweind = 'SUB' + str(n) + '_SAV'
        swedel = 'SUB' + str(n) + '_SDEL'
        pm = 'SUB' + str(n) + '_PM'
        prepm = 'SUB' + str(n) + 'PRE_PM'
        rain = 'SUB' + str(n) + 'RAI'
        ratio = 'SUB' + str(n) + '_RAT'
        pvol = 'SUB' + str(n) + '_PVOL'

        dbval = collect(cnx, sub, basins, wy_start, end_date, 'swi_vol',
                        run_name, 'total', 'sum')
        variables[swiind] = dbval.sum().values.round(decimals=dpts)[0]

        dbval = collect(cnx, sub, basins, start_date, end_date, 'swi_vol',
                        run_name, 'total', 'sum')
        variables[perswiind] = dbval.sum().values.round(decimals=dpts)[0]

        dbval = collect(cnx, sub, basins, start_date, end_date, 'swe_vol',
                        run_name, 'total', 'end')
        variables[sweind] = dbval.sum().values.round(decimals=dpts)[0]

        dbval = collect(cnx, sub, basins, start_date, end_date, 'swe_avail',
                        run_name, 'total', 'end')
        variables[avsweind] = dbval.sum().values.round(decimals=dpts)[0]

        dbval = collect(cnx, sub, basins, start_date, start_date, 'swe_vol',
                        run_name, 'total', 'end')
        start_swe = dbval.sum().values.round(decimals=dpts)[0]

        dbval = collect(cnx, sub, basins, start_date, end_date, 'swe_vol',
                        run_name, 'total', 'end')
        end_swe = dbval.sum().values.round(decimals=dpts)[0]
        variables[swedel] = end_swe - start_swe

        dbval = collect(cnx, sub, basins, start_date, end_date, 'swe_z',
                        run_name, 'total', 'end')
        variables[pm] = dbval.sum().values.round(decimals=ddpts)[0]

        dbval = collect(cnx, sub, basins, wy_start, end_date, 'precip_z',
                        run_name, 'total', 'sum')
        variables[prepm] = dbval.sum().values.round(decimals=ddpts)[0]

        dbval = collect(cnx, sub, basins, wy_start, end_date, 'rain_z',
                        run_name, 'total', 'sum')
        variables[rain] = dbval.sum().values.round(decimals=ddpts)[0]

        if (end_swe > 0) and (variables['TOTAL_SWE'] > 0):
            variables[pvol] = end_swe / variables['TOTAL_SWE'] * 100
        else:
            variables[pvol] = '-'

        if variables[prepm] != 0.0:
            variables[ratio] = str(int((variables[rain] /
                                        variables[prepm]) * 100))
        else:
            variables[ratio] = '0'

    # Upper case variables are used in the LaTex file,
    # lower case versions are assigned here

    # untested - if report title contains comma?
    if isinstance(cfg.rep_title, list):
        title = cfg.rep_title[0]
        for s in cfg.rep_title[1::]:
            title = title + ', ' + s
        variables['REPORT_TITLE'] = title

    else:
        if cfg.flt_flag and cfg.flight_figs:
            fst = ' Model snow depths were updated with ASO snow depths on '
            tst = ''
            for i, d in enumerate(cfg.flight_diff_dates):
                if d <= cfg.end_date:
                    dn = d.date().strftime("%m/%d")
                    if len(cfg.flight_diff_dates) == 1:
                        fst = fst + dn + '.'
                        tst += dn

                    if ((len(cfg.flight_diff_dates) > 1) and
                            (i < len(cfg.flight_diff_dates) - 1)):
                        fst = fst + dn + ', '
                        tst += dn + ', '

                    if ((len(cfg.flight_diff_dates) > 1) and
                            (i == len(cfg.flight_diff_dates) - 1)):
                        fst = fst + 'and ' + dn + '.'
                        tst += dn

                else:
                    fst = fst.split(dn)[0] + 'and ' + dn + '.'
                    break

            variables['REPORT_TITLE'] = (cfg.rep_title +
                                         r' \\ ASO Updates {}'.format(tst))
            variables['FLTSENT'] = fst

        else:
            variables['REPORT_TITLE'] = cfg.rep_title
            variables['FLTSENT'] = ''

    variables['REPORT_TIME'] = report_time
    variables['WATERYEAR'] = str(cfg.wy)
    variables['UNITS'] = cfg.vollbl
    variables['VOLLBL'] = cfg.vollbl
    variables['DEPLBL'] = cfg.depthlbl
    variables['START_DATE'] = cfg.report_start.date().strftime("%B %-d")
    variables['END_DATE'] = cfg.report_date.date().strftime("%B %-d")
    variables['SWE_IN'] = variables['TOTAL_PM']
    variables['SWI_IN'] = variables['TOTAL_SWI']
    variables['FIG_PATH'] = cfg.figs_path
    variables['SWI_FIG'] = cfg.swi_volume_fig_name
    variables['CHANGES_FIG'] = cfg.volume_change_fig_name
    variables['TOTALS_FIG'] = cfg.basin_total_fig_name
    variables['MULTITOTSWE_FIG'] = 'compare_swe_vol_{}.png'.format(cfg.directory)
    variables['PDEP_FIG'] = cfg.precip_fig_name
    variables['VALID_FIG'] = cfg.stn_validate_fig_name
    variables['COLD_FIG'] = cfg.cold_content_fig_name
    variables['SWE_FIG'] = cfg.swe_volume_fig_name
    variables['VERSION'] = snowav.__version__

    if (cfg.update_file is not None) and cfg.flt_flag and cfg.flight_figs:
        for name in cfg.flight_diff_fig_names:
            variables['DFLT_FIG'] = name

    if cfg.report_diagnostics:
        variables['DIAGNOSTICS_FIG'] = 'diagnostics_{}'.format(cfg.directory)
        variables['INPUTS_FIG'] = 'inputs_period_{}'.format(cfg.directory)

    if cfg.subs_fig is not None:
        variables['SUBBASINS_FIG'] = '{}'.format(cfg.subs_fig)

    # Logos
    variables['ARSLOGO'] = os.path.join(cfg.figs_tpl_path, 'ARS.jpg')
    variables['ASOLOGO'] = os.path.join(cfg.figs_tpl_path, 'ASO.jpg')
    variables['USDALOGO'] = os.path.join(cfg.figs_tpl_path, 'USDA.png')
    variables['JPLLOGO'] = os.path.join(cfg.figs_tpl_path, 'JPL.jpg')
    variables['CDWRLOGO'] = os.path.join(cfg.figs_tpl_path, 'CDWR.png')
    variables['USBRLOGO'] = os.path.join(cfg.figs_tpl_path, 'USBR.jpg')
    variables['NRCSLOGO'] = os.path.join(cfg.figs_tpl_path, 'NRCS.jpg')
    variables['KRWALOGO'] = os.path.join(cfg.figs_tpl_path, 'KRWA.jpg')
    variables['FRIANTLOGO'] = os.path.join(cfg.figs_tpl_path, 'FRIANT.jpg')
    variables['AWSMLOGO'] = os.path.join(cfg.figs_tpl_path, 'logo.png')

    dfind = [str(i) for i in cfg.edges]
    dfindt = [str(i) for i in cfg.edges] + ['total']
    colstr = 'l' + 'r' * len(cfg.plotorder)

    if len(cfg.plotorder) > 5:
        spacecmd = r'\resizebox{\textwidth}{!}{'
    else:
        spacecmd = r'{'

    # ntables = len(cfg.tables)
    mtables = 2
    ptables = 0

    if 'swe_depth' in cfg.tables:
        dbval = collect(cnx, plotorder, basins, start_date, end_date, 'swe_z',
                        run_name, dfindt, 'end')
        dbval = dbval.rename(columns=cfg.labels)
        swe_byelev = dbval.round(decimals=dpts)
        swe_byelev.rename(index={'total': 'mean'}, inplace=True)
        swe_byelev.index.name = 'Elevation'
        variables['SWE_BYELEV'] = (
            r' \normalsize \textbf{SWE [%s], %s}\\ \vspace{0.1cm} \\' %
            (cfg.depthlbl, cfg.report_date.date().strftime("%Y-%-m-%-d")) +
            spacecmd + swe_byelev.to_latex(na_rep='-', column_format=colstr) +
            r'} \\ \footnotesize{\textbf{Table %s:} SWE depth.}' %
            (str(mtables)))
        mtables += 1
        ptables += 1

    else:
        variables['SWE_BYELEV'] = ''

    if 'swe_vol' in cfg.tables:
        ptables += 1

        if ptables == 2:
            clrpage = r'\clearpage'
        else:
            clrpage = ''

        dbval = collect(cnx, plotorder, basins, start_date, end_date,
                        'swe_vol', run_name, dfindt, 'end')
        dbval = dbval.rename(columns=cfg.labels)
        swe_byelev = dbval.round(decimals=dpts)
        swe_byelev.index.name = 'Elevation'
        variables['SWEVOL_BYELEV'] = (
            r' \normalsize \textbf{SWE [%s], %s}\\ \vspace{0.1cm} \\' %
            (cfg.vollbl, cfg.report_date.date().strftime("%Y-%-m-%-d")) +
            spacecmd + swe_byelev.to_latex(na_rep='-', column_format=colstr) +
            r'} \\ \footnotesize{\textbf{Table %s:} SWE volume.}%s' %
            (str(mtables), clrpage))
        mtables += 1

    else:
        variables['SWEVOL_BYELEV'] = ''

    if 'swe_change' in cfg.tables:
        ptables += 1

        if ptables == 2:
            clrpage = r'\clearpage'
        else:
            clrpage = ''

        dbval = collect(cnx, plotorder, basins, start_date, start_date,
                        'swe_z', run_name, dfindt, 'end')
        dbval = dbval.rename(columns=cfg.labels)
        start_swe = dbval.round(decimals=dpts)
        dbval = collect(cnx, plotorder, basins, start_date, end_date, 'swe_z',
                        run_name, dfindt, 'end')
        dbval = dbval.rename(columns=cfg.labels)
        end_swe = dbval.round(decimals=dpts)
        dswe_byelev = end_swe - start_swe
        dswe_byelev.rename(index={'total': 'mean'}, inplace=True)
        dswe_byelev.index.name = 'Elevation'
        variables['DSWE_BYELEV'] = (
            r' \normalsize \textbf{Change in SWE [%s], %s to %s}\\ \vspace{0.1cm} \\' %
            (cfg.depthlbl, cfg.report_start.date().strftime("%Y-%-m-%-d"),
             cfg.report_date.date().strftime("%Y-%-m-%-d")) +
            spacecmd + dswe_byelev.to_latex(na_rep='-', column_format=colstr) +
            r'} \\ \footnotesize{\textbf{Table %s:} Change in SWE.} %s' %
            (str(mtables), clrpage))
        mtables += 1

    else:
        variables['DSWE_BYELEV'] = ''

    if 'swe_percent' in cfg.tables:
        ptables += 1

        if (ptables % 2) == 0 and ptables != 0:
            clrpage = r'\clearpage'
        else:
            clrpage = ''

        dbval = collect(cnx, plotorder, basins, start_date, end_date,
                        'swe_vol', run_name, dfind, 'end')
        dbval = dbval.rename(columns=cfg.labels)
        swe_byelev = dbval.round(decimals=dpts)
        value = swe_byelev.iloc[:-1].sum()
        sweper_byelev = (swe_byelev / value * 100).round(decimals=dpts)
        sweper_byelev.index.name = 'Elevation'

        variables['SWEPER_BYELEV'] = (
            r' \normalsize \textbf{SWE volume, percent of basin total, %s}\\ \vspace{0.1cm} \\' %
            (cfg.report_date.date().strftime("%Y-%-m-%-d")) +
            spacecmd +
            sweper_byelev.round(1).to_latex(na_rep='-', column_format=colstr) +
            r'} \\ \footnotesize{\textbf{Table %s:} Percent of total SWE volume.}%s' %
            (str(mtables), clrpage))
        variables['SWEPER_BYELEV'] = variables['SWEPER_BYELEV'].replace('inf', '-')
        mtables += 1

    else:
        variables['SWEPER_BYELEV'] = ''

    if 'swi_vol' in cfg.tables:
        dbval = collect(cnx, plotorder, basins, start_date, end_date,
                        'swi_vol', run_name, dfindt, 'sum')
        dbval = dbval.rename(columns=cfg.labels)
        swi_byelev = dbval.round(decimals=dpts)
        variables['ACCUM_BYELEV'] = (
            r' \normalsize \textbf{SWI [%s] by elevation, %s to %s}\\ \vspace{0.1cm} \\' %
            (cfg.vollbl, cfg.report_start.date().strftime("%Y-%-m-%-d"),
             cfg.report_date.date().strftime("%Y-%-m-%-d")) +
            spacecmd + swi_byelev.to_latex(na_rep='-', column_format=colstr) +
            r'} \\ \footnotesize{\textbf{Table %s:} SWI volume. }' %
            (str(mtables)))
        mtables += 1

    else:
        variables['ACCUM_BYELEV'] = ''

    variables['TOT_LBL'] = cfg.plotorder[0]

    # for n in range(1,len(cfg.plotorder)):
    for n in range(1, len(cfg.labels)):
        s = 'SUB' + str(n) + '_LBL'
        variables[s] = cfg.labels[cfg.plotorder[n]]

    # Convert floats to strings
    for name in variables:
        if isinstance(variables[name], float):
            if cfg.dplcs == 0:
                tmp = str(int(variables[name]))
            else:
                tmp = str(round(variables[name], cfg.dplcs))
            variables[name] = tmp

    # Summary sections and fig template have variable strings
    # (e.g. CHANGES_FIG) that need to be replaced
    section_dict = {'SUMMARY': cfg.summary_file,
                    'CHANGES_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                    'changes_fig_tpl.txt'),
                    'SWI_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                'swi_fig_tpl.txt'),
                    'TOTALS_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                   'totals_fig_tpl.txt'),
                    'MULTITOTSWE_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                        'multitotswe_fig_tpl.txt'),
                    'VALID_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                  'valid_fig_tpl.txt'),
                    'FLTCHANGES_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                       'flt_fig_tpl.txt'),
                    'PDEP_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                 'pdep_fig_tpl.txt'),
                    'COLD_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                 'cold_fig_tpl.txt'),
                    'SWE_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                'swe_fig_tpl.txt'),
                    'SUBBASINS_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                      'subbasins_fig_tpl.txt'),
                    'DIAGNOSTICS_FIG_TPL': os.path.join(cfg.figs_tpl_path,
                                                        'diagnostics_fig_tpl.txt'),
                    'SWE_SUMMARY_TPL': os.path.join(cfg.figs_tpl_path,
                                                    'swe_summary_{}sub.txt'.format(
                                                        str(len(cfg.plotorder))))}

    # Define and load summary tables depending on number of subbasins

    # Remove if no flight
    if not cfg.flt_flag or not cfg.flight_figs:
        del section_dict['FLTCHANGES_FIG_TPL']

    if not cfg.report_diagnostics:
        del section_dict['DIAGNOSTICS_FIG_TPL']
        variables['DIAGNOSTICS_FIG'] = ''

    for rep in section_dict.keys():

        # for variable numbers of flight figures
        if rep == 'FLTCHANGES_FIG_TPL':
            fid = open(section_dict[rep], 'r')
            var = fid.read()
            fid.close()

            for name in sorted(variables):
                if name == 'DFLT_FIG':
                    for i, fltname in enumerate(cfg.flight_diff_fig_names):
                        flt_date = cfg.flight_outputs['dates'][i].date().strftime("%Y%m%d")
                        flt_num = cfg.flight_delta_vol_df[flt_date].round(1).to_latex(
                            na_rep='-', column_format=colstr)
                        table = (r'{ \vspace{0.5cm} \textbf{Change in SWE [%s] by elevation, ' %
                                 (cfg.vollbl) +
                                 r'from %s update} \\ \vspace{0.1cm} \\' %
                                 (flt_date) +
                                 r' %s %s }' % (spacecmd, flt_num))

                        if i == 0:
                            tmp = var.replace(name, fltname)
                        else:
                            tmp += var.replace(name, fltname)

                        tmp += table

                    var = tmp.replace(name, variables[name])
                else:
                    var = var.replace(name, variables[name])

            variables[rep] = var

        else:
            fid = open(section_dict[rep], 'r')
            var = fid.read()
            fid.close()

            for name in sorted(variables):
                var = var.replace(name, variables[name])

            variables[rep] = var

    if not cfg.subs_fig:
        variables['SUBBASINS_FIG_TPL'] = ''

    if not cfg.rep_compare_runs_flag:
        variables['MULTITOTSWE_FIG_TPL'] = ''

    if not cfg.rep_swi_flag:
        variables['SWI_FIG_TPL'] = ''

    if not cfg.rep_image_change_flag:
        variables['CHANGES_FIG_TPL'] = ''

    if not cfg.rep_cold_content_flag:
        variables['COLD_FIG_TPL'] = ''

    if not cfg.rep_swe_volume_flag:
        variables['SWE_FIG_TPL'] = ''

    if not cfg.rep_basin_total_flag:
        variables['TOTALS_FIG_TPL'] = ''

    if not cfg.rep_stn_validate_flag:
        variables['VALID_FIG_TPL'] = ''

    if not cfg.rep_precip_depth_flag:
        variables['PDEP_FIG_TPL'] = ''

    # Make the report
    env = make_env(loader=FileSystemLoader(cfg.templ_path))
    tpl = env.get_template(cfg.tex_file)

    # print VAR-replaced latex file for debugging if desired
    if cfg.print_latex:
        print(tpl.render(variables))

    pdf = build_pdf(tpl.render(variables))

    # Save in reports and with figs
    rpath = os.path.join(cfg.figs_path, cfg.report_name)
    pdf.save_to(rpath)
    cfg._logger.info(' Saved {}'.format(rpath))

    if cfg.rep_path is not None:
        rpath = os.path.join(cfg.rep_path, cfg.report_name)
        pdf.save_to(rpath)
        cfg._logger.info(' Saved {}'.format(rpath))
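# A minimal sketch of the "add a figure to the report" checklist from the
# report() docstring above, showing the two edits made inside report(). The
# key 'NEW_FIG', the template file 'new_fig_tpl.txt', and the config attribute
# cfg.new_fig_name are hypothetical placeholders, not part of snowav.
#
#   variables['NEW_FIG'] = cfg.new_fig_name
#   section_dict['NEW_FIG_TPL'] = os.path.join(cfg.figs_tpl_path,
#                                              'new_fig_tpl.txt')
#
# Then add \VAR{NEW_FIG_TPL} to snowav_report.tex, put new_fig_tpl.txt in
# snowav/report/figs/, and list the figure under [report] exclude_figs in
# CoreConfig.py if it may not exist for every run.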
def flt_image_change(file, update_numbers, end_date, flight_outputs,
                     pre_flight_outputs, masks, lims, barcolors, edges,
                     connector, plotorder, wy, depth_factor, basins, run_name,
                     figsize, depthlbl, elevlbl, vollbl, dplcs, figspath,
                     dpi=200, logger=None):
    """ Difference in SWE from one day prior to flight updates to the day of
    flight updates.

    The file specified in [plots] update_file is used to get dates of flight
    updates and the masks for which portions of the basin were updated. The
    actual SWE differences are between the snow.nc files on the dates of the
    flights and the day immediately prior.

    [plots] update_numbers is a 1-based list of subsetting flights that can
    be applied.

    Args
    ------
    file {str}: path to lidar_depths.nc
    update_numbers {list}: flight numbers to generate
    end_date {datetime}: end date
    flight_outputs {dict}: from config.py
    pre_flight_outputs {dict}: from config.py
    masks {dict}: from config.py
    lims {list}: percent of min/max for color limits
    barcolors {list}: colors
    edges {list}: elevation bands
    connector {str}: database connector
    plotorder {list}: basins
    wy {int}: water year
    depth_factor {float}: depth factor
    basins {dict}: from config.py
    run_name {str}: snowav run_name
    figsize {list}: figure dimensions
    depthlbl {str}: depth label
    elevlbl {str}: elevation label
    vollbl {str}: volume label
    dplcs {int}: decimal places
    figspath {str}: base path for figures
    dpi {int}: figure dpi
    """

    e = False
    basin_name = plotorder[0].lower().split(" ")[0]
    flight_diff_fig_names = []
    flight_delta_vol_df = {}

    p = nc.Dataset(file, 'r')

    if update_numbers is None:
        times = p.variables['time'][:]
    else:
        if max(update_numbers) > len(p.variables['time']) - 1:
            raise Exception('Max flight update_numbers greater than available '
                            'flights')
        else:
            times = p.variables['time'][update_numbers]

    # remove flight indices that might be after the report period
    idx = []
    for i, n in enumerate(times):
        date = calculate_date_from_wyhr(int(n), wy)
        if date > end_date:
            idx = np.append(idx, i)

    times = np.delete(times, idx)
    ix = np.argsort(times)
    times = times[ix]

    if update_numbers is not None:
        update_numbers = [int(x) for x in update_numbers]

    for i, time in enumerate(times):
        if update_numbers is not None:
            depth = p.variables['depth'][update_numbers[ix[i]], :, :]
        else:
            depth = p.variables['depth'][ix[i], :, :]

        delta_swe = (flight_outputs['swe_z'][i][:] -
                     pre_flight_outputs['swe_z'][i][:])
        delta_swe = delta_swe * depth_factor

        # We will always make the difference between the previous day
        start_date = flight_outputs['dates'][i] - timedelta(hours=24)
        end_date = flight_outputs['dates'][i]

        try:
            delta_swe_byelev = collect(connector, plotorder, basins,
                                       start_date, end_date, 'swe_vol',
                                       run_name, edges, 'difference')
        except:
            e = True

        try:
            end_swe_byelev = collect(connector, plotorder, basins, start_date,
                                     end_date, 'swe_vol', run_name, edges,
                                     'end')
        except:
            e = True

        if e:
            if logger is not None:
                logger.info(' Failed requesting database records ending on {} '
                            'for flight difference figure. This may mean that '
                            '[run] directory has not been processed with '
                            '[snowav] run_name: {} for the periods in '
                            '[plots] update_file. Try subsetting with [plots] '
                            'update_numbers or processing the full [run] '
                            'directory.'.format(end_date, run_name))
                logger.info(' Flight figures being set to False...')
            return [], []

        # Make copy so that we can add nans for the plots
        delta_state = copy.deepcopy(delta_swe)

        v_min, v_max = np.nanpercentile(delta_state, [lims[0], lims[1]])

        colorsbad = plt.cm.Set1_r(np.linspace(0., 1, 1))
        colors1 = cmocean.cm.matter_r(np.linspace(0., 1, 127))
        colors2 = plt.cm.Blues(np.linspace(0, 1, 128))
        colors = np.vstack((colorsbad, colors1, colors2))
        mymap = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors)

        ixf = delta_state == 0
        delta_state[ixf] = -100000
        pmask = masks[plotorder[0]]['mask']
        ixo = pmask == 0
        delta_state[ixo] = np.nan

        cmap = copy.copy(mymap)
        cmap.set_bad('white', 1.)

        sns.set_style('darkgrid')
        sns.set_context("notebook")

        plt.close(i)
        fig, (ax, ax1) = plt.subplots(num=i, figsize=figsize, dpi=dpi,
                                      nrows=1, ncols=2)

        norm = MidpointNormalize(midpoint=0, vmin=v_min, vmax=v_max)

        # this is primarily for awsm_test_cases, in which the flight update
        # nc file doesn't contain mask information. wy2019-forward flight nc
        # files *should* have a mask
        if hasattr(depth, 'mask'):
            mask = np.ma.masked_array(depth.mask, ~depth.mask)
            if mask.shape != delta_state.shape:
                raise Exception('Dimensions {}: {} do not match snow.nc: '
                                '{}'.format(file, mask.shape,
                                            delta_state.shape))

            delta_state[mask] = np.nan
            h = ax.imshow(delta_state * mask, cmap=cmap, clim=(v_min, v_max),
                          norm=norm)
        else:
            h = ax.imshow(delta_state, cmap=cmap, clim=(v_min, v_max),
                          norm=norm)

        for name in masks:
            ax.contour(masks[name]['mask'], cmap="Greys", linewidths=1)

        h.axes.get_xaxis().set_ticks([])
        h.axes.get_yaxis().set_ticks([])
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.2)
        cbar = plt.colorbar(h, cax=cax)
        cbar.set_label(r'$\Delta$ SWE [{}]'.format(depthlbl))

        end_date = start_date + timedelta(hours=1)
        d = end_date + timedelta(hours=1)

        h.axes.set_title('Change in SWE Depth\n{} to {}'
                         .format(start_date.date().strftime("%Y-%-m-%-d"),
                                 end_date.date().strftime("%Y-%-m-%-d")))

        if len(plotorder) == 1:
            porder = plotorder
        else:
            porder = plotorder[1::]

        for iters, name in enumerate(porder):
            if dplcs == 0:
                lbl = '{}: {} {}'.format(name,
                                         str(int(delta_swe_byelev[name].sum())),
                                         vollbl)
            else:
                lbl = '{}: {} {}'.format(name,
                                         str(np.round(delta_swe_byelev[name].sum(),
                                                      dplcs)),
                                         vollbl)

            if iters == 0:
                ax1.bar(range(0, len(edges)), delta_swe_byelev[name],
                        color=barcolors[iters], edgecolor='k', label=lbl)
            else:
                ax1.bar(range(0, len(edges)), delta_swe_byelev[name],
                        color=barcolors[iters], edgecolor='k', label=lbl,
                        alpha=0.5)

        end_swe_byelev = end_swe_byelev.fillna(0)
        datestr = flight_outputs['dates'][i].date().strftime("%Y%m%d")
        percent_delta_byelev = (delta_swe_byelev.sum(skipna=True) /
                                end_swe_byelev.sum()) * 100
        flight_delta_vol_df[datestr] = delta_swe_byelev

        fp = os.path.join(figspath,
                          '{}_flight_{}_delta_taf.csv'.format(basin_name,
                                                              datestr))
        delta_swe_byelev.to_csv(fp)

        fp = os.path.join(figspath,
                          '{}_flight_{}_percent_change.csv'.format(basin_name,
                                                                   datestr))
        percent_delta_byelev.to_csv(fp)

        plt.tight_layout()
        xts = ax1.get_xticks()
        edges_lbl = []
        for x in xts[0:len(xts) - 1]:
            edges_lbl.append(str(int(edges[int(x)])))

        ax1.set_xticklabels(str(x) for x in edges_lbl)
        for tick in ax1.get_xticklabels():
            tick.set_rotation(30)

        ylims = ax1.get_ylim()
        ymax = ylims[1] + abs(ylims[1] - ylims[0])
        ymin = ylims[0] - abs(ylims[1] - ylims[0]) * 0.1
        ax1.set_ylim((ymin, ymax))

        ax1.set_ylabel(r'$\Delta$ SWE [{}]'.format(vollbl))
        ax1.set_xlabel('elevation [{}]'.format(elevlbl))
        ax1.yaxis.set_label_position("right")
        ax1.tick_params(axis='x')
        ax1.tick_params(axis='y')
        ax1.yaxis.tick_right()

        if len(plotorder) > 1:
            ax1.legend(loc=2, fontsize=10)

        plt.tight_layout()
        fig.subplots_adjust(top=0.88)

        fstr = '{}_flight_difference_{}.png'.format(basin_name, datestr)
        flight_diff_fig_names += [fstr]
        fig_path = os.path.join(figspath, fstr)
        snowav.framework.figures.save_fig(fig, fig_path)

        if logger is not None:
            logger.info(' Saved: {}'.format(fig_path))

    p.close()

    return flight_diff_fig_names, flight_delta_vol_df
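# A minimal usage sketch for flt_image_change(), mirroring the call made in
# figures() above; cfg is the snowav config class instance assumed elsewhere
# in this module and is not defined here.
#
#   names, df = flt_image_change(cfg.update_file, cfg.update_numbers,
#                                cfg.end_date, cfg.flight_outputs,
#                                cfg.pre_flight_outputs, cfg.masks,
#                                cfg.flt_image_change_clims, cfg.barcolors,
#                                cfg.edges, cfg.connector, cfg.plotorder,
#                                cfg.wy, cfg.depth_factor, cfg.basins,
#                                cfg.run_name, cfg.figsize, cfg.depthlbl,
#                                cfg.elevlbl, cfg.vollbl, cfg.dplcs,
#                                cfg.figs_path, dpi=cfg.dpi, logger=cfg._logger)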