def read_mus(self):
    """Read morphological unit (mu) names and IDs from the mu workbook.

    Scans rows 6-43 of the workbook at ``config.xlsx_mu`` (col D = mu name,
    col E = mu ID) and fills ``self.mu_dict`` with ``{mu_type: mu_ID}``.
    A row is added only if all four threshold cells (cols F-I) hold numeric
    values; otherwise the omission is logged. Row 23 is skipped (floodplain
    table headers).
    """
    mu_xlsx = cIO.Read(config.xlsx_mu)
    for i in range(6, 44):  # loop over all mu-rows
        if i == 23:  # jump over floodplain table headers
            continue
        mu_type = str(mu_xlsx.ws["D" + str(i)].value)
        try:
            mu_ID = int(mu_xlsx.ws["E" + str(i)].value)
        except (TypeError, ValueError):  # empty or non-numeric ID cell
            continue
        if not (mu_type.lower() == "none"):
            try:
                # verify that all depth / velocity thresholds are numeric (cols F-I)
                float(mu_xlsx.ws["F" + str(i)].value)
                float(mu_xlsx.ws["G" + str(i)].value)
                float(mu_xlsx.ws["H" + str(i)].value)
                float(mu_xlsx.ws["I" + str(i)].value)
                self.mu_dict.update({mu_type: mu_ID})  # add mu name and ID to dict
                self.logger.info(" * added %s." % str(mu_type))
            except (TypeError, ValueError):  # at least one threshold cell is empty/non-numeric
                self.logger.info(
                    " * omitted {0} (no depth / velocity thresholds provided in row {1})."
                    .format(mu_type, str(i)))
    mu_xlsx.close_wb()
def read_flow_series(self, input_xlsx):
    """Read a discharge time series from a workbook.

    :param input_xlsx: STR absolute path of a workbook with dates in
        col A and flows in col B (data starting at row 3).

    Sets ``self.date_column`` / ``self.flow_column`` and then calls
    ``self.assign_years()``. Any read/parse failure is logged (best-effort,
    no exception is propagated).
    """
    try:
        input_xlsx_f = cIO.Read(input_xlsx)
        self.date_column = input_xlsx_f.read_column('A', 3)
        self.flow_column = input_xlsx_f.read_column('B', 3)
        input_xlsx_f.close_wb()
        self.assign_years()
    except Exception:  # was a bare except; keep best-effort behavior but don't trap SystemExit/KeyboardInterrupt
        self.logger.info(
            "ERROR: The source discharge file contains non-detectable formats."
        )
def get_disc_frequency(self):
    """ Get frequency of disconnection from hydrologic record: average number
    of disconnections per season

    :return: dict of {col A value: col B value} pairs read from
        ``disc_freq_<lifestage>.xlsx``, or -1 on any failure.
    """
    # read workbook
    disc_freq_xlsx_name = os.path.join(
        config.dir2ra,
        '00_Flows\\%s\\disc_freq_%s.xlsx' % (self.condition, self.lifestage_code))
    if not os.path.exists(disc_freq_xlsx_name):
        # BUGFIX: previously fell through to `return disc_freqs` with the
        # name unbound, raising NameError when the workbook was missing.
        self.logger.info(
            "ERROR: Could not read disconnection frequency data. Make sure to Analyze Flows with the Start Menu."
        )
        return -1
    try:
        disc_freq_wb = cIO.Read(disc_freq_xlsx_name)
        disc_freq_c1 = disc_freq_wb.read_column("A", 3)
        disc_freq_c2 = disc_freq_wb.read_column("B", 3)
        disc_freqs = dict(zip(disc_freq_c1, disc_freq_c2))
        disc_freq_wb.close_wb()
    except Exception:  # narrowed from bare except
        self.logger.info(
            "ERROR: Could not read disconnection frequency data. Make sure to Analyze Flows with the Start Menu."
        )
        return -1
    return disc_freqs
def get_flow_duration_data_from_xlsx(self, flow_duration_xlsx):
    """Load a flow duration curve from a workbook.

    :param flow_duration_xlsx: STR containing the absolute workbook path with
        flow duration curve data; Q_flowdur data in col A and exceedance
        percent in col B (start_row=3).
    :return: -1 on read failure, otherwise None (results are stored on
        ``self.Q_flowdur`` and ``self.exceedance_rel``).
    """
    self.logger.info(" * retrieving flow duration curve (%s) ..." %
                     str(flow_duration_xlsx))
    try:
        data_reader = cIO.Read(flow_duration_xlsx)
        self.Q_flowdur = data_reader.read_column("A", 3)       # discharge (cfs or m3/s)
        self.exceedance_rel = data_reader.read_column("B", 3)  # exceedance percent
        data_reader.close_wb()
    except Exception:  # narrowed from bare except
        self.logger.info("ERROR: Could not read flow duration curve data")
        return -1
    try:
        # strip NaN pairs; best-effort — presumably fG.eliminate_nan_from_list
        # may fail on unexpected cell types, in which case raw lists are kept
        [self.Q_flowdur, self.exceedance_rel
         ] = fG.eliminate_nan_from_list(self.Q_flowdur, self.exceedance_rel)
    except Exception:
        pass
def get_info(self, flow_info_table):
    """Read hydraulic flow information and DoD/dmean raster names.

    :param flow_info_table: STR path of a workbook with return periods
        (col C), depth raster names (col D) and velocity raster names
        (col E), data starting at row 5.

    Appends to ``self.return_periods`` / ``self.h_rasters`` /
    ``self.u_rasters`` (reversed order) and scans
    ``self.dir2condition_act`` for scour/fill and dmean GeoTIFFs.
    """
    self.logger.info(" * Reading flow information from % s" % flow_info_table)
    # HYDRAULIC
    flow_info_xlsx = cIO.Read(flow_info_table)
    temp_return_periods = flow_info_xlsx.read_float_column_short("C", 5)
    temp_h_rasters = flow_info_xlsx.read_float_column_short("D", 5)
    temp_u_rasters = flow_info_xlsx.read_float_column_short("E", 5)
    flow_info_xlsx.close_wb()
    # remove entries with return periods of less than one year: keep only the
    # first of consecutive 1.0-year entries (idiomatic zip replaces index loop)
    last_entry = 100.0
    for rp, h_ras, u_ras in zip(temp_return_periods, temp_h_rasters, temp_u_rasters):
        if not (float(last_entry) == 1.0 and float(rp) == 1.0):
            self.return_periods.append(str(rp))
            self.h_rasters.append(str(h_ras).split(".tif")[0])
            self.u_rasters.append(str(u_ras).split(".tif")[0])
        last_entry = rp
    self.return_periods.reverse()
    self.h_rasters.reverse()
    self.u_rasters.reverse()
    # DOD and dmean
    self.logger.info(" * Reading dod and dmean info")
    ras_name_list = [
        i for i in os.listdir(self.dir2condition_act) if i.endswith('.tif')
    ]
    for ras in ras_name_list:
        if ("scour" in str(ras)) or ("fill" in str(ras)):
            self.dod.append(str(ras).split(".tif")[0])
        if "dmean" in str(ras):
            self.dmean = str(ras).split(".tif")[0]
def make_qua(self, input_type):
    """Create Q-vs-Area workbooks for every applied fish species / lifestage.

    :param input_type: STR either "statistic" (use flow duration curves) or
        "time_series" (ask the user for a flow series workbook).

    Sets ``self.errors`` and, on full success, opens the SHArea output
    folder in the default browser.
    """
    xlsx_template = ""
    self.errors = False
    interpolation_mger = cIp.Interpolator()
    if input_type == "time_series":
        xlsx_template = askopenfilename(
            initialdir=config.dir2flows + "\\InputFlowSeries",
            title="Select flow series workbook (xlsx)",
            filetypes=[("Workbooks", "*.xlsx")])
    col_Q = "B"
    col_UA = "F"
    start_row = 4
    if self.cover_applies:
        condition = self.chsi_condition_cov
    else:
        condition = self.chsi_condition_hy
    for f_spec in self.fish_applied.keys():
        lf_stages = self.fish_applied[f_spec]
        for ls in lf_stages:
            try:
                self.logger.info(
                    " > Creating Q - Area workbook for {0} - {1}".format(
                        f_spec, ls))
                # short code, e.g. "Chinook"/"juvenile" -> "chju"
                fsn = str(f_spec).lower()[0:2] + str(ls).lower()[0:2]
                cxlsx = config.dir2sh + "SHArea\\{0}_sharea_{1}.xlsx".format(
                    condition, fsn)
                xlsx_tar_data = cIO.Read(cxlsx)
                if input_type == "statistic":
                    Q_template = cFl.FlowAssessment()
                    if self.cover_applies:
                        self.logger.info(" * with cover")
                        cstr = "_cov.xlsx"
                    else:
                        cstr = ".xlsx"
                    xlsx_template = config.dir2flows + condition + "\\flow_duration_" + fsn + cstr
                    self.logger.info(
                        " * using flow duration curve (%s)" % xlsx_template)
                    Q_template.get_flow_duration_data_from_xlsx(xlsx_template)
                    dates = Q_template.exceedance_rel
                    flows = Q_template.Q_flowdur
                    xlsx_out = config.dir2sh + "SHArea\\{0}_QvsA_{1}_stats.xlsx".format(
                        condition, fsn)
                else:
                    self.logger.info(" * using flow time series (%s)" % xlsx_template)
                    Q_template = cFl.SeasonalFlowProcessor(xlsx_template)
                    dates = Q_template.date_column
                    flows = Q_template.flow_column
                    xlsx_out = config.dir2sh + "SHArea\\{0}_QvsA_{1}_time.xlsx".format(
                        condition, fsn)
                self.logger.info(" * interpolating SHArea ...")
                interpolation_mger.assign_targets(
                    xlsx_tar_data.read_float_column_short(col_Q, start_row),
                    xlsx_tar_data.read_float_column_short(col_UA, start_row))
                xlsx_tar_data.close_wb()  # BUGFIX: reader was never closed (workbook handle leak)
                UA_interp = interpolation_mger.linear_central(flows)
                writer = cIO.Write(
                    config.dir2sh +
                    ".templates\\CONDITION_QvsA_template_{0}.xlsx".format(
                        self.unit))
                self.logger.info(" * writing workbook %s ..." % xlsx_out)
                writer.write_column("A", 3, flows)
                writer.write_column("B", 3, UA_interp)
                writer.write_column("C", 3, dates)
                if input_type == "statistic":
                    writer.write_cell("C", 2, "% Exceedance")
                    writer.write_cell("D", 2, "Area vs % Exceedance")
                else:
                    writer.write_cell("C", 2, "Date")
                    writer.write_cell("D", 2, "Area vs date")
                self.logger.info(" * saving ...")
                writer.save_close_wb(xlsx_out)
                try:
                    del writer
                except Exception:
                    pass
            except Exception:  # narrowed from bare except; loop continues on failure
                # BUGFIX: message previously read "for{0}" (missing space)
                showinfo(
                    "ERROR",
                    "Could not create Area analyses for {0} - {1}. \nRead console Error and Warning messages."
                    .format(f_spec, ls))
                self.errors = True
    if not self.errors:
        webbrowser.open(config.dir2sh + "SHArea\\")
def open_fish_wb(self):
    """Open the aqua (fish) workbook and store the reader on the instance."""
    fish_workbook = cIO.Read(config.xlsx_aqua)
    self.reader = fish_workbook
def main(condition_initial="",
         condition_project="",
         cover_pre=False,
         cover_post=False,
         fish=None,
         prj_name="",
         unit="",
         version="",
         apply_wua=False):
    """ calculates pre- and post implementation SHArea
    version = "v10"                 # type() = 3-char str: vII
    prj_name = "MyProject"          # (corresponding to folder name)
    condition_initial = "2008"
    condition_project = "2008_tbr_lyr10"
    cover_app_pre = False
    fish = {"Chinook salmon": ["juvenile"]}

    :return: sha.cache on success, -1 if the analysis could not run.

    NOTE(review): apply_wua is accepted for interface compatibility but is
    not used anywhere in this function.
    """
    # BUGFIX: `fish={}` was a mutable default argument; use None sentinel
    fish = {} if fish is None else fish
    logger = logging.getLogger("logfile")
    error = False
    sys.path.append(config.dir2oxl)
    # set directories
    if cover_pre:
        pre_ext = "cover"
    else:
        pre_ext = "no_cover"
    if cover_post:
        post_ext = "cover"
    else:
        post_ext = "no_cover"
    dir2pp = os.path.dirname(
        os.path.realpath(__file__)) + "\\" + prj_name + "_" + version + "\\"
    dir2ras_chsi = [
        config.dir2sh + "SHArea\\Rasters_" + condition_initial + "\\" + pre_ext + "\\",
        config.dir2sh + "SHArea\\Rasters_" + condition_project + "\\" + post_ext + "\\"
    ]
    dir2ras_tar = [
        dir2pp + "Geodata\\Rasters\\" + condition_initial + "\\" + pre_ext + "\\",
        dir2pp + "Geodata\\Rasters\\" + condition_project + "\\" + post_ext + "\\"
    ]
    fGl.chk_dir(dir2ras_tar[0])
    fGl.chk_dir(dir2ras_tar[1])
    xlsx_out_name = config.empty_file
    shp_dir = dir2pp + "Geodata\\Shapefiles\\"
    # file and variable settings
    xlsx_tar_costs = dir2pp + prj_name + "_assessment_" + version + ".xlsx"
    if unit == "us":
        unit_q = "cfs"
        xlsx_sha_template = dir2pp + "Geodata\\SHArea_evaluation_template_us.xlsx"
    else:
        unit_q = "m3/s"
        xlsx_sha_template = dir2pp + "Geodata\\SHArea_evaluation_template_si.xlsx"
    # INSTANTIATE SHArea CLASS OBJECT:
    sha = cSHArC.SHArC(unit, prj_name, version)
    # RUN SHArea ANALYSIS
    try:
        logger.info("Starting SHArea analysis ...")
        project_area = shp_dir + "ProjectArea.shp"
        fields = ["SHAPE@", "gridcode"]
        sha.get_extents(project_area, fields[0])
        sha.set_project_area("ProjectArea")
        for species, lifestages in fish.items():
            for ls in lifestages:
                logger.info("SHArea ANALYSIS FOR " + str(species).upper() +
                            " - " + str(ls).upper())
                # short species-lifestage code, e.g. "chju"
                fili = str(species).lower()[0:2] + str(ls)[0:2]
                xlsx_conditions = [
                    condition_initial + "_sharea_" + fili + ".xlsx",
                    condition_project + "_sharea_" + fili + ".xlsx"
                ]
                xlsx_sha = cIO.Write(xlsx_sha_template, worksheet_no=0,
                                     data_only=False)
                xlsx_sha_name = dir2pp + "Geodata\\SHArea_" + fili + ".xlsx"
                conditions_sha = []
                xc_count = 0
                start_write_col = "B"
                for xc in xlsx_conditions:
                    # instantiate dict for results writing
                    # (entry types are {Q: [Probability, Usable Area]})
                    result_matrix = []
                    try:
                        logger.info(" >> Condition: " + str(xc).split("_sharea_")[0])
                        xlsx_info = cIO.Read(config.dir2sh + "SHArea\\" + xc)
                    except Exception:
                        xlsx_info = ""
                        logger.info("ERROR: Could not access " + str(xc))
                        error = True
                    try:
                        logger.info(
                            " -> Looking up discharge information (RiverArchitect/SHArC/SHArea/)..."
                        )
                        discharges = xlsx_info.read_float_column_short("B", 4)
                        exceedance_pr = xlsx_info.read_float_column_short("E", 4)
                        discharge_dict = dict(zip(discharges, exceedance_pr))
                        raster_list = glob.glob(dir2ras_chsi[xc_count] + "*.tif")
                        logger.info(
                            " -> Matching CHSI rasters with discharge information ..."
                        )
                        for q in discharges:
                            test_ras = dir2ras_chsi[xc_count] + "csi_" + fili + str(int(q)) + ".tif"
                            ras = [r for r in raster_list if (r == test_ras)][0]
                            logger.info(
                                " ---> Calculating habitat area from {0} for Q = {1}"
                                .format(ras, str(q) + unit_q))
                            try:
                                sha.get_usable_area(ras.split(".tif")[0])
                                result_matrix.append(
                                    [q, discharge_dict[q], sha.result])
                            except Exception:
                                logger.info(" * empty sluice for " + str(q))
                        logger.info(" ok")
                    except Exception:
                        logger.info("ERROR: Could not process information from " + str(xc))
                        error = True
                    try:
                        logger.info(" -> Writing discharges and usable area to " +
                                    xlsx_sha_name + " ...")
                        result_matrix.sort(key=itemgetter(0), reverse=True)
                        write_row = 9
                        for res in result_matrix:
                            xlsx_sha.write_row(start_write_col, write_row,
                                               [res[0], res[1], res[2]])  # q, pr, area
                            write_row += 1
                        logger.info(" -> ok")
                    except Exception:
                        logger.info("ERROR: Could not write SHArea data for " +
                                    str(species) + " - " + str(ls))
                        error = True
                    # calculate SHArea for transfer independent from xlsx calculation
                    try:
                        # convert cumulative exceedance to a per-interval pdf
                        ex_pr_pdf = [float(exceedance_pr[0])]
                        for i_pr in range(1, len(exceedance_pr)):
                            if not ((float(exceedance_pr[i_pr - 1]) >= 100.0) or
                                    (exceedance_pr[i_pr] == 0)):
                                ex_pr_pdf.append(
                                    float(exceedance_pr[i_pr] - exceedance_pr[i_pr - 1]))
                            else:
                                ex_pr_pdf.append(0.0)
                        conditions_sha.append(
                            sha.calculate_sha([pr for pr in ex_pr_pdf],
                                              [res[2] for res in result_matrix]))
                    except Exception:
                        logger.info("ERROR: Could not transfer SHArea data for " +
                                    str(species) + " - " + str(ls))
                        error = True
                    xc_count += 1
                    start_write_col = cIO.Read.col_num_to_name(
                        cIO.Read.col_name_to_num(start_write_col) + 5)
                    if xlsx_info != "":
                        # BUGFIX: close_wb() was called unconditionally; when the
                        # read failed above, xlsx_info is "" and the AttributeError
                        # aborted the whole analysis through the outer except
                        xlsx_info.close_wb()
                logger.info(" >> Saving and closing " + xlsx_sha_name + " ...")
                try:
                    xlsx_sha.save_close_wb(xlsx_sha_name)
                except Exception:
                    logger.info("ERROR: Could not save " + xlsx_sha_name)
                del xlsx_sha
                sha.clear_cache(True)  # limit cache size
                try:
                    logger.info(
                        " >> Transferring results (net SHArea gain) to cost table ..."
                    )
                    xlsx_costs = cIO.Write(xlsx_tar_costs, data_only=False)
                    xlsx_costs.write_cell(
                        "G", 3, float(conditions_sha[1] - conditions_sha[0]))
                    xlsx_out_name = prj_name + "_assessment_" + version + "_" + fili + ".xlsx"
                    xlsx_costs.save_close_wb(dir2pp + xlsx_out_name)
                    logger.info(" >> CHECK RESULTS IN: " + dir2pp + xlsx_out_name)
                except Exception:
                    logger.info("ERROR: Could not transfer net SHArea gain.")
                    error = True
                sha.clear_cache()
    except Exception:
        logger.info("ERROR: Could not run SHArea analysis.")
        return -1
    if not error:
        fGl.open_file(dir2pp + xlsx_out_name)
    return sha.cache