def createSourceCategoryOutputs(self):
    """Write the three source-category-level output files.

    Produces the Facility Max Risk and HI workbook, the Facility Cancer Risk
    Exposure file, and the Facility TOSHI Exposure file, each rooted at the
    model's run-level output folder (self.model.rootoutput).
    """
    # Facility Max Risk and HI file
    maxrisk_writer = FacilityMaxRiskandHI(self.model.rootoutput, None, self.model, None, None)
    maxrisk_writer.write()

    # Facility Cancer Risk Exposure file
    cancer_exp_writer = FacilityCancerRiskExp(self.model.rootoutput, None, self.model, None)
    cancer_exp_writer.write()

    # Facility TOSHI Exposure file
    toshi_exp_writer = FacilityTOSHIExp(self.model.rootoutput, None, self.model, None)
    toshi_exp_writer.write()
def generateOutputs(self):
    """Build the multipathway summary dataframe for every facility in this category.

    Generator method: performs the per-facility work (steps a-w referenced in the
    inline comments), accumulates one "pathway" row per summary item, then stores
    the assembled dataframe on self.dataframe / self.data and yields it.
    """
    Logger.log("Creating " + self.name + " report...", None, False)

    # The first step is to load the risk breakdown output for each facility so that we
    # can recover the risk for each pollutant.
    filename = self.categoryName + "_facility_max_risk_and_hi.xlsx"
    facilityMaxRiskAndHI = FacilityMaxRiskandHI(targetDir=self.categoryFolder,
                                                filenameOverride=filename)
    facilityMaxRiskAndHI_df = facilityMaxRiskAndHI.createDataframe()

    pollutantCrosswalk = PollutantCrosswalk(createDataframe=True)
    pollutantCrosswalk_df = pollutantCrosswalk.dataframe
    # Lowercase the pollutant name column so later merges are case-insensitive
    pollutantCrosswalk_df[pollutant_name] = pollutantCrosswalk_df[pollutant_name].str.lower()

    pathways = []
    for facilityId in self.facilityIds:
        targetDir = self.categoryFolder + "/" + facilityId

        # Determine if this facility was run with acute or not
        inputops = InputSelectionOptions(targetDir=targetDir, facilityId=facilityId)
        inputops_df = inputops.createDataframe()
        acute_yn = inputops_df['acute_yn'].iloc[0]

        # Steps a-f in Steve's summary
        maxIndivRisks = MaximumIndividualRisks(targetDir=targetDir, facilityId=facilityId)
        maxIndivRisks_df = maxIndivRisks.createDataframe()
        # Replace nan with empty string
        maxIndivRisks_df.replace('nan', '', regex=True, inplace=True)

        riskBkdn = RiskBreakdown(targetDir=targetDir, facilityId=facilityId)
        riskBkdn_df = riskBkdn.createDataframe()
        # Keep only facility-total cancer-risk rows at the max indiv risk site,
        # excluding the "All ..." pollutant rollup rows.
        riskBkdn_df = riskBkdn_df.loc[(riskBkdn_df[site_type] == 'Max indiv risk') &
                                      (riskBkdn_df[parameter] == 'Cancer risk') &
                                      (riskBkdn_df[source_id].str.contains('Total')) &
                                      (~riskBkdn_df[pollutant].str.contains('All '))]

        # Lowercase the pollutant name column
        riskBkdn_df[pollutant] = riskBkdn_df[pollutant].str.lower()

        # keep all records but give default designation of 'POL' to pollutants
        # which are not in crosswalk
        rbkdn_df = riskBkdn_df.merge(pollutantCrosswalk_df, left_on=[pollutant],
                                     right_on=[pollutant_name], how="left")
        rbkdn_df[designation] = rbkdn_df[designation].fillna('POL')
        rbkdn_df = rbkdn_df.groupby(designation).sum().reset_index()

        maxRiskAndHI_df = facilityMaxRiskAndHI_df.loc[facilityMaxRiskAndHI_df['Facil_id'] == facilityId]
        maxIndivRisks_df = maxIndivRisks_df.loc[maxIndivRisks_df[parameter] == 'Cancer risk']
        # NOTE(review): these two reset_index() calls are no-ops — the result is
        # neither assigned nor inplace. Confirm whether assignment was intended.
        facilityMaxRiskAndHI_df.reset_index()
        maxIndivRisks_df.reset_index()

        # Per-designation risks; default to 0 when a designation is absent
        asRow = rbkdn_df.loc[rbkdn_df[designation] == 'As']
        asRisk = 0 if asRow.empty else asRow.iloc[0][value]
        pahRow = rbkdn_df.loc[rbkdn_df[designation] == 'PAH']
        pahRisk = 0 if pahRow.empty else pahRow.iloc[0][value]
        dfRow = rbkdn_df.loc[rbkdn_df[designation] == 'DF']
        dfRisk = 0 if dfRow.empty else dfRow.iloc[0][value]

        # The facility's MIR pathway row
        pathway = [self.categoryName, facilityId, maxRiskAndHI_df.iloc[0][rural_urban],
                   'MIR', 'All HAP',
                   maxIndivRisks_df.iloc[0][fips] + maxIndivRisks_df.iloc[0][block],
                   maxIndivRisks_df.iloc[0][lat], maxIndivRisks_df.iloc[0][lon],
                   maxIndivRisks_df.iloc[0][population],
                   maxRiskAndHI_df.iloc[0]['mx_can_rsk'],
                   asRisk, pahRisk, dfRisk]
        pathways.append(pathway)

        # Steps g-j
        allinner = AllInnerReceptors(targetDir=targetDir, facilityId=facilityId, acuteyn=acute_yn)
        allinner_df = allinner.createDataframe()
        # Only keep records that have non-zero population or represent non-overlapped user receptors
        allinner_df = allinner_df.loc[((allinner_df[block].str.contains('U')) |
                                       (allinner_df[population] > 0)) &
                                      (allinner_df[overlap] == 'N')]
        # group by and sum by fips, block, population, lat, lon, pollutant
        allinner_df = allinner_df.groupby(by=[fips, block, population, lat, lon, pollutant],
                                          as_index=False) \
            .sum().reset_index(drop=True)
        # compute risk with immediate above result
        allinner_df['risk'] = allinner_df.apply(lambda x: self.calculateRisk(x[pollutant], x[conc]), axis=1)
        # keep all records but give default designation of 'POL' to pollutants
        # which are not in crosswalk
        allinnermerged_df = allinner_df.merge(pollutantCrosswalk_df, left_on=[pollutant],
                                              right_on=[pollutant_name], how="left")
        allinnermerged_df[designation] = allinnermerged_df[designation].fillna('POL')
        # Aggregate concentration, grouped by FIPS/block
        inner_summed = allinnermerged_df.groupby(by=[fips, block, population, lat, lon, designation],
                                                 as_index=False)\
            .sum().reset_index(drop=True)

        # Steps k-n: outer receptor output may be split across several CSV files
        allouter_summed = pd.DataFrame()
        listOuter = []
        listDirfiles = os.listdir(targetDir)
        pattern = "*_all_outer_receptors*.csv"
        for entry in listDirfiles:
            if fnmatch.fnmatch(entry, pattern):
                listOuter.append(entry)

        anyOuters = "N"
        for f in listOuter:
            allouter = AllOuterReceptors(targetDir=targetDir, acuteyn=acute_yn, filenameOverride=f)
            allouter_df = allouter.createDataframe()
            if not allouter_df.empty:
                anyOuters = "Y"
                # Only keep records that have non-zero population or represent non-overlapped user receptors
                allouter_df = allouter_df.loc[((allouter_df[block].str.contains('U')) |
                                               (allouter_df[population] > 0)) &
                                              (allouter_df[overlap] == 'N')]
                allouter_df = allouter_df.groupby(by=[fips, block, population, lat, lon, pollutant],
                                                  as_index=False) \
                    .sum().reset_index(drop=True)
                allouter_df['risk'] = allouter_df.apply(lambda x: self.calculateRisk(x[pollutant], x[conc]), axis=1)
                # keep all records but give default designation of 'POL' to pollutants
                # which are not in crosswalk
                alloutermerged_df = allouter_df.merge(pollutantCrosswalk_df, left_on=[pollutant],
                                                      right_on=[pollutant_name], how="left")
                alloutermerged_df[designation] = alloutermerged_df[designation].fillna('POL')
                outer_summed = alloutermerged_df.groupby(by=[fips, block, population, lat, lon, designation],
                                                         as_index=False) \
                    .sum().reset_index(drop=True)
                # NOTE(review): DataFrame.append was removed in pandas 2.x; this
                # code assumes an older pandas.
                allouter_summed = allouter_summed.append(outer_summed)

        if anyOuters == "Y":
            riskblocks_df = inner_summed.append(allouter_summed)
        else:
            riskblocks_df = inner_summed

        # Steps o-r
        asRisksPathway = self.getRisksPathway('As', riskblocks_df, facilityId,
                                              maxRiskAndHI_df, maxIndivRisks_df)
        pahRisksPathway = self.getRisksPathway('PAH', riskblocks_df, facilityId,
                                               maxRiskAndHI_df, maxIndivRisks_df)
        dfRisksPathway = self.getRisksPathway('DF', riskblocks_df, facilityId,
                                              maxRiskAndHI_df, maxIndivRisks_df)
        pathways.append(asRisksPathway)
        pathways.append(pahRisksPathway)
        pathways.append(dfRisksPathway)

        # Steps s-w: bucket each risk block into one of 8 compass octants around
        # the facility center, then take the closest block per octant.
        octants = {'E': [], 'N': [], 'NE': [], 'NW': [], 'S': [], 'SE': [], 'SW': [], 'W': []}
        facCenterLat = maxRiskAndHI_df.iloc[0]['fac_center_latitude']
        facCenterLon = maxRiskAndHI_df.iloc[0]['fac_center_longitude']
        for index, row in riskblocks_df.iterrows():
            bearingValue, distanceValue = self.bearingDistance(facCenterLat, facCenterLon,
                                                               row[lat], row[lon])
            # Note: this assigns into the iterrows() row copy, not riskblocks_df itself.
            row[distance] = distanceValue
            if bearingValue > 337.5 or bearingValue <= 22.5:
                octants['N'].append(row)
            elif bearingValue > 22.5 and bearingValue <= 67.5:
                octants['NE'].append(row)
            elif bearingValue > 67.5 and bearingValue <= 112.5:
                octants['E'].append(row)
            elif bearingValue > 112.5 and bearingValue <= 157.5:
                octants['SE'].append(row)
            elif bearingValue > 157.5 and bearingValue <= 202.5:
                octants['S'].append(row)
            elif bearingValue > 202.5 and bearingValue <= 247.5:
                octants['SW'].append(row)
            elif bearingValue > 247.5 and bearingValue <= 292.5:
                octants['W'].append(row)
            elif bearingValue > 292.5 and bearingValue <= 337.5:
                octants['NW'].append(row)

        for key in octants.keys():
            minDistanceRow = None
            rows = octants[key]
            print("Octant " + key + " has " + str(len(rows)) + " rows.")
            # Find the block closest to the facility center in this octant
            for row in rows:
                if minDistanceRow is None or row[distance] < minDistanceRow[distance]:
                    minDistanceRow = row
            if minDistanceRow is not None:
                centroidPathway = self.getCentroidPathway(key, minDistanceRow[fips],
                                                          minDistanceRow[block],
                                                          riskblocks_df, facilityId,
                                                          maxRiskAndHI_df)
                pathways.append(centroidPathway)

    pathways_df = pd.DataFrame(pathways,
                               columns=[category, fac_id, rural_urban, octant, centroid,
                                        fips_block, lat, lon, population, total_risk,
                                        total_as_risk, total_pah_risk, total_df_risk])

    # Put final df into array
    self.dataframe = pathways_df
    self.data = self.dataframe.values
    yield self.dataframe
def process(self):
    """Run the full post-modeling output pipeline for one facility.

    Creates every per-facility output file (receptor tables, chronic summaries,
    risk/exposure workbooks, risk breakdown, incidence, acute outputs, KMZ) and
    appends this facility's rows to the run-level facility output files.
    Checks self.abort between major steps so a long run can be cancelled.
    """
    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    #----------- create input selection file -----------------
    input_selection = InputSelectionOptions(self.outdir, self.facid, self.model, None)
    input_selection.write()
    Logger.logMessage("Completed InputSelectionOptions output")

    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    #----------- create All_Polar_Receptor output file -----------------
    all_polar_receptors = AllPolarReceptors(self.outdir, self.facid, self.model,
                                            self.plot_df, self.acute_yn)
    all_polar_receptors.write(generateOnly=False)
    self.model.all_polar_receptors_df = all_polar_receptors.dataframe
    Logger.logMessage("Completed AllPolarReceptors output")

    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    # Was this facility run with alternate receptors? If so, we need to use the
    # output modules that do not reference census data fields like FIPs and
    # block number.
    altrec = self.model.altRec_optns.get("altrec", None)
    altrec_nopop = self.model.altRec_optns.get("altrec_nopop", None)

    #----------- create All_Inner_Receptor output file -----------------
    all_inner_receptors = AllInnerReceptorsNonCensus(self.outdir, self.facid, self.model, self.plot_df, self.acute_yn) if altrec \
        else AllInnerReceptors(self.outdir, self.facid, self.model, self.plot_df, self.acute_yn)
    all_inner_receptors.write(generateOnly=False)
    self.model.all_inner_receptors_df = all_inner_receptors.dataframe
    Logger.logMessage("Completed AllInnerReceptors output")

    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    #----------- create All_Outer_Receptor output file -----------------
    try:
        all_outer_receptors = AllOuterReceptorsNonCensus(self.outdir, self.facid, self.model, self.plot_df, self.acute_yn) if altrec \
            else AllOuterReceptors(self.outdir, self.facid, self.model, self.plot_df, self.acute_yn)
        if not self.model.outerblks_df.empty:
            all_outer_receptors.write(generateOnly=self.generateOnly)
            self.model.all_outer_receptors_df = all_outer_receptors.dataframe
            Logger.logMessage("Completed AllOuterReceptors output")
        else:
            Logger.logMessage(
                "No outer receptors. Did not create AllOuterReceptors output."
            )
    except BaseException as e:
        # Best-effort: log the traceback and continue with the remaining outputs.
        var = traceback.format_exc()
        Logger.logMessage(var)

    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    #----------- create temporal output file if necessary -----------------
    if self.temporal:
        temporal = Temporal(self.outdir, self.facid, self.model, self.plot_df)
        temporal.write()
        Logger.logMessage("Completed Temporal output")

    #----------- create Ring_Summary_Chronic data -----------------
    ring_summary_chronic = RingSummaryChronic(self.outdir, self.facid, self.model, self.plot_df)
    generator = ring_summary_chronic.generateOutputs()
    # Drain the generator; keep the final dataframe
    for batch in generator:
        ring_summary_chronic_df = ring_summary_chronic.dataframe
    Logger.logMessage("Completed RingSummaryChronic output")

    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    #----------- create Block_Summary_Chronic data -----------------
    block_summary_chronic = BlockSummaryChronicNonCensus(targetDir=self.outdir, facilityId=self.facid, model=self.model, plot_df=self.plot_df, outerAgg=all_outer_receptors.outerAgg) if altrec else \
        BlockSummaryChronic(self.outdir, self.facid, self.model, self.plot_df, all_outer_receptors.outerAgg)
    generator = block_summary_chronic.generateOutputs()
    for batch in generator:
        self.model.block_summary_chronic_df = block_summary_chronic.dataframe
    Logger.logMessage("Completed BlockSummaryChronic output")

    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    # Assign rec_type to block summary chronic from the inner and outer census DFs.
    if not self.model.outerblks_df.empty:
        allrectype = pd.concat([
            self.model.innerblks_df[[utme, utmn, rec_type]],
            self.model.outerblks_df[[utme, utmn, rec_type]]
        ], ignore_index=True)
    else:
        allrectype = self.model.innerblks_df[[utme, utmn, rec_type]]
    blksummary_w_rectype = pd.merge(self.model.block_summary_chronic_df, allrectype,
                                    how="left", on=[utme, utmn])

    # Combine ring summary chronic and block summary chronic dfs into one and
    # assign a receptor type ('PG' marks polar-grid rows).
    ring_columns = [
        lat, lon, mir, hi_resp, hi_live, hi_neur, hi_deve, hi_repr, hi_kidn,
        hi_ocul, hi_endo, hi_hema, hi_immu, hi_skel, hi_sple, hi_thyr, hi_whol,
        overlap
    ]
    ring_risk = ring_summary_chronic_df[ring_columns].copy()
    ring_risk[rec_type] = 'PG'
    ring_risk['blk_type'] = 'PG'

    # Block and population are needed in non-altrec runs to ensure schools and
    # monitors are not the MIR
    if not altrec:
        block_columns = ring_columns + [
            rec_type, 'blk_type', block, population
        ]
        ring_risk[block] = ''
        ring_risk[population] = 0
    else:
        block_columns = ring_columns + [rec_type, 'blk_type', rec_id]
        ring_risk[rec_id] = ''
    block_risk = blksummary_w_rectype[block_columns]
    # NOTE(review): DataFrame.append was removed in pandas 2.x; this code
    # assumes an older pandas.
    self.model.risk_by_latlon = ring_risk.append(block_risk).reset_index(
        drop=True).infer_objects()

    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    #----------- create noncancer risk exposure output file -----------------
    noncancer_risk_exposure = NoncancerRiskExposure(
        self.outdir, self.facid, self.model, self.plot_df,
        self.model.block_summary_chronic_df)
    noncancer_risk_exposure.write()
    Logger.logMessage("Completed NoncancerRiskExposure output")

    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    #----------- create cancer risk exposure output file -----------------
    cancer_risk_exposure = CancerRiskExposure(
        self.outdir, self.facid, self.model, self.plot_df,
        self.model.block_summary_chronic_df)
    cancer_risk_exposure.write()
    Logger.logMessage("Completed CancerRiskExposure output")

    if self.abort.is_set():
        Logger.logMessage("Terminating output processing...")
        return

    #----------- create Maximum_Individual_Risk output file ---------------
    max_indiv_risk = MaximumIndividualRisksNonCensus(self.outdir, self.facid, self.model, self.plot_df) if altrec \
        else MaximumIndividualRisks(self.outdir, self.facid, self.model, self.plot_df)
    max_indiv_risk.write()
    self.model.max_indiv_risk_df = max_indiv_risk.dataframe
    Logger.logMessage("Completed MaximumIndividualRisks output")

    #----------- create Maximum_Offsite_Impacts output file ---------------
    # "D" blk_type rows are the discrete inner receptors considered for offsite impacts
    inner_recep_risk_df = self.model.block_summary_chronic_df[
        self.model.block_summary_chronic_df["blk_type"] == "D"]
    max_offsite_impacts = MaximumOffsiteImpactsNonCensus(self.outdir, self.facid, self.model, self.plot_df, ring_summary_chronic_df, inner_recep_risk_df) if altrec else \
        MaximumOffsiteImpacts(self.outdir, self.facid, self.model, self.plot_df, ring_summary_chronic_df, inner_recep_risk_df)
    max_offsite_impacts.write()
    Logger.logMessage("Completed MaximumOffsiteImpacts output")

    # For any rows in ring_summary_chronic and block_summary_chronic where overlap = Y,
    # replace mir and HI's with values from max_indiv_risk and write data to csv output.
    # The column slices below (7:22 and 10:25) are each 15 columns wide —
    # presumably MIR plus the 14 HI columns; confirm against the writers' layouts.
    replacement = self.model.max_indiv_risk_df[value].values
    ringrows = np.where(ring_summary_chronic_df[overlap] == 'Y')[0]
    if len(ringrows) > 0:
        ring_summary_chronic.data[ringrows, 7:22] = replacement
    blockrows = np.where(
        self.model.block_summary_chronic_df[overlap] == 'Y')[0]
    if len(blockrows) > 0:
        block_summary_chronic.data[blockrows, 10:25] = replacement

    # Now write the RingSummaryChronic and BlockSummaryChronic outputs
    ring_summary_chronic.write()
    block_summary_chronic.write()

    #----------- create Risk Breakdown output file ------------------------
    risk_breakdown = RiskBreakdown(self.outdir, self.facid, self.model,
                                   self.plot_df, self.acute_yn)
    risk_breakdown.write()
    Logger.logMessage("Completed RiskBreakdown output")

    #----------- create Incidence output file ------------------------
    if not altrec_nopop:
        outerInc_list = []
        # outerInc keys look like (source_id, pollutant, emis_type) tuples
        # mapping to incidence values — TODO confirm against AllOuterReceptors.
        for key in all_outer_receptors.outerInc.keys():
            insert_list = [
                key[0], key[1], key[2], all_outer_receptors.outerInc[key]
            ]
            outerInc_list.append(insert_list)
        outerInc_df = pd.DataFrame(
            outerInc_list, columns=[source_id, pollutant, emis_type, inc])
        incidence = Incidence(self.outdir, self.facid, self.model,
                              self.plot_df, outerInc_df)
        incidence.write()
        Logger.logMessage("Completed Incidence output")

        #----------- append to facility max risk output file ------------------
        fac_max_risk = FacilityMaxRiskandHINonCensus(self.model.rootoutput, self.facid, self.model, self.plot_df, incidence.dataframe) if altrec else \
            FacilityMaxRiskandHI(targetDir=self.model.rootoutput, facilityId=self.facid,
                                 model=self.model, plot_df=self.plot_df,
                                 incidence=incidence.dataframe)
        fac_max_risk.writeWithoutHeader()

        #----------- append to facility cancer risk exposure output file ------------------
        fac_risk_exp = FacilityCancerRiskExp(self.model.rootoutput, self.facid,
                                             self.model, self.plot_df)
        fac_risk_exp.writeWithoutHeader()

        #----------- append to facility TOSHI exposure output file ------------------
        fac_toshi_exp = FacilityTOSHIExp(self.model.rootoutput, self.facid,
                                         self.model, self.plot_df)
        fac_toshi_exp.writeWithoutHeader()

    #=================== Acute processing ==============================================
    # If acute was run for this facility, read the acute plotfile and create the acute outputs
    if self.acute_yn == 'Y':
        if self.abort.is_set():
            Logger.logMessage("Terminating output processing...")
            return

        #----------- create Acute Chemical Populated output file ------------------------
        acutechempop = AcuteChemicalPopulatedNonCensus(self.outdir, self.facid, self.model, self.model.acuteplot_df) if altrec \
            else AcuteChemicalPopulated(self.outdir, self.facid, self.model, self.model.acuteplot_df)
        acutechempop.write()
        Logger.logMessage("Completed Acute Chemical Populated output")

        #----------- create Acute Chemical Max output file ------------------------
        acutechemmax = AcuteChemicalMaxNonCensus(self.outdir, self.facid, self.model, self.model.acuteplot_df) if altrec \
            else AcuteChemicalMax(self.outdir, self.facid, self.model, self.model.acuteplot_df)
        acutechemmax.write()
        Logger.logMessage("Completed Acute Chemical Max output")

        #----------- create Acute Breakdown output file ------------------------
        acutebkdn = AcuteBreakdown(self.outdir, self.facid, self.model,
                                   self.model.acuteplot_df, None, False,
                                   acutechempop.dataframe, acutechemmax.dataframe)
        acutebkdn.write()
        Logger.logMessage("Completed Acute Breakdown output")

    #create facility kml
    kmlWriter = KMLWriter()
    try:
        if not altrec:
            kmlWriter.write_facility_kml(
                self.facid, self.model.computedValues['cenlat'],
                self.model.computedValues['cenlon'], self.outdir, self.model)
        else:
            kmlWriter.write_facility_kml_NonCensus(
                self.facid, self.model.computedValues['cenlat'],
                self.model.computedValues['cenlon'], self.outdir, self.model)
    except BaseException as e:
        # KML failure is non-fatal: log the traceback and continue.
        # exc_type, exc_obj, exc_tb = sys.exc_info()
        var = traceback.format_exc()
        Logger.logMessage(var)

    Logger.logMessage("Completed creating KMZ file for " + self.facid)
def createReports(self, arguments=None):
    """Run the risk summary reports selected in the GUI against the chosen run folder.

    Determines the facility list and modeling-group name from the
    facility_max_risk_and_hi output file, validates the report selections,
    then drives SummaryManager for each selected report, echoing progress to
    both the GUI log widget and hem4.log.
    """
    ready = False
    print('ready')
    #check to see if there is a directory location
    print(self.fullpath)
    #set log file to append to in folder
    logpath = self.fullpath + "/hem4.log"
    #open log
    self.logfile = open(logpath, 'a')
    # NOTE(review): 'now' is assigned but never used.
    now = str(datetime.now())

    try:
        # Figure out which facilities will be included in the report.
        # Facilities listed in the facility_max_risk_and_hi HEM4 output will be used
        # and the modeling group name is taken from the first part of the filename.
        skeleton = os.path.join(self.fullpath, '*facility_max_risk_and_hi.xl*')
        print(skeleton)
        fname = glob.glob(skeleton)
        print(fname)
        if fname:
            head, tail = os.path.split(fname[0])
            groupname = tail[:tail.find('facility_max_risk_and_hi')-1]
            facmaxrisk = FacilityMaxRiskandHI(targetDir=self.fullpath, filenameOverride=tail)
            facmaxrisk_df = facmaxrisk.createDataframe()
            faclist = facmaxrisk_df['Facil_id'].tolist()
        else:
            # NOTE(review): ready is forced False here, but the checks further
            # below can set it True again, and then 'faclist'/'groupname' would
            # be unbound — confirm this path cannot reach the run section.
            Logger.logMessage("Cannot generate summaries because there is no Facility_Max_Risk_and_HI Excel file \
in the folder you selected.")
            messagebox.showinfo("Error", "Cannot generate summaries because there is no Facility_Max_Risk_and_HI Excel file \
in the folder you selected.")
            ready = False
    except Exception as e:
        print(e)
        print("No facilities selected.", "Please select a run folder.")
        messagebox.showinfo("No facilities selected", "Please select a run folder.")
        ready = False

    #get reports and set arguments
    reportNames = []
    reportNameArgs = {}
    try:
        for report in self.checked:
            print(self.checked)
            if report == 'Max Risk':
                reportNames.append('MaxRisk')
                reportNameArgs['MaxRisk'] = None
            if report == 'Cancer Drivers':
                reportNames.append('CancerDrivers')
                reportNameArgs['CancerDrivers'] = None
            if report == 'Hazard Index Drivers':
                reportNames.append('HazardIndexDrivers')
                reportNameArgs['HazardIndexDrivers'] = None
            if report == 'Risk Histogram':
                reportNames.append('Histogram')
                reportNameArgs['Histogram'] = None
            if report == 'Hazard Index Histogram':
                reportNames.append('HI_Histogram')
                reportNameArgs['HI_Histogram'] = None
            if report == 'Incidence Drivers':
                reportNames.append('IncidenceDrivers')
                reportNameArgs['IncidenceDrivers'] = None
            if report == "Acute Impacts":
                reportNames.append('AcuteImpacts')
                reportNameArgs['AcuteImpacts'] = None
            if report == "Source Type Risk Histogram":
                reportNames.append('SourceTypeRiskHistogram')
                # Pass starting position and number of characters
                # Translate user supplied starting position to array index value (0-based indexing)
                if self.pos_num.get() == '' or self.pos_num.get() == '0':
                    startpos = 0
                    print(startpos)
                else:
                    startpos = int(self.pos_num.get()) - 1
                    print(startpos)
                # Convert non-numeric to 0 (handles blank case)
                if self.chars_num.get().isnumeric():
                    numchars = int(self.chars_num.get())
                else:
                    numchars = 0
                print(numchars)
                reportNameArgs['SourceTypeRiskHistogram'] = [startpos, numchars]
            if report == "Multipathway":
                reportNames.append('MultiPathway')
                reportNameArgs['MultiPathway'] = None
    except Exception as e:
        print(e)

    #add run checks
    if len(self.checked) == 0:
        messagebox.showinfo("No report selected", "Please select one or more report types to run.")
        ready = False
    else:
        #check if source type has been selected
        if "Source Type Risk Histogram" in self.checked:
            if startpos < 0:
                messagebox.showinfo('Invalid starting position',
                                    'Starting position of the sourcetype ID must be > 0.')
                ready = False
            else:
                if numchars <= 0:
                    messagebox.showinfo('Invalid number of sourcetype ID characters',
                                        'Please enter a valid number of characters of the sourcetype ID.')
                    ready = False
                else:
                    ready = True
        else:
            ready = True

    #if checks have been passed
    if ready == True:
        running_message = "\nRunning report(s) on facilities: " + ', '.join(faclist)
        #write to log
        self.logfile.write(str(datetime.now()) + ": " + running_message + "\n")
        self.nav.log.scr.configure(state='normal')
        self.nav.log.scr.insert(tk.INSERT, running_message)
        self.nav.log.scr.insert(tk.INSERT, "\n")
        self.nav.log.scr.configure(state='disabled')

        summaryMgr = SummaryManager(self.fullpath, groupname, faclist)

        #loop through for each report selected
        for reportName in reportNames:
            report_message = "Creating " + reportName + " report."
            self.nav.log.scr.configure(state='normal')
            self.nav.log.scr.insert(tk.INSERT, report_message)
            self.nav.log.scr.insert(tk.INSERT, "\n")
            self.nav.log.scr.configure(state='disabled')
            self.logfile.write(str(datetime.now()) + ": " + report_message + "\n")

            args = reportNameArgs[reportName]
            summaryMgr.createReport(self.fullpath, reportName, args)
            if summaryMgr.status == True:
                report_complete = reportName + " complete."
                self.nav.log.scr.configure(state='normal')
                self.nav.log.scr.insert(tk.INSERT, report_complete)
                self.nav.log.scr.insert(tk.INSERT, "\n")
                self.nav.log.scr.configure(state='disabled')
                self.logfile.write(str(datetime.now()) + ": " + report_complete + "\n")
            else:
                # Stop at the first failed report
                break

        self.nav.log.scr.configure(state='normal')
        self.nav.log.scr.insert(tk.INSERT, "Risk Summary Reports Finished.")
        self.nav.log.scr.insert(tk.INSERT, "\n")
        self.nav.log.scr.configure(state='disabled')
        self.logfile.write(str(datetime.now()) + ": " + "Risk Summary Reports Finished."
                           + "\n")
        messagebox.showinfo("Summary Reports Finished",
                            "Risk summary reports for " + ', '.join(faclist) + " run.")

        # Reset the Source Type Risk Histogram entry widgets back to blank labels
        if "Source Type Risk Histogram" in self.checked:
            self.pos.destroy()
            self.pos_num.destroy()
            self.chars.destroy()
            self.chars_num.destroy()

            z_label = tk.Label(self.r6, font=TEXT_FONT, width=22, anchor='w',
                               bg=self.tab_color, text="")
            z_label.grid(row=1, column=3, padx=5, sticky="W")
            #unchecked box
            self.vLabel = tk.Label(self.r6, text="", width=5, bg=self.tab_color)
            # self.vLabel.image = self.uncheckedIcon # keep a reference!
            self.vLabel.grid(row=1, column=4, padx=10, sticky='W')
            z_label.bind("<Enter>", partial(self.fake_config, z_label, self.vLabel,
                                            self.r6, 'light grey'))
            z_label.bind("<Leave>", partial(self.fake_config, z_label, self.vLabel,
                                            self.r6, self.tab_color))

            w_label = tk.Label(self.r7, font=TEXT_FONT, width=32, anchor='w',
                               bg=self.tab_color, text="")
            w_label.grid(row=1, column=3, padx=5, sticky="W")
            #unchecked box
            self.uLabel = tk.Label(self.r7, text="", width=5, bg=self.tab_color)
            # self.uLabel.image = self.uncheckedIcon # keep a reference!
            self.uLabel.grid(row=1, column=4, padx=10, sticky='W')
            w_label.bind("<Enter>", partial(self.fake_config, w_label, self.uLabel,
                                            self.r7, 'light grey'))
            w_label.bind("<Leave>", partial(self.fake_config, w_label, self.uLabel,
                                            self.r7, self.tab_color))

        # Reset the checkboxes and folder selector for the next run
        for icon in self.checked_icons:
            icon.configure(image=self.uncheckedIcon)
        self.folder_select['text'] = "Select output folder"
        self.nav.summaryLabel.configure(image=self.nav.summaryIcon)

    self.logfile.close()
def generate(self):
    """Aggregate block-summary-chronic risk across all facilities and write the
    combined MIR/HI workbook (MIR_HI_allreceptors.xlsx).

    For each facility folder, reads the BlockSummaryChronic output, keeps only
    receptors within self.radius (same units as UTM coordinates) of the
    facility center, then sums MIR and the HI columns per receptor — grouped
    by FIPS/block for census runs, or by rec_id for alternate-receptor runs.
    Facilities whose outputs cannot be read are skipped with a console message.
    """
    # Used for finding the fac center.
    # NOTE(review): the filename override is hard-coded to one run group
    # ("PrimCop_Actuals2"); consider deriving it from the run folder contents.
    maxRiskAndHI = FacilityMaxRiskandHI(
        targetDir=self.output_dir,
        filenameOverride="PrimCop_Actuals2_facility_max_risk_and_hi.xlsx")
    maxRiskAndHI_df = maxRiskAndHI.createDataframe()

    # Collect each facility's in-radius blocks and concatenate once at the end
    # (DataFrame.append is deprecated and removed in pandas 2.x).
    frames = []
    for facilityId in self.facilityIds:
        print("Inspecting facility folder " + facilityId + " for output files...")
        try:
            targetDir = self.output_dir + "/" + facilityId

            maxrisk_df = maxRiskAndHI_df.loc[maxRiskAndHI_df['Facil_id'] == facilityId]
            center_lat = maxrisk_df.iloc[0]['fac_center_latitude']
            center_lon = maxrisk_df.iloc[0]['fac_center_longitude']
            ceny, cenx, zone, hemi, epsg = UTM.ll2utm(center_lat, center_lon)

            blockSummaryChronic = BlockSummaryChronicNonCensus(targetDir=targetDir, facilityId=facilityId) if self.altrec == 'Y' else \
                BlockSummaryChronic(targetDir=targetDir, facilityId=facilityId)
            bsc_df = blockSummaryChronic.createDataframe()

            # fac_count lets the final aggregation report how many facilities
            # contributed risk to each receptor.
            bsc_df['fac_count'] = 1
            # Straight-line UTM distance from the facility center
            bsc_df[distance] = np.sqrt((cenx - bsc_df.utme)**2 + (ceny - bsc_df.utmn)**2)
            maxdist = self.radius
            bsc_df = bsc_df.query('distance <= @maxdist').copy()
            frames.append(bsc_df)
        except BaseException as e:
            # Best-effort: a facility with missing/corrupt outputs is skipped,
            # not fatal to the whole report.
            print("Error gathering output information: " + repr(e))
            print("Skipping facility " + facilityId)
            continue

    blocksummary_df = pd.concat(frames) if frames else pd.DataFrame()
    # Bug fix: the dedup result was previously discarded (the call was not
    # assigned back), so exact-duplicate rows could be double-counted below.
    blocksummary_df = blocksummary_df.drop_duplicates().reset_index(drop=True)

    columns = [
        fips, block, lon, lat, population, mir, hi_resp, hi_live, hi_neur,
        hi_deve, hi_repr, hi_kidn, hi_ocul, hi_endo, hi_hema, hi_immu,
        hi_skel, hi_sple, hi_thyr, hi_whol, 'fac_count'
    ]
    if self.altrec == 'N':
        aggs = {
            lat: 'first', lon: 'first', overlap: 'first', elev: 'first',
            utme: 'first', blk_type: 'first', utmn: 'first', hill: 'first',
            fips: 'first', block: 'first', population: 'first', mir: 'sum',
            hi_resp: 'sum', hi_live: 'sum', hi_neur: 'sum', hi_deve: 'sum',
            hi_repr: 'sum', hi_kidn: 'sum', hi_ocul: 'sum', hi_endo: 'sum',
            hi_hema: 'sum', hi_immu: 'sum', hi_skel: 'sum', hi_sple: 'sum',
            hi_thyr: 'sum', hi_whol: 'sum',
            'fac_count': 'sum'
        }
        # Aggregate concentration, grouped by FIPS/block
        risk_summed = blocksummary_df.groupby([fips, block]).agg(aggs)[columns]
    else:
        aggs = {
            lat: 'first', lon: 'first', overlap: 'first', elev: 'first',
            utme: 'first', blk_type: 'first', utmn: 'first', hill: 'first',
            rec_id: 'first', population: 'first', mir: 'sum',
            hi_resp: 'sum', hi_live: 'sum', hi_neur: 'sum', hi_deve: 'sum',
            hi_repr: 'sum', hi_kidn: 'sum', hi_ocul: 'sum', hi_endo: 'sum',
            hi_hema: 'sum', hi_immu: 'sum', hi_skel: 'sum', hi_sple: 'sum',
            hi_thyr: 'sum', hi_whol: 'sum'
        }
        # Aggregate concentration, grouped by rec_id.
        # NOTE(review): 'columns' selects fips/block/'fac_count', which this
        # aggregation does not produce — the altrec path looks like it would
        # raise a KeyError. Left as-is pending confirmation of intended columns.
        risk_summed = blocksummary_df.groupby([rec_id]).agg(aggs)[columns]

    # NOTE(review): in older pandas Series.apply the second positional argument
    # is convert_dtype, not an extra arg to round_to_sigfig — confirm whether
    # args=(1,) was intended here.
    risk_summed['mir_rounded'] = risk_summed[mir].apply(
        self.round_to_sigfig, 1)

    path = os.path.join(self.output_dir, 'MIR_HI_allreceptors.xlsx')
    risk_summed.to_excel(path, index=False, columns=self.getColumns(),
                         header=self.getHeader())