def processFile(domainName, user, edbName, filePath):
    logger=logging.getLogger('loadRUSedb.processFile')
    dmn=domain(domainName)
    edb=Edb(domainName,user,edbName)
    if not dmn.edbUserExist(user):
        logger.error("Directory for user '"+user+"' does not exist in edb directory for domain '"+domainName+"'")
        sys.exit()
    if not edb.exists():
        edb.create()
        logger.info("EDB '"+edb.name+"' for user '"+user+"' in domain '"+dmn.name+"' created")

    if filePath[-3:]==".gz":
        utilities.gunzip(filePath)
        filePath=filePath[:-3]
    filePath=filePath[:-5]  #removing .data from the file name

    #converting from dos-format to linux
    utilities.dos2linux(filePath+".asc")
    utilities.dos2linux(filePath+".data")

    #create grid object
    gridname=path.basename(filePath)
    grid=Egrid(dmn.name,user,edb.name,gridname)
    grid.load(filePath)  #load grid into edb
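# A minimal usage sketch for processFile. All names below are hypothetical;
# the function expects the path to a '<grid>.data' file (optionally gzipped)
# with a matching '<grid>.asc' header file alongside it.
def _exampleProcessFile():
    processFile(domainName="shipair",
                user="sjov",
                edbName="RUS_2012",
                filePath="/tmp/rus/n2o_grid.data.gz")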
def main():
    #-----------Setting up and using option parser-----------------------
    parser=OptionParser(usage=usage, version=version)
    parser.add_option("-u",'--user',
                      action="store",dest="user",
                      help="Name of target edb user")
    parser.add_option("-e","--edb",
                      action="store",dest="edb",
                      help="Name of target edb")
    parser.add_option("-i","--infile",
                      action="store",dest="infile",
                      help="Input csv file")
#    parser.add_option("-y","--year",
#                      action="store",dest="year",
#                      help="Only store sources for given year")
    parser.add_option("-v",dest='loglevel',
                      action="store_const",default=get_loglevel(),
                      help="produce verbose output")
    parser.add_option("-t", "--template",
                      action="store",dest="cf",default=None,
                      help="Generate default controlfile")
    parser.add_option("-o", "--outfile",
                      action="store",dest="outfile",default=None,
                      help="Name of outfiles (without extension)")
    parser.add_option("-d","--delimiter",
                      action="store",dest="delimiter",default="\t",
                      help="Delimiter used in csv-file")
    parser.add_option("-c","--filterCol",
                      action="store",dest="filterCol",
                      help="Header of column to use as filter")
    parser.add_option("-f","--filterVal",
                      action="store",dest="filterVal",
                      help="Value to use in filter")
#    parser.add_option("-g", "--geocodeRasterDir",
#                      action="store",dest="geocodeRasterDir",default=None,
#                      help="Directory with geocode rasters")
    (options, args) = parser.parse_args()

    #--------------------Init logger-----------------------
#    rootLogger = logger.RootLogger(level=options.loglevel)
    logging.basicConfig(
        format='%(levelname)s:%(name)s: %(message)s',
        level=options.loglevel,
    )
    global log
#    log = rootLogger.getLogger(sys.argv[0])
    log = logging.getLogger(parser.prog)

    #-----------------Validating options-------------------
    if options.cf is not None:
        generateCf(path.abspath(options.cf),controlFileTemplate)
        log.info("Wrote default controlfile")
        return 1
    if options.user is None:
        log.error("Need to specify -u <user>")
        return 1
    if options.edb is None:
        log.error("Need to specify -e <edb>")
        return 1
#    if options.year is None:
#        log.error("Need to specify -y <year>")
#        return 1
#    if len(options.year)!=4:
#        log.error("Year should be given with four digits")
#        return 1
    if len(args)!=1:
        log.error("Controlfile should be given as argument")
        return 1

    dmn=Domain()
    edb=Edb(dmn,options.user,options.edb)
    if not edb.exists():
        log.error("Edb %s does not exist" %options.edb)
        return 1

    log.info("Parsing controlfile")
    cf=ControlFile(args[0])
    cdbPars=re.compile("companydb\.par\.(\w*?):").findall(cf.content)
    fdbPars=re.compile("facilitydb\.par\.(\w*?):").findall(cf.content)
    sdbPars=re.compile("sourcedb\.par\.(\w*?):").findall(cf.content)
    substEmisNr=re.compile("sourcedb\.subst_emis\.([0-9]*)\.emis").findall(cf.content)
    subgrpEmisNr=re.compile("sourcedb\.subgrp_emis\.([0-9]*)\.emis").findall(cf.content)

    cdbCols={}
    cdbDefaults={}
    for par in cdbPars:
        cdbCols[par]=cf.findString("companydb.par.%s:" %par)
        cdbDefaults[par]=cf.findString("companydb.par.%s.default:" %par,
                                       optional=True,default=None)
    fdbCols={}
    fdbDefaults={}
    for par in fdbPars:
        fdbCols[par]=cf.findString("facilitydb.par.%s:" %par)
        fdbDefaults[par]=cf.findString("facilitydb.par.%s.default:" %par,
                                       optional=True,default=None)
    sdbCols={}
    sdbDefaults={}
    for par in sdbPars:
        sdbCols[par]=cf.findString("sourcedb.par.%s:" %par)
        sdbDefaults[par]=cf.findString("sourcedb.par.%s.default:" %par,
                                       optional=True,default=None)

    substEmisCols={}
    substEmisDefaults={}
    if substEmisNr is not None:
        for emisNr in substEmisNr:
            cols={}
            defaults={}
            emisPars=re.compile("sourcedb\.subst_emis\.%s\.(\w*?):" %(emisNr)).findall(cf.content)
            emisDefaultPars=re.compile(
                "sourcedb\.subst_emis\.%s\.(\w*?)\.default:" %(emisNr)).findall(cf.content)
            if emisPars is not None:
                for par in emisPars:
                    cols[par]=cf.findString("sourcedb.subst_emis.%s.%s:" %(emisNr,par))
            if emisDefaultPars is not None:
                for par in emisDefaultPars:
                    defaults[par]=cf.findString("sourcedb.subst_emis.%s.%s.default:" %(emisNr,par),
                                                optional=True,default=None)
            substEmisCols[emisNr]=cols
            substEmisDefaults[emisNr]=defaults

    subgrpEmisCols={}
    subgrpEmisDefaults={}
    if subgrpEmisNr is not None:
        for emisNr in subgrpEmisNr:
            cols={}
            defaults={}
            emisPars=re.compile("sourcedb\.subgrp_emis\.%s\.(\w*?):" %(emisNr)).findall(cf.content)
            emisDefaultPars=re.compile(
                "sourcedb\.subgrp_emis\.%s\.(\w*?)\.default:" %(emisNr)).findall(cf.content)
            if emisPars is not None:
                for par in emisPars:
                    cols[par]=cf.findString("sourcedb.subgrp_emis.%s.%s:" %(emisNr,par))
            if emisDefaultPars is not None:
                for par in emisDefaultPars:
                    defaults[par]=cf.findString("sourcedb.subgrp_emis.%s.%s.default:" %(emisNr,par),
                                                optional=True,default=None)
            subgrpEmisCols[emisNr]=cols
            subgrpEmisDefaults[emisNr]=defaults

    log.info("Reading subdb...")
    subdb=Subdb(edb)
    subdb.read()

    log.info("Reading companydb...")
    companydb=Companydb(edb)
    companydb.read()

    log.info("Reading sourcedb...")
#    source_stream = SourceStream(edb, 'w')
    source_stream = open(options.outfile, 'w')
    source_writer = ModelWriter(source_stream,encoding="HP Roman8")

    log.info("Reading facilitydb...")
    facilitydb=Facilitydb(edb)
    facilitydb.read()

    log.info("Reading subgrpdb")
    subgrpdb=Subgrpdb(edb)
    subgrpdb.read()

    log.info("Reading edb.rsrc")
    rsrc=Rsrc(edb.rsrcPath())

    acCodeTables=[]
    for i in range(rsrc.numberOfCodeTrees("ac")):
        acCodeTables.append(CodeTable(rsrc.path,codeType="ac",codeIndex=i+1))

    gcCodeTables=[]
    for i in range(rsrc.numberOfCodeTrees("gc")):
        gcCodeTables.append(CodeTable(rsrc.path,codeType="gc",codeIndex=i+1))

    geocodeRasters=[]
    rast1=Raster()
    rast1.read("/usr/airviro/data/geo/topdown/dynamicRasters/dynamic__GEOCODE__1.txt")
    rast2=Raster()
    rast2.read("/usr/airviro/data/geo/topdown/dynamicRasters/dynamic__GEOCODE__2.txt")
    geocodeRasters.append(rast1)
    geocodeRasters.append(rast2)

    log.info("Reading csv-file")
    table=DataTable()
    table.read(options.infile,delimiter=options.delimiter,encoding="ISO-8859-15")

    if options.filterCol is not None:
        if options.filterCol not in table.colIndex:
            log.error("Filter column header not found in table")
            sys.exit(1)

    invalid=False
    nFiltered=0
    nRows=0

    log.info("Processing rows")
    for rowInd,row in enumerate(table.data):
        nRows+=1
        if options.filterCol is not None:
            filterVal=row[table.colIndex[options.filterCol]]
            if options.filterVal!=str(filterVal):
                nFiltered+=1
                continue

        comp = Company()
        for par in comp.parOrder:
            val=cdbDefaults.get(par,None)
            if par in cdbCols:
                colId=cdbCols[par]
                try:
                    tableVal=row[table.colIndex[colId]]
                except KeyError:
                    log.error(
                        "No column with header %s, columns: %s" %(
                            colId,str(table.listIds())))
                    tableVal=None
                if tableVal is not None:
                    val = tableVal
            if val is not None:
                #Too long names are truncated
                if par=="NAME" and len(val)>45:
                    val=val[:45]
                comp[par]=val

        fac = Facility()
        for par in fac.parOrder:
            val=fdbDefaults.get(par,None)
            if par in fdbCols:
                colId=fdbCols[par]
                tableVal=row[table.colIndex[colId]]
                if tableVal is not None:
                    val = tableVal
            if val is not None:
                #Too long names are truncated
                if par=="NAME" and len(val)>45:
                    val=val[:45]
                fac[par]=val

        src = Source()
        for par in ["X1", "Y1","X2","Y2",
                    "PX","PY","NAME","INFO","INFO2","DATE","CHANGED",
                    "CHIMNEY HEIGHT","GASTEMPERATURE","GAS FLOW",
                    "SEARCHKEY1","SEARCHKEY2","SEARCHKEY3",
                    "SEARCHKEY4","SEARCHKEY5","CHIMNEY OUT","CHIMNEY IN",
                    "HOUSE WIDTH","HOUSE HEIGHT","NOSEGMENTS","BUILD_WIDTHS",
                    "BUILD_HEIGHTS","BUILD_LENGTHS","BUILD_DISTFARWALL",
                    "BUILD_CENTER","GEOCODE","FORMULAMACRO","ALOB"]:
            val=sdbDefaults.get(par,None)
            if par in sdbCols:
                colId=sdbCols[par]
                tableVal=row[table.colIndex[colId]]
                if tableVal is not None:
                    val = tableVal
            if val is not None:
                #validate code
                if par=="GEOCODE" and val is not None:
                    gcList=val.split()
                    for codeIndex,code in enumerate(gcList):
                        if not gcCodeTables[codeIndex].hasCode(code):
                            log.error("Invalid geo code %s on row %i" %(code,rowInd))
                            invalid=True
                #Too long names are truncated
                if par=="NAME" and len(val)>45:
                    val=val[:45]
                #Store in src object and convert to correct type
                src._fieldvalues[par] = lazy_parse(
                    src, par, val)

        gc1=geocodeRasters[0].getVal(src.get_coord()[0],src.get_coord()[1])
        gc2=geocodeRasters[1].getVal(src.get_coord()[0],src.get_coord()[1])
        src.GEOCODE = [str(int(gc1)) + "." + str(int(gc2))]

        for emisNr,emis in substEmisCols.items():
            substEmis={"unit":None,"ac":None,"substance":None,"emis":None}
            for par in substEmis.keys():
                if par in emis:
                    substEmis[par]=row[table.colIndex[emis[par]]]
                else:
                    try:
                        substEmis[par]=substEmisDefaults[emisNr][par]
                    except KeyError:
                        log.error(
                            "Need to specify column or default value for subst emis %i" %emisNr)

            substInd=subdb.substIndex(substEmis["substance"])
            if substInd is None:
                log.error("Invalid substance name %s on row %i" %(
                    substEmis["substance"],rowInd))
                sys.exit(1)

            try:
                unit=rsrc.sub[substEmis["unit"]]
            except KeyError:
                log.error("Invalid unit name %s on row %i" %(substEmis["unit"],rowInd))
                sys.exit(1)

            acList=substEmis["ac"].split('\\')[0].split()
            for codeIndex,code in enumerate(acList):
#                if code == "2.A.4.2":
#                    import pdb; pdb.set_trace()
                refCode = acCodeTables[codeIndex].checkCode(code)
                if refCode == "-":
                    log.error("Invalid activity code %s on row %i" %(code,rowInd))
                    sys.exit(1)
                if refCode != code:
                    acList[codeIndex] = refCode
            substEmis["ac"] = acList

            if substEmis["emis"] is not None and substEmis["emis"]!="0":
                try:
                    emis = src.add_emission()
                    emis.UNIT = substEmis["unit"]
                    emis.ACTCODE = substEmis["ac"]  # needs re-formatting
                    emis.EMISSION = float(substEmis["emis"])
                    emis.SUBSTANCE = substInd
                    emis.auto_adjust_unit(edb)
                except:
#                    print substEmis
#                    log.error("Invalid substance emission on row %i" %rowInd)
                    invalid=True
                    src.EMISSION=src.EMISSION[:-1]

        for emisNr,emis in subgrpEmisCols.items():
            subgrpEmis={"unit":None,"ac":None,"name":None,"emis":None}
            for par in subgrpEmis.keys():
                if par in emis:
                    subgrpEmis[par]=row[table.colIndex[emis[par]]]
                else:
                    try:
                        subgrpEmis[par]=subgrpEmisDefaults[emisNr][par]
                    except KeyError:
                        log.error(
                            "Need to specify column or default value for subgrp emis %i" %emisNr)

            #validating subgrp name
            try:
                subgrp=subgrpdb.getByName(subgrpEmis["name"])
            except KeyError:
                log.error("Invalid subgrp name %s on row %i" %(subgrpEmis["name"],rowInd))
                invalid=True

            #validating subgrp emis unit
            try:
                unitFactor=rsrc.subGrpEm[subgrpEmis["unit"]]
            except KeyError:
                log.error("Invalid unit %s for subgrp emission on row %i" %(
                    subgrpEmis["unit"],rowInd))
                invalid=True

            #validating subgrp activity code
            acList=subgrpEmis["ac"].split()
            for codeIndex,code in enumerate(acList):
                refCode = acCodeTables[codeIndex].checkCode(code)
                if refCode == "-":
                    log.error("Invalid activity code %s on row %i" %(code,rowInd))
                    invalid=True
                    break
                if refCode != code:
                    acList[codeIndex] = refCode
            subgrpEmis["ac"] = acList

            try:
                src.addSubgrpEmis(subgrp.index,emis=subgrpEmis["emis"],unit=subgrpEmis["unit"],
                                  ac=subgrpEmis["ac"])
            except:
                log.error("Invalid subgrp emission on row %i" %rowInd)
                invalid=True

        companydb.append(comp,force=True)
        facilitydb.append(fac,force=True)
        source_writer.write(src)
#        sourcedb.append(src)

    if invalid:
        log.info("No output written due to validation errors")
        sys.exit(0)

    if len(companydb.items)>0:
        if options.outfile is None:
            log.info("Writing companydb")
        else:
            log.info("Writing company db to file")
        companydb.write(filename=options.outfile+".companydb")

    if len(facilitydb.items)>0:
        if options.outfile is None:
            log.info("Writing facilitydb")
        else:
            log.info("Writing facilitydb to file")
        facilitydb.write(filename=options.outfile+".facilitydb")

#    if len(sourcedb.sources)>0:
#        if options.outfile is None:
#            log.info("Writing sourcedb")
#        else:
#            log.info("Writing sourcedb to file")
#        sourcedb.write(filename=options.outfile+".sourcedb")

    if options.filterCol is not None:
        log.info("Filtered out %i out of %i" %(nFiltered,nRows))
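# A sketch of the controlfile keys parsed by the regexes above, mapping edb
# parameters to csv column headers. The right-hand values are hypothetical
# and must match headers in the input csv:
#
#   companydb.par.NAME: CompanyName
#   facilitydb.par.NAME: FacilityName
#   sourcedb.par.X1: XCoord
#   sourcedb.par.Y1: YCoord
#   sourcedb.subst_emis.1.substance: Substance1
#   sourcedb.subst_emis.1.emis: Emission1
#   sourcedb.subst_emis.1.unit.default: ton/year
#   sourcedb.subst_emis.1.ac.default: 1.3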
def main():
    #setting up parser
    parser=OptionParser(usage=usage, version=version)
    (options, args) = parser.parse_args()

    #Setting up logging environment
    logLevel=os.environ.get("LOG_LEVEL")
    if logLevel is None or logLevel=="":
        logLevel=2
    logLevels={0:logging.NOTSET,
               1:logging.WARNING,
               2:logging.INFO,
               3:logging.DEBUG}
    rootLogger=logging.getLogger('')
    logLevelObj=logLevels[int(logLevel)]
    #info is written to stderr
    #errors are written to stdout
    rootLogger.setLevel(logLevelObj)
    infoHandler=logging.StreamHandler(sys.stderr)
    infoHandler.setLevel(logLevelObj)
    errHandler=logging.StreamHandler(sys.stdout)
    errHandler.setLevel(logging.ERROR)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    infoHandler.setFormatter(formatter)
    errHandler.setFormatter(formatter)
    rootLogger.addHandler(infoHandler)
    rootLogger.addHandler(errHandler)
    logger=logging.getLogger("trendReport")

    if len(args)!=2:
        parser.error("Incorrect number of arguments")

    infile=path.abspath(args[0])   #file with emission data per code
    outfile=path.abspath(args[1])  #path for output excel report
    if not path.exists(infile):
        logger.error("Input asciiReport does not exist")
        sys.exit(-1)

    #reading emission data file
    f=codecs.open(infile,"r","HP Roman8")
    dlines=f.readlines()
    f.close()

    #parsing meta-data in file header
    dlines.pop(0)
    dlines.pop(0)
    name=dlines.pop(0)[5:].strip()
    nrmacros=int(dlines.pop(0).split()[1])
    nrsubstances=int(dlines.pop(0).split()[1])
    units=dlines.pop(0)[5:].strip()
    macros=[]
    for mIndex in range(nrmacros):
        edbuser=dlines.pop(0).split()[1]
        edbname=dlines.pop(0).split()[1]
        label=dlines.pop(0).split()[1]
        macros.append({"edbuser":edbuser,"edbname":edbname,"label":label})

    #finding path to edb.rsrc for first edb in trend
    domainPath=os.environ["DBAS_PATH"]
    domainName=os.environ["AVDBNAME"]
    rsrcPath=path.join(domainPath,"edb",macros[0]["edbuser"],macros[0]["edbname"],"edb.rsrc")

    #Creating codeTable object (for look-ups of activity code names and geocode names)
    try:
        cTable=codetable.CodeTable(rsrcPath)
    except:
        logger.error("Could not build code tree from .../dba/edb/edb.rsrc")
        sys.exit(-1)

    #parsing emission data file
    macroIndex=0
    emissions=[]
    substances=[]
    for line in dlines:
        data=line.split()
        if data[0]=="""#MACRO""":
            #if header, update substance and macro index
            macroIndex=int(data[1])
            subst=data[2].replace("\"","")
            if subst not in substances:
                substances.append(subst)
        else:
            #else get codes and corresponding names
            ac=data[1]
            ac=ac.split(".")
            if ac[0]=="<all>":
                #add "all" to get the right depth in the dictionary
                ac=["all","all"]
            elif len(ac)==1:
                ac.append("all")
            gc=data[3]
            gc=gc.split(".")
            if gc[0]=="<all>":
                gc=["all","all"]
            elif len(gc)==1:
                gc.append("all")

            gcLev1Name=cTable.gc[gc[0]]["name"]
            if gc[1]!='all':
                gcLev2Name=cTable.gc[gc[0]][gc[1]]["name"]
            else:
                gcLev2Name="Alla"

            acLev1Name=cTable.ac[ac[0]]["name"]
            if ac[1]!='all':
                acLev2Name=cTable.ac[ac[0]][ac[1]]["name"]
            else:
                acLev2Name="Alla"

            val=data[4]
            emissions.append({"label":label,"substance":subst,
                              "gcLev1":gcLev1Name,"gcLev2":gcLev2Name,
                              "acLev1":acLev1Name,"acLev2":acLev2Name,
                              "val":val,"macroIndex":macroIndex})

    #Create style objects for excel output
    header1Style=xlwt.easyxf('font: name Times New Roman,color-index black, bold on',
                             num_format_str='0.000E+00')
    normalStyle=xlwt.easyxf('font: name Times New Roman,color-index black, bold off',
                            num_format_str='0.000E+00')

    excelBook=xlwt.Workbook()

    #Creating info sheet
    infoWs = excelBook.add_sheet("Info")
    infoWs.col(0).width = 256*20
    infoWs.col(1).width = 256*25
    infoWs.col(2).width = 256*20
    infoWs.col(3).width = 256*200
    infoWs.write(0,0,u"Rapportnamn:",header1Style)
    infoWs.write(0,1,name,header1Style)
    infoWs.write(1,0,u"Beskrivning av dataunderlaget",header1Style)
    infoWs.write(3,0,u"Makron (specificerar utsökningar ur databasen)",header1Style)
    infoWs.write(4,0,u"Etikett",header1Style)
    infoWs.write(4,1,u"Ägare till EDB",header1Style)
    infoWs.write(4,2,u"EDB (emissiondatabas)",header1Style)
    infoWs.write(4,3,u"Beskrivning",header1Style)

    for m in range(nrmacros):
        infoWs.write(5+m,0,macros[m]["label"])
        infoWs.write(5+m,1,macros[m]["edbuser"])
        infoWs.write(5+m,2,macros[m]["edbname"])
        #reading edb description file (if it exists)
        edb=Edb(domainName,macros[m]["edbuser"],macros[m]["edbname"])
        infoWs.write(5+m,3,edb.desc().replace("\n"," "))

    #split substances in green house gases and air quality related
    ghgList=[s for s in substances if s in ghgs]
    aqList=[s for s in substances if s not in ghgs]

    sheets={}
    for s in substances:
        sheets[s]=excelBook.add_sheet(s)

    #Write air quality headers
    for subst,sheet in sheets.iteritems():
        firstRow=3
        sheet.col(0).width = 256*25
        sheet.col(1).width = 256*30
        sheet.col(2).width = 256*20
        sheet.col(3).width = 256*15
        for col in range(nrsubstances*nrmacros):
            sheet.col(col+4).width=256*15
        sheet.write(0,0,u"Rapportnamn:",header1Style)
        sheet.write(0,1,name,header1Style)
        sheet.write(1,0,u"Emissioner av luftföroreningar",header1Style)
        sheet.write(1,1,subst,header1Style)
        sheet.write(firstRow,1,"Undersektor",header1Style)
        sheet.write(firstRow,2,u"Län",header1Style)
        sheet.write(firstRow,3,"Kommun",header1Style)

    def getColInd(nmacros,macroInd,co2Ekv=False):
        #gets the column index in excel file
        if not co2Ekv:
            return 4 + macroInd
        else:
            return 4 + macroInd + nmacros

    #write macro labels and substance headers for air quality sheets
    for sInd,subst in enumerate(substances):
        for mInd,macro in enumerate(macros):
            col=getColInd(nrmacros,mInd)
            sheets[subst].write(firstRow-1,col,macro["label"],header1Style)
            sheets[subst].write(firstRow,col,units,header1Style)
            if subst in ghgList:
                col=getColInd(nrmacros,mInd,co2Ekv=True)
                sheets[subst].write(firstRow-1,col,macro["label"],header1Style)
                sheets[subst].write(firstRow,col,units+u" (CO2-ekvivalenter)",header1Style)

    #looping over all emissions, writing them to the correct column and row
    sheetRow={}
    for s in substances:
        sheetRow[s]=[]
    #For each sheet an array with as many values as there are columns with data
    #Each value is initialized with the first row containing data
    #Each value is a counter for the row number
    for m in range(nrmacros*2+4):
        for s in substances:
            sheetRow[s].append(firstRow+1)

    for emis in emissions:
        subst = emis["substance"]
        emisVal=emis["val"]
        macroInd=emis["macroIndex"]
        col=getColInd(nrmacros,macroInd)
        row=sheetRow[subst][col]
        #If row index for sheet is not larger than current row
        #the meta-data for the emission is written
        if sheetRow[subst][0]<=row:
            sheets[subst].write(row,0,emis["acLev1"],normalStyle)
            sheets[subst].write(row,1,emis["acLev2"],normalStyle)
            sheets[subst].write(row,2,emis["gcLev1"],normalStyle)
            sheets[subst].write(row,3,emis["gcLev2"],normalStyle)
            sheetRow[subst][0]+=1  #increment row in first col for sheet
        sheets[subst].write(row,col,float(emisVal),normalStyle)
        sheetRow[subst][col]+=1
        if subst in ghgList:
            col=getColInd(nrmacros,macroInd,co2Ekv=True)
            #converts the emission to CO2-equivalents
            sheets[subst].write(row,col,float(emisVal)*float(ekvFactors[subst]),normalStyle)
            sheetRow[subst][col]+=1

    excelBook.save(outfile)
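# A sketch of the ascii report header parsed above. The first two (skipped)
# lines are not shown; the layout is inferred from the raw-format writer in
# trendReport below, and all values are hypothetical:
#
#   name: Trend 1990-2010
#   nrmacros: 2
#   nrsub: 3
#   unit: ton/year
#   macro.0.edbuser: airviro
#   macro.0.edbname: EDB1990
#   macro.0.desc: 1990
#   ...
#   #MACRO 0 "NOx" "1990"
#   <lineNr> <ac> <...> <gc> <emission>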
def main():
    #-----------Setting up and using option parser-----------------------
    parser=OptionParser(usage=usage, version=version)
    parser.add_option('--src_user',
                      action="store",dest="src_user",
                      help="Name of source edb user")
    parser.add_option("--src_edb",
                      action="store",dest="src_edb",
                      help="Name of source edb")
    parser.add_option('--tgt_user',
                      action="store",dest="tgt_user",
                      help="Name of target edb user")
    parser.add_option("--tgt_edb",
                      action="store",dest="tgt_edb",
                      help="Name of target edb")
    parser.add_option("--codeIndex",
                      action="store",dest="codeIndex",
                      help="Activity code index to use for filter")
    parser.add_option("--codeValue",
                      action="store",dest="codeValue",
                      help="Activity code value to filter by")
    parser.add_option("-l", "--loglevel",
                      action="store",dest="loglevel",default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    parser.add_option("-o", "--outfile",
                      action="store",dest="outfile",
                      help="Output filename, leave out to write directly to target edb")
    (options, args) = parser.parse_args()

    #--------------------Init logger-----------------------
    rootLogger = logger.RootLogger(level=options.loglevel)
    global log
    log = rootLogger.getLogger(sys.argv[0])

    #-----------------Validating options-------------------
    if options.src_user is None:
        log.error("Need to specify --src_user <user>")
        return 1
    if options.src_edb is None:
        log.error("Need to specify --src_edb <edb>")
        return 1
    if options.tgt_user is None:
        log.error("Need to specify --tgt_user <user>")
        return 1
    if options.tgt_edb is None:
        log.error("Need to specify --tgt_edb <edb>")
        return 1
    if len(args)!=0:
        log.error("No argument expected")
        return 1

    dmn=Domain()
    src_edb=Edb(dmn,options.src_user,options.src_edb)
    tgt_edb=Edb(dmn,options.tgt_user,options.tgt_edb)
    if not src_edb.exists():
        log.error("Source edb %s does not exist" %options.src_edb)
        return 1
    if not tgt_edb.exists():
        log.error("Target edb %s does not exist" %options.tgt_edb)
        return 1

    if options.codeIndex is None:
        log.error("No code index specified")
        return 1
    if options.codeValue is None:
        log.error("No code value specified")
        return 1

    log.info("Reading subdb...")
    src_subdb=Subdb(src_edb)
    src_subdb.read()

    log.info("Reading sourcedb...")
    src_sourcedb=Sourcedb(src_edb)
    src_sourcedb.read()

    tgt_sourcedb=Sourcedb(tgt_edb)
    tgt_sourcedb.read()

    #log.info("Reading subgrpdb")
    #src_subgrpdb=Subgrpdb(src_edb)
    #src_subgrpdb.read()

    log.info("Reading edb.rsrc")
    src_rsrc=Rsrc(src_edb.rsrcPath())
    tgt_rsrc=Rsrc(tgt_edb.rsrcPath())

    log.info("Reading target sourcedb...")
    acCodeTables=[]
    for i in range(tgt_rsrc.numberOfCodeTrees("ac")):
        acCodeTables.append(CodeTable(tgt_rsrc.path,codeType="ac",codeIndex=i+1))

    gcCodeTables=[]
    for i in range(tgt_rsrc.numberOfCodeTrees("gc")):
        gcCodeTables.append(CodeTable(tgt_rsrc.path,codeType="gc",codeIndex=i+1))

    copiedSources=0
    readSources=0
    for src in src_sourcedb.sources:
        hasEmis=False
        src["PX"]=0
        src["PY"]=0

        toBeRemoved=[]
        for substInd,emis in src.subst_emis.iteritems():
            ac=emis["ACTCODE"][int(options.codeIndex)-1]
            ac=[c for c in ac if c!="-"]  #Remove '-' from code
            emis["ACTCODE"][int(options.codeIndex)-1]=ac
            ac=".".join(ac)
            if ac!=options.codeValue:
                toBeRemoved.append(substInd)
        for key in toBeRemoved:
            src.removeSubstEmis(key)
        if src.subst_emis!={}:
            hasEmis=True

        toBeRemoved=[]
        for subgrpInd,emis in src.subgrp_emis.iteritems():
            ac=emis["ACTCODE"][int(options.codeIndex)-1]
            ac=[c for c in ac if c!="-"]  #Remove '-' from code
            emis["ACTCODE"][int(options.codeIndex)-1]=ac
            ac=".".join(ac)
            if ac!=options.codeValue:
                toBeRemoved.append(subgrpInd)
        for key in toBeRemoved:
            src.removeSubgrpEmis(key)
        if src.subgrp_emis!={}:
            hasEmis=True

        toBeRemoved=[]
        for actInd,emis in src.activity_emis.iteritems():
            ac=emis["ACTCODE"][int(options.codeIndex)-1]
            ac=[c for c in ac if c!="-"]  #Remove '-' from code
            emis["ACTCODE"][int(options.codeIndex)-1]=ac
            ac=".".join(ac)
            if ac!=options.codeValue:
                toBeRemoved.append(actInd)
        for key in toBeRemoved:
            src.removeActivityEmis(key)
        if src.activity_emis!={}:
            hasEmis=True

        readSources+=1
        if hasEmis:
            copiedSources+=1
            tgt_sourcedb.sources.append(src)

    if options.outfile is None:
        log.info("Writing sources to target edb")
    else:
        log.info("Writing sources to file")
    tgt_sourcedb.write(filename=options.outfile,force=True)
    log.info("Successfully copied %i out of %i sources" %(copiedSources,readSources))
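# A minimal sketch of the code normalization applied above: activity codes
# are padded with '-' up to the full tree depth, so the padding is stripped
# before joining to dotted notation (the input list is hypothetical):
def _normalizeActCode(actcode):
    return ".".join([c for c in actcode if c != "-"])

# _normalizeActCode(["1", "2", "-", "-"]) -> "1.2"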
def main():
    #-----------Setting up and using option parser-----------------------
    parser=OptionParser(usage=usage, version=version)
    parser.add_option("-l", "--loglevel",
                      action="store",dest="loglevel",default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    parser.add_option("-u", "--user",
                      action="store", dest="user", default=None,
                      help="Specify user manually")
    parser.add_option("-e", "--edb",
                      action="store", dest="edb", default=None,
                      help="Name of target edb")
    parser.add_option("-t", "--template",
                      action="store",dest="cf",default=None,
                      help="Generate default controlfile")
    parser.add_option("-f", "--force",
                      action="store_true",dest="force",default=False,
                      help="To start the process without confirming the domain")
    parser.add_option("-a", "--attributeFilter",
                      action="store",dest="attributeFilter",default=None,
                      help="Set to filter out roads with the specified attribute value, attribute field name is set in controlfile")
    parser.add_option("-o","--outfile",
                      action="store",dest="outfile",default=None,
                      help="Output road ascii file")
    parser.add_option("-g","--geofilter",
                      action="store",dest="geoFilter",default=None,
                      help="Filter out roads within polygons with field value matching the geoFilter, specify shapefile in controlfile")
    parser.add_option("--scaleADT",
                      action="store",dest="ADTScaleFactor",default=None,
                      help="Scale ADT with a factor")
    (options, args) = parser.parse_args()

    #--------------------Init logger-----------------------
    rootLogger = logger.RootLogger(options.loglevel,format="%(message)s")
    global log
    log = rootLogger.getLogger(sys.argv[0])

    #-----------------Validating options-------------------
    if options.cf is not None:
        #Checks if the file already exists, prompts the user if overwrite is wanted, creates file
        controlfile.generateCf(path.abspath(options.cf),controlFileTemplate)
        log.info("Wrote default controlfile")
        sys.exit()

    if len(args)!=1:
        parser.error("Incorrect number of arguments")
    if options.edb is None:
        parser.error("Need to specify edb using flag -e")
    if options.user is None:
        parser.error("Need to specify user using flag -u")

    if options.attributeFilter is not None:
        attributeFilter=options.attributeFilter
    else:
        attributeFilter=None

    domainName=os.environ["AVDBNAME"]
    dmn = domain.Domain(domainName)

    if not options.force:
        answer=raw_input("Chosen dbase is: "+domainName+", continue(y/n)?")
        if answer=="y":
            dmn=domain.Domain()
        else:
            sys.exit(1)

    if not dmn.edbExistForUser(options.edb,options.user):
        log.error("Edb "+options.edb+" does not exist for user "+
                  options.user+" in domain "+domainName)
        sys.exit(1)

    #Creating edb object
    edb=Edb(dmn.name,options.user,options.edb)

    #Creating edb rsrc object
    rsrc=Rsrc(edb.rsrcPath())

    #Creating a roaddb object
    roaddb=Roaddb(dmn,options.user,edb.name)

    #Creating a control-file object (simple parser)
    cf=controlfile.ControlFile(fileName=path.abspath(args[0]),codec="latin6")

    #Retrieving data from control file
    shapeFilePath = cf.findExistingPath("shapeFile:")
    asciiCodeTable = cf.findString("asciiCodeTable:",optional=True,default="latin6")
    vehCompTablePath = cf.findExistingPath("vehicleCompositionTable:",optional=True)
    attributeFilterFieldName=cf.findString("attributeFilterFieldName:",optional=True,default=None)

    #Loads driver to read shape-files using ogr-library
    driver = ogr.GetDriverByName('ESRI Shapefile')

    #If option for geoFilter is used, the polygon defining the boundaries
    #of the area to be filtered is read from geoFilterShapeFile
    if options.geoFilter is not None:
        gfShapeFilePath=cf.findExistingPath("geoFilterShapeFile:")
        gfFieldName=cf.findString("geoFilterFieldName:")
        gfShapeFile = driver.Open(str(gfShapeFilePath), update=0)
        if gfShapeFile is None:
            log.error("Could not open data source: "+gfShapeFilePath)
            sys.exit(1)
        gfLayer = gfShapeFile.GetLayer()
        log.info("Found %i features in geocode shapefile" %gfLayer.GetFeatureCount())
        geoFilterPolys=[]
        gfFeature = gfLayer.GetNextFeature()
        while gfFeature:
            geocode=gfFeature.GetFieldAsString(str(gfFieldName))
            if geocode==options.geoFilter:
                geoFilterPoly=gfFeature.GetGeometryRef()
                geoFilterPolys.append(geoFilterPoly)
            gfFeature = gfLayer.GetNextFeature()
        if len(geoFilterPolys)==0:
            log.error("Could not find any polygon with field value matching the specified geoFilter in shapeFile: %s, field: %s" %(gfShapeFilePath,gfFieldName))
            sys.exit(1)

    #Creating table for default vehicle composition
    if vehCompTablePath is not None:
        vehCompTable=datatable.DataTable()
        vehCompTable.read(vehCompTablePath)
        try:
            #Set columns used as unique row identifiers, raise error if they do not exist
            vehCompTable.setKeys(["Vehicle","Tatort"])
            for fuel in ["bensin","ethanol","diesel","CNG","Totalt"]:
                vehCompTable.convertCol(fuel,float)
        except DataTableException,msg:
            log.error("Could not find column header in vehCompTable: "+str(msg))
            sys.exit(1)
        except ValueError:
            log.error("Could not find fuel %s among column headers in vehCompTable" %fuel)
            sys.exit(1)
def setEdb(self, domainName, userName, edbName):
    self.edb = Edb(domainName, userName, edbName)
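# Usage sketch (hypothetical object and names): re-targets the instance to
# another emission database before subsequent reads/writes, e.g.
#
#   db.setEdb("shipair", "sjov", "RUS_2012_v1")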
def main():
    #-----------Setting up and using option parser-----------------------
    parser=OptionParser(usage=usage, version=version)
    parser.add_option("-l", "--loglevel",
                      action="store",dest="loglevel",default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    parser.add_option("-u", "--user",
                      action="store", dest="user", default=None,
                      help="Specify user manually")
    parser.add_option("-e", "--edb",
                      action="store", dest="edb", default=None,
                      help="Name of target edb")
    parser.add_option("-t", "--template",
                      action="store",dest="cf",default=None,
                      help="Generate default controlfile")
    parser.add_option("-f", "--force",
                      action="store_true",dest="force",default=False,
                      help="To start the process without confirming the domain")
    (options, args) = parser.parse_args()

    #------------Setting up logging capabilities -----------
    rootLogger=logger.RootLogger(int(options.loglevel))
    log=rootLogger.getLogger(sys.argv[0])

    if options.cf is not None:
        generateCf(path.abspath(options.cf))
        log.info("Wrote default controlfile")
        sys.exit()

    if len(args)!=1:
        parser.error("Incorrect number of arguments")

    domainName=os.environ["AVDBNAME"]
    dmn = domain.Domain(domainName)

    if options.edb is None:
        parser.error("Need to specify edb using flag -e")
    if options.user is None:
        parser.error("Need to specify user using flag -u")

    if not options.force:
        answer=raw_input("Chosen dbase is: "+domainName+", continue(y/n)?")
        if answer=="y":
            dmn=domain.Domain()
        else:
            sys.exit("Interrupted by user")

    if not dmn.edbExistForUser(options.edb,options.user):
        log.error("Edb "+options.edb+" does not exist for user "+options.user+" in domain "+domainName)
        sys.exit()

    edb=Edb(dmn.name,options.user,options.edb)
    rsrc=Rsrc(edb.rsrcPath())

    #Opening controlfile
    cf=controlfile.ControlFile(fileName=path.abspath(args[0]))
    substances=cf.findStringList("substances:")
    outputDir=cf.findExistingPath("outputDir:")
    acIndex=cf.findInt("acIndex:")

    #Get activity code tree
    codes=codetable.CodeTable(edb.rsrcPath(),acIndex=acIndex)

    datadb=Datadb(dmn,options.user,edb.name)
    datadb.read()

    substDict=dmn.listSubstanceIndices()

    #List grid names
    gridNames=edb.listGrids()

    for ac in codes.ac:
        if ac=="all":
            continue
        log.debug("Activity code: "+ac)
        dataMarker = emepgrid.emepRaster()
        rasterDict={}
        substancesWithData=[]

        #Rereads grid list for each category to not fill the memory with emission grids
        #Grid data is only read for grids with current ac
        gridList=[]
        for gridName in gridNames:
            grd=Egrid(dmn.name,options.user,edb.name,gridName)
            grd.readAsc()
            if grd.hasFuel():
                log.warning("Only support for non-fuel grids implemented, no processing done for grid "+gridName)
                break
            gridList.append(grd)

        for subst in substances:
            log.debug("Substance: "+subst)
            substInd=substDict[subst]
            totEmisRast=raster.Raster(Xll=1190000,Yll=6110000,Ncols=720,Nrows=1560,Cellsize=1000,init=0)
            for grd in gridList:
                if grd.par["ACTIVITYCODE"].val[0]==ac:
                    if len(grd.substances)==0:
                        grd.readData()
                    totEmisRast=totEmisRast+grd.substances.get(substInd,0)
            for src in datadb.sources:
                if src.par["ACTIVITYCODE"].val[0]==ac:
                    row,col=totEmisRast.getIndex(src.par["X1"].val,src.par["Y1"].val)
                    totEmisRast.data[row,col]+=src.getEmis(substInd,rsrc,"ton/year")

            if not totEmisRast.sum()==0:
                if subst not in substancesWithData:
                    substancesWithData.append(subst)
                __RT90__="+proj=tmerc +lat_0=0 +lon_0=15d48\\'29.8\\\" +x_0=1500000 +k_0=1 +ellps=bessel +towgs84=414.1,41.3,603.1,-0.855,2.141,-7.023,0"
                emepRast = emepgrid.sortToEmep(totEmisRast,__RT90__,printInfo=True)
                dataMarker.data = numpy.where(emepRast.data > 0, 1, dataMarker.data)
                rasterDict[subst]=emepRast

                categoryDirPath = path.join(outputDir, ac)
                if not path.isdir(categoryDirPath):
                    os.mkdir(categoryDirPath)
                fileName = path.join(categoryDirPath, "Emep50km_" + subst + ".asc")
                emepRast.write(fileName)
                log.info("Emissions in EMEP-projection for substance: " + subst +
                         " written to outputDir for category: " + ac)

        if len(rasterDict)>0:
            #creating substance header in the same order as the substances in the template
            header = "i\tj\t"
            #headerList=["SO2","NOx","NH3","NMVOC","CO","TSP","PM10","PM25","Pb ","Cd","Hg","As","Cr","Cu","Ni","Se","Zn","Aldrin","Chlordane","Chlordecone","Dieldrin","Endrin","Heptachlor","Hexabromobiphenyl","Mirex","Toxaphene","HCH","DDT","PCB","DIOX","PAH","HCB","PCP","SCCP"]
            for s in substancesWithData:
                header += s + "\t"
            #remove the tab after the last column and add a newline instead
            header = header[:-1] + "\n"

            #Creating file for EMEP-data
            fileName = "CLRTAP_" + ac + ".txt"
            categoryDirPath = path.join(outputDir, ac)
            if not path.isdir(categoryDirPath):
                os.mkdir(categoryDirPath)
            fid = open(path.join(categoryDirPath, fileName), 'w')
            fid.writelines(header)

            #Writing indexes and data for all non-zero elements
            for row in range(dataMarker.nrows):
                for col in range(dataMarker.ncols):
                    if dataMarker.data[row, col] > 0:
                        (i, j) = dataMarker.getCentreCoords(row, col)
                        fid.write(str(i) + "\t" + str(j) + "\t")
                        for substWithData in substancesWithData[:-1]:
                            fid.write(str(rasterDict[substWithData].data[row, col]) + "\t")
                        fid.write(str(rasterDict[substancesWithData[-1]].data[row, col]) + "\n")
            fid.close()
            log.info("wrote emissions to clrtap-file: " + path.join(categoryDirPath, fileName))

    log.info("Finished")
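# A sketch of the resulting CLRTAP file: tab-separated, one line per EMEP
# cell with data for at least one substance; i/j are the EMEP 50 km grid
# indices from getCentreCoords. The values below are hypothetical:
#
#   i   j   SO2             NOx
#   74  51  1.234560E+00    4.567800E-01
#   75  51  2.000000E-02    0.000000E+00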
def main():
    #setting up parser
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)
    parser.add_argument(
        "-e","--edbs",
        action="store",dest="edbList",
        help="List of 'user/edb' pairs separated by :"
    )
    parser.add_argument(
        "-L","--labels",
        action="store", dest="labels",
        help="List of edb labels separated by :"
    )
    parser.add_argument(
        "-s","--substances",
        action="store",dest="substances",
        help="List of substance names separated by :"
    )
    parser.add_argument(
        "-t","--title",
        action="store",dest="title",
        help="Report title"
    )
    parser.add_argument(
        "-g","--gc-filter",
        action="store",dest="gcfilter",
        help="Filter on Geo codes, separated by :"
    )
    parser.add_argument(
        "-o","--outfile",
        action="store",dest="outfile",
        help="Output filename"
    )
    parser.add_argument(
        "-f","--format",
        action="store",dest="format",
        help="Output in 'excel','csv' or 'raw' " +
        "(Excel-format requires xlwt python module)"
    )
    parser.add_argument("--substMapping",
                        action="store",dest="substMapping",
                        help="File with tab separated mappings of substance names")
    parser.add_argument("--markerTable",
                        action="store",dest="markerTable",
                        help="Table of codes to be formatted and commented")
    parser.add_argument(
        "macro",metavar="MACRO",
        help="A macro to use"
    )
    args = parser.parse_args()

    if args.markerTable is not None:
        keys=["Year","GC","AC","note_1","note_2"]
        markerTable = DataTable(keys=keys,
                                desc=[{"id":"Year","type":str},
                                      {"id":"GC","type":str},
                                      {"id":"AC","type":str},
                                      {"id":"note_1","type":str},
                                      {"id":"note_2","type":str}])
        markerTable.read(args.markerTable)
    else:
        markerTable=None

    substMapping={}
    if args.substMapping is not None:
        with codecs.open(args.substMapping,encoding="HP Roman8",mode="r") as f:
            for line in f:
                oldName,newName = line.split(":")
                substMapping[oldName.strip()]=newName.strip()

    dmn = Domain()

    if args.gcfilter is not None:
        args.gcfilter = args.gcfilter.split(":")

    # Read original macro
    with codecs.open(args.macro, encoding="HP Roman8", mode="r") as f:
        originalContent = f.read()

    # Create a tmp copy of the macro, write content from the original macro
    macroTempFile = tempfile.NamedTemporaryFile(
        suffix=".sedb",
        dir=dmn.tmpDir()
    )
    tmpMacro = codecs.open(
        macroTempFile.name, encoding="HP Roman8",mode="w"
    )
    tmpMacro.write(originalContent)
    tmpMacro.flush()

    # Create a ControlFile obj to simplify reading and modifying macro
    macro = ControlFile(macroTempFile.name, removeComments=False)
    edb = macro.findString("edb.edb:")
    user = macro.findString("edb.user:")
    gcIndex = int(macro.findString("edb.reportgeocode:")[-1])
    acIndex = int(macro.findString("edb.reportactcode:")[-1])

    if args.edbList is None:
        edbs = [[user, edb]]
    else:
        edbs = args.edbList.split(":")
        edbs = [e.split("/") for e in edbs]
    nedbs = len(edbs)

    if args.labels is None:
        labels = ["No label"] * len(edbs)
    else:
        labels = args.labels.split(":")
        if len(labels) != nedbs:
            log.error("Number of labels specified should match number of edb:s")
            sys.exit(1)

    if args.substances is None:
        log.error("Need to specify substances")
        sys.exit(1)
    else:
        substances = args.substances.split(":")

    if args.format not in ('excel','csv','raw'):
        log.error(
            "Invalid format specifier: %s, should be one of "
            "'excel', 'csv' or 'raw'" % args.format
        )
        sys.exit(1)
    elif args.format == "excel":
        try:
            import xlwt
        except:
            log.error(
                "trendReport.py requires python module xlwt to write excel-files")
            sys.exit(1)

    # first edb
#    import pdb; pdb.set_trace()
    edb = Edb(dmn, edbs[0][0], edbs[0][1])
    # assume same code definitions in all edbs to be processed, read from first
    rsrc = edb.rsrc

    nrsubstances = len(substances)
    unitIndex = int(macro.findString("UNIT :"))
    units = rsrc.search[unitIndex]

    subdb = Subdb(edb)
    subdb.read()

    #decode input title using stdin encoding
    title=args.title.decode(sys.stdin.encoding)

    rawOutput = ""
    rawMeta = u"name: %s\nnrmacros: %i\nnrsub: %i\nunit: %s\n" %(
        title, nedbs, nrsubstances, units)

    emissions = []
    for ind, edbUser in enumerate(edbs):
        label = labels[ind]
        userName = edbUser[0]
        edbName = edbUser[1]

        macro.setParam("edb.user:", userName)
        macro.setParam("edb.edb:", edbName)
        macro.setParam("USER :", userName)
        macro.setParam("EDB :", edbName)

        rawMeta += "macro.%i.edbuser: %s\n" %(ind, userName)
        rawMeta += "macro.%i.edbname: %s\n" %(ind, edbName)
        rawMeta += "macro.%i.desc: %s\n" %(ind, label)

        for subst in substances:
            log.info(
                "User: %s, edb: %s, substance %s" %(
                    userName, edbName, subst)
            )
            substanceIndex = subdb.substIndex(subst)
            macro.setParam("ELEMENT :", substanceIndex)
            macro.write()
            command = "xrepedb -i " + macro.name
            log.info("Running xrepedb for substance %s" % subst)
#            import pdb; pdb.set_trace()
            (returnCode, errMsg, outMsg) = utilities.execute(command)

            if returnCode != 0:
                log.error("Could not run %s\nstdout: %s\nstderr: %s" %(
                    command,outMsg,errMsg))
                sys.exit(1)

            if len(outMsg) < 10:
                log.error("Invalid output from xrepedb: %s" % outMsg)
                sys.exit(1)

            rawOutput += "#MACRO %i \"%s\" \"%s\"\n" % (ind, subst, labels[ind])
            rawOutput += outMsg

            lines = outMsg.split("\n")[:-1]
            for lineInd, line in enumerate(lines):
                vals = line.split()
                ac = vals[1].split(".")
                gc = vals[3].split(".")

                if len(ac) == 1:
                    if ac[0] == "<all>":
                        acLev1 = "alla"
                    else:
                        acLev1 = ac[0]
                    acLev2 = "alla"
                else:
                    acLev1 = ac[0]
                    acLev2 = ac[1]

                if len(gc) == 1:
                    if gc[0] == "<all>":
                        gcLev1 = "alla"
                    else:
                        gcLev1 = gc[0]
                    gcLev2 = "alla"
                else:
                    gcLev1 = gc[0]
                    gcLev2 = gc[1]

                emis = float(vals[4])

                if acLev1 == "alla":
                    acLev1Name = "alla"
                    acLev2Name = "alla"
                else:
                    node = rsrc.ac[acIndex - 1].root.find(acLev1)
                    acLev1Name = node.attrib["name"]
                    if acLev2 == "alla":
                        acLev2Name = "alla"
                    else:
                        node = rsrc.ac[acIndex-1].root.find(
                            acLev1 + "/" + acLev2
                        )
                        acLev2Name = node.attrib["name"]

                if gcLev1 == "alla":
                    gcLev1Name = "alla"
                    gcLev2Name = "alla"
                else:
                    node = rsrc.gc[gcIndex-1].root.find(gcLev1)
                    gcLev1Name = node.attrib["name"]
                    if gcLev2 == "alla":
                        gcLev2Name = "alla"
                    else:
                        node = rsrc.gc[gcIndex - 1].root.find(
                            gcLev1 + "/" + gcLev2
                        )
                        gcLev2Name = node.attrib["name"]

                if args.gcfilter is not None:
                    if gc[0] not in args.gcfilter:
#                    if args.gcfilter != gcLev1:
                        continue
                emissions.append({"label": label,
                                  "substance": subst,
                                  "ac": '.'.join(ac),
                                  "gc": '.'.join(gc),
                                  "gcLev1": gcLev1Name,
                                  "gcLev2": gcLev2Name,
                                  "acLev1": acLev1Name,
                                  "acLev2": acLev2Name,
                                  "acLev1Code": acLev1,
                                  "acLev2Code": acLev2,
                                  "val": emis,
                                  "edbIndex": ind})

    #Close tempfile to automatically remove it
    tmpMacro.close()

    if args.format == "raw":
        outfile = codecs.open(args.outfile,"w","HP Roman8")
        outfile.write(rawMeta)
        outfile.write(rawOutput)
        outfile.close()
    elif args.format == "csv":
        outfile = open(args.outfile,"w")
        desc = [
            {'id': 'gc', 'type': unicode},
            {'id': 'ac', 'type': unicode},
            {'id': 'label', 'type': unicode},
            {'id': 'user', 'type': unicode},
            {'id': 'edb', 'type': unicode}
        ]
        for subst in substances:
            desc.append({'id': subst, 'type': float})
        keys = ['gc', 'ac', 'label']
        table = DataTable(desc=desc, keys=keys)

        log.info("Adding emissions to csv-table")
        for emis in emissions:
            row = [None] * len(desc)
            user = edbs[emis['edbIndex']][0]
            edb = edbs[emis['edbIndex']][1]
            row[table.colIndex['gc']] = emis['gc']
            row[table.colIndex['ac']] = emis['ac']
            row[table.colIndex['label']] = emis['label']
            row[table.colIndex['user']] = user
            row[table.colIndex['edb']] = edb
            row[table.colIndex[emis['substance']]] = emis['val']
            # data is appended to the correct row, or a new row is added if the
            # table keys do not match any existing row
            log.debug(
                "Adding row for substance %s, gc %s, ac %s" %(
                    emis['substance'], emis['gc'], emis['ac'])
            )
            table.addRow(row, append=True)
        table.write(outfile)
        outfile.close()
    else:
        # Create style objects for excel output
        header1Style = xlwt.easyxf(
            'font: name Times New Roman,color-index black, bold on',
            num_format_str='0.000E+00'
        )
        markerStyle1 = xlwt.easyxf(
            'font: name Times New Roman,color-index red, bold off, italic on',
            num_format_str='0.000E+00')
        markerStyle2 = xlwt.easyxf(
            'font: name Times New Roman,color-index orange, bold off, italic on',
            num_format_str='0.000E+00')
        normalStyle = xlwt.easyxf(
            'font: name Times New Roman,color-index black, bold off',
            num_format_str='0.000E+00'
        )

        excelBook = xlwt.Workbook()

        # Creating info sheet
        infoWs = excelBook.add_sheet("Info")
        infoWs.col(0).width = 256*20
        infoWs.col(1).width = 256*25
        infoWs.col(2).width = 256*20
        infoWs.col(3).width = 256*200
        infoWs.write(0,0,u"Rapportnamn:",header1Style)
        infoWs.write(0,1,title,header1Style)
        infoWs.write(1,0,u"Beskrivning av dataunderlaget",header1Style)
        infoWs.write(3,0,u"Makron (specificerar utsökningar ur databasen)",header1Style)
        infoWs.write(4,0,u"Etikett",header1Style)
        infoWs.write(4,1,u"Ägare till EDB",header1Style)
        infoWs.write(4,2,u"EDB (emissiondatabas)",header1Style)
        infoWs.write(4,3,u"Beskrivning",header1Style)

        for i,edbUser in enumerate(edbs):
            userName=edbUser[0]
            edbName=edbUser[1]
            label=labels[i]
            infoWs.write(5+i,0,label)
            infoWs.write(5+i,1,userName)
            infoWs.write(5+i,2,edbName)
            #reading edb description file (if it exists)
            edb=Edb(dmn,userName,edbName)
            infoWs.write(5+i,3,edb.desc().replace("\n"," "))

        #split substances in green house gases and air quality related
        ghgList=[s for s in substances if s in ghgs]
        aqList=[s for s in substances if s not in ghgs]

        #Write air quality headers
        firstRow=4
        #Add two rows for marker comments
        if markerTable is not None:
            firstRow+=2

        if len(aqList)>0:
            aqWs = excelBook.add_sheet(u"Luftföroreningar")
            aqWs.col(0).width = 256*25
            aqWs.col(1).width = 256*30
            aqWs.col(2).width = 256*20
            aqWs.col(3).width = 256*15
            for col in range(nrsubstances*nedbs):
                aqWs.col(col+4).width=256*15

            aqWs.write(0,0,u"Rapportnamn:",header1Style)
            aqWs.write(0,1,title,header1Style)
            aqWs.write(1,0,u"Emissioner av luftföroreningar",header1Style)
            aqWs.write(1,1,u"Enhet: "+units,header1Style)
            if markerTable is not None:
                aqWs.write(2,0,u"OBS! Röd kursiv text anger osäkra värden p.g.a. att en stor del av emissionen är fördelad med schabloner inom kommungruppen. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.",markerStyle1)
                aqWs.write(3,0,u"OBS! Orange kursiv text anger osäkra värden p.g.a. att trenden varierar kraftigt och eventuellt felaktigt, ytterligare verifiering krävs. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.",markerStyle2)
            aqWs.write(firstRow,0,"Huvudsektor",header1Style)
            aqWs.write(firstRow,1,"Undersektor",header1Style)
            aqWs.write(firstRow,2,u"Län",header1Style)
            aqWs.write(firstRow,3,"Kommun",header1Style)

        #Write ghg headers
        if len(ghgList)>0:
            ghgWs = excelBook.add_sheet(u"Växthusgaser")
            ghgWs.col(0).width = 256*25
            ghgWs.col(1).width = 256*30
            ghgWs.col(2).width = 256*20
            ghgWs.col(3).width = 256*15
            for col in range(nrsubstances*nedbs):
                ghgWs.col(col+4).width=256*15

            ghgWs.write(0,0,u"Rapportnamn:",header1Style)
            ghgWs.write(0,1,title,header1Style)
            ghgWs.write(1,0,u"Emissioner av Växthusgaser",header1Style)
            ghgWs.write(2,0,u"CO2-ekv. efter ämnesnamn innebär att emissionen är uttryckt i CO2-ekvivalenter",header1Style)
            if markerTable is not None:
                ghgWs.write(3,0,u"OBS! Röd kursiv text anger osäkra värden p.g.a. att en stor del av emissionen är fördelad med schabloner inom kommungruppen. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.",markerStyle1)
                ghgWs.write(4,0,u"OBS! Orange kursiv text anger osäkra värden p.g.a. att trenden varierar kraftigt och eventuellt felaktigt, ytterligare verifiering krävs. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.",markerStyle2)
            ghgWs.write(1,1,u"Enhet: "+units,header1Style)
            ghgWs.write(firstRow,0,"Huvudsektor",header1Style)
            ghgWs.write(firstRow,1,"Undersektor",header1Style)
            ghgWs.write(firstRow,2,u"Län",header1Style)
            ghgWs.write(firstRow,3,"Kommun",header1Style)

        def getColInd(nmacros, substances,macroInd,subst):
            #gets the column index in excel file
            sInd=substances.index(subst)
            #Including extra columns to write CO2-equivalents
            nSubstWithCO2equivalents=0
            for s in substances[:sInd+1]:
                if s in doubleColumns:
                    nSubstWithCO2equivalents+=1
            return 4 + macroInd+sInd*nmacros+nSubstWithCO2equivalents*(macroInd+1)

        #write macro labels and substance headers for air quality sheet
        for sInd,subst in enumerate(aqList):
            for i,edbUser in enumerate(edbs):
                col=getColInd(nedbs,aqList,i,subst)
                aqWs.write(firstRow-1,col,labels[i],header1Style)
                #If a substance name is given in mapping this is used, otherwise
                #the substance name from the airviro substance list is used
                aqWs.write(firstRow,col,substMapping.get(subst,subst),header1Style)

        #write macro labels and substance headers for ghg sheet
        for sInd,subst in enumerate(ghgList):
            for i,edbUser in enumerate(edbs):
                col=getColInd(nedbs,ghgList,i,subst)
                #If CO2-equivalents are calculated, an extra column is needed
                if subst in doubleColumns:
                    ghgWs.write(firstRow-1,col-1,labels[i],header1Style)
                ghgWs.write(firstRow-1,col,labels[i],header1Style)
                #If CO2-equivalents are calculated, an extra column is needed
                if subst in doubleColumns:
                    #debug statement
                    #print "writing subst %s in col %i and %i" %(subst,col-1,col)
                    ghgWs.write(firstRow,col-1,substMapping.get(subst,subst),header1Style)
                    ghgWs.write(firstRow,col,substMapping.get(subst,subst)+"CO2-ekv.",header1Style)
                elif subst in storedAsCO2equivalents:
                    #debug statement
                    #print "writing subst %s in col %i" %(subst,col)
                    ghgWs.write(firstRow,col,substMapping.get(subst,subst)+"CO2-ekv.",header1Style)
                else:
                    #debug statement
                    #print "writing subst %s in col %i" %(subst,col)
                    ghgWs.write(firstRow,col,substMapping.get(subst,subst),header1Style)

        #looping over all emissions, writing them to the correct column and row
        ghgRow=[]
        aqRow=[]
        for m in range(nedbs*nrsubstances+4+3*nedbs):
            ghgRow.append(firstRow+1)
        for m in range(nedbs*nrsubstances+4):
            aqRow.append(firstRow+1)

        for emis in emissions:
            subst = emis["substance"]
            emisVal=emis["val"]
            edbInd=emis["edbIndex"]

            #Check if gc, ac and year can be found in the error list
            #debugging marker style
            if markerTable is not None:
                TableRowInd=markerTable.rowIndices([labels[edbInd],
                                                    emis["gc"],
                                                    emis["ac"],
                                                    "ja","*"])
                if len(TableRowInd)>0:
                    valueStyle=markerStyle1
                else:
                    TableRowInd=markerTable.rowIndices([labels[edbInd],
                                                        emis["gc"],
                                                        emis["ac"],
                                                        "*","ja"])
                    if len(TableRowInd)>0:
                        valueStyle=markerStyle2
                    else:
                        valueStyle=normalStyle
            else:
                valueStyle=normalStyle

            if subst in ghgList:
                col=getColInd(nedbs,ghgList,edbInd,subst)
                row=ghgRow[col]
                if ghgRow[0]<=row:
                    ghgWs.write(row,0,emis["acLev1"],normalStyle)
                    ghgWs.write(row,1,emis["acLev2"],normalStyle)
                    ghgWs.write(row,2,emis["gcLev1"],normalStyle)
                    ghgWs.write(row,3,emis["gcLev2"],normalStyle)
                    ghgRow[0]+=1
                #converts the emission to CO2-equivalents
                if subst in doubleColumns:
                    ghgWs.write(row,col-1,float(emisVal),valueStyle)
                    ghgWs.write(row,col,float(emisVal)*float(ekvFactors[subst]),valueStyle)
                else:
                    ghgWs.write(row,col,float(emisVal),valueStyle)
                ghgRow[col]+=1
            else:
                col=getColInd(nedbs,aqList,edbInd,subst)
                row=aqRow[col]
                if aqRow[0]<=row:
                    aqWs.write(row,0,emis["acLev1"],normalStyle)
                    aqWs.write(row,1,emis["acLev2"],normalStyle)
                    aqWs.write(row,2,emis["gcLev1"],normalStyle)
                    aqWs.write(row,3,emis["gcLev2"],normalStyle)
                    aqRow[0]+=1
                aqWs.write(row,col,float(emisVal),valueStyle)
                aqRow[col]+=1

        excelBook.save(args.outfile)
    log.info("Finished!")
def main():
    #-----------Setting up and using option parser-----------------------
    parser=OptionParser(usage=usage, version=version)
    parser.add_option("-u",'--user',
                      action="store",dest="user",
                      help="Name of target edb user")
    parser.add_option("-e","--edb",
                      action="store",dest="edb",
                      help="Name of target edb")
    parser.add_option("-v","--viewports",
                      action="store",dest="viewports",
                      help="Comma-separated list of area id's to be cut out, default is all")
    parser.add_option("-y","--year",
                      action="store",dest="year",
                      help="Cut out for given year")
    parser.add_option("-l", "--loglevel",
                      action="store",dest="loglevel",default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    parser.add_option("-s","--suffix",
                      action="store",dest="suffix",default="v1",
                      help="Sets suffix to names of generated edb's to support version management, default is 'v1'")
    (options, args) = parser.parse_args()

    #--------------------Init logger-----------------------
    rootLogger = logger.RootLogger(level=options.loglevel)
    global log
    log = rootLogger.getLogger(sys.argv[0])

    #-----------------Validating options-------------------
    if options.user is None:
        log.error("Need to specify -u <user>")
        return 1
    if options.edb is None:
        log.error("Need to specify -e <edb>")
        return 1
    if options.year is None:
        log.error("Need to specify -y <year>")
        return 1
    if len(options.suffix)>4:
        log.error("Limit your suffix length to 4 characters")
        return 1
    if len(options.year)!=4:
        log.error("Year should be given with four digits")
        return 1

    dmn=Domain()
    viewports=[]
    if options.viewports is not None:
        viewportIds=options.viewports.split(",")
    else:
        viewportIds=dmn.listViewports()
    for vpId in viewportIds:
        vp=ViewPort()
        vp.read(path.join(dmn.wndPath(),"modell.par"),vpId)
        viewports.append(vp)

    edb=Edb(dmn,options.user,options.edb)

    log.info("Reading sourcedb...")
    sourcedb=Sourcedb(edb)
    sourcedb.read()

    log.info("Reading emfacdb...")
    emfacdb=Emfacdb(edb)
    emfacdb.read()

    log.info("Reading subdb...")
    subdb=Subdb(edb)
    subdb.read()

    edbDotRsrc=edb.rsrcPath()

    for vpInd,vp in enumerate(viewports):
        targetEdbName=vp.code+"_"+options.year+"_"+options.suffix
        tEdb=Edb(dmn,options.user,targetEdbName)
        if tEdb.exists():
            log.info("Edb %s already exists, remove first to update" %targetEdbName)
            continue
        tEdb.create(edbRsrc=edbDotRsrc)
        log.info("Created empty edb %s" %targetEdbName)

        subdb.setEdb(tEdb)
        subdb.write()
        log.info("Wrote searchkeys")

        emfacdb.setEdb(tEdb)
        emfacdb.write()
        log.info("Wrote emfacdb")

        tSourcedb=Sourcedb(tEdb)
        log.info("Cutting out viewport %s (%i/%i)" %(vp.code,vpInd+1,len(viewports)))
        for srcInd,src in enumerate(sourcedb.sources):
            if includeShip(src,vp.code,src["Y1"],options.year):
                log.debug("Ship %i/%i included in %s" %(srcInd+1,len(sourcedb.sources),tEdb.name))
                tSourcedb.sources.append(src)
        tSourcedb.write()
        log.info("Wrote extracted sources to %s" %tEdb.name)

        tEdb.setDesc("This edb has been extracted from %s under user %s, " %(edb.name,edb.user)+
                     "and includes all ships that have visited the map area %s (%s) during %s\n" %(vp.code,vp.name,options.year))

    log.info("Finished!")
    return 0
def main():
    # -----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)
    parser.add_option("-l", "--loglevel",
                      action="store", dest="loglevel", default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    parser.add_option("-u", "--user",
                      action="store", dest="user", default=None,
                      help="Specify user manually")
    parser.add_option("-e", "--edb",
                      action="store", dest="edb", default=None,
                      help="Name of target edb")
    parser.add_option("-t", "--template",
                      action="store", dest="cf", default=None,
                      help="Generate default controlfile")
    parser.add_option("-f", "--force",
                      action="store_true", dest="force", default=False,
                      help="Start the process without confirming the domain")
    (options, args) = parser.parse_args()

    # Setup logging
    logging.configure(terminal_level=logging.DEBUG)
    log = logging.getLogger(__name__)

    if options.cf is not None:
        generateCf(path.abspath(options.cf))
        log.info("Wrote default controlfile")
        sys.exit()

    if len(args) != 1:
        parser.error("Incorrect number of arguments")

    if options.edb is None:
        parser.error("Need to specify edb using flag -e")
    if options.user is None:
        parser.error("Need to specify user using flag -u")

    dmn = Domain()
    if not options.force:
        answer = raw_input("Chosen dbase is: " + dmn.name + ", continue (y/n)? ")
        if answer != "y":
            sys.exit("Interrupted by user")

    if not dmn.edbExistForUser(options.edb, options.user):
        log.error("Edb " + options.edb + " does not exist for user " +
                  options.user + " in domain " + dmn.name)
        sys.exit()

    # ---Creating edb and rsrc objects------------------
    edb = Edb(dmn, options.user, options.edb)
    rsrc = Rsrc(edb.rsrcPath())

    # ---Opening controlfile and retrieving data----
    cf = ControlFile(fileName=path.abspath(args[0]))
    substances = cf.findStringList("substances:")
    outputDir = cf.findExistingPath("outputDir:")
    acIndex = cf.findInt("acIndex:")
    macroFileName = path.abspath(cf.findExistingPath("xrepedbMacro:"))
    fromProj = cf.findString("fromProj:")
    toProj = cf.findString("toProj:")

    try:
        fromProj = transcoord.proj4Dict[fromProj]
    except KeyError:
        log.error("Projection %s not found in proj4Dict in transCoord.py" % fromProj)
    try:
        toProj = transcoord.proj4Dict[toProj]
    except KeyError:
        log.error("Projection %s not found in proj4Dict in transCoord.py" % toProj)

    formats = cf.findStringList("formats:")
    units = cf.findString("units:")
    writeGrids = cf.findBoolean("writeGrids:", optional=True, default=True)

    edb_xll = cf.findInt("edb_xll:")
    edb_yll = cf.findInt("edb_yll:")
    edb_ncols = cf.findInt("edb_ncols:")
    edb_nrows = cf.findInt("edb_nrows:")
    edb_cellsize = cf.findFloat("edb_cellsize:")

    if fromProj != toProj:
        out_xll = cf.findFloat("out_xll:")
        out_yll = cf.findFloat("out_yll:")
        out_ncols = cf.findInt("out_ncols:")
        out_nrows = cf.findInt("out_nrows:")
        out_cellsize = cf.findFloat("out_cellsize:")

    # -----------------------------------------
    # Find index of the search unit defined in edb.rsrc
    unitIndex = None
    for key, unit in rsrc.search.iteritems():
        if isinstance(key, int):
            if rsrc.search[key] == units:
                unitIndex = key
                break
    if unitIndex is None:
        log.error("Search units: %s not defined in edb.rsrc" % units)
        sys.exit()

    macro = ControlFile(fileName=macroFileName, removeComments=False)

    # Preparing export macro
    macro.setParam("general.database:", dmn.name)
    xmin = edb_xll
    xmax = edb_xll + edb_ncols * edb_cellsize
    ymin = edb_yll
    ymax = edb_yll + edb_nrows * edb_cellsize
    macro.setParam("edb.mapopt.bounds:", "%i %i %i %i" % (xmin, xmax, ymin, ymax))
    macro.setParam("edb.user:", options.user)
    macro.setParam("edb.edb:", edb.name)
    macro.setParam("REGION :", "%i %i %i %i" % (xmin, xmax, ymin, ymax))
    macro.setParam("USER :", options.user)
    macro.setParam("EDB :", edb.name)
    macro.setParam("GRID :",
                   "%i %i %i %i %i %i" % (edb_xll, edb_yll, edb_ncols,
                                          edb_nrows, edb_cellsize, edb_cellsize))
    macro.setParam("edb.unit:", unitIndex)
    macro.setParam("UNIT :", unitIndex)
    # macro.setParam("NOACTCODE :", acIndex)
    macro.setParam("NOACTCODE :", len(rsrc.ac))

    # Get activity code tree
    acTree = codeemistree.CodeEmisTree("Activity codes", units=units)
    acTree.readActivityCodes(rsrc.path, acIndex)

    substDict = dmn.listSubstanceIndices()
    edbRast = Raster(Xll=edb_xll, Yll=edb_yll, Ncols=edb_ncols,
                     Nrows=edb_nrows, Cellsize=edb_cellsize,
                     Nodata=-9999, init=0)
    if fromProj != toProj:
        outRastTemplate = Raster(Xll=out_xll, Yll=out_yll, Ncols=out_ncols,
                                 Nrows=out_nrows, Cellsize=out_cellsize,
                                 Nodata=-9999)
    else:
        outRastTemplate = Raster()
        outRastTemplate.assign(edbRast)

    for node in acTree.root.getiterator():
        if node.tag == "root" or node.tag == "Record":
            continue
        ac = node.tag
        log.debug("Activity code: " + ac)

        # Find row index for activity codes in macro.
        # Add a row with the current ac; if several ac are already given,
        # they are replaced by the current ac.
        macroLines = macro.content.split("\n")
        actCodeInd = None
        geoCodeInd = None
        for lineInd, line in enumerate(macroLines):
            if "NOACTCODE" in line:
                actCodeInd = lineInd
            if "NOGEOCODE" in line:
                geoCodeInd = lineInd

        if len(ac.split('.')) >= rsrc.ac[acIndex - 1].depth:
            macroLines = (macroLines[:actCodeInd + 1] +
                          ["none"] * (acIndex - 1) + [ac] +
                          ["none"] * (len(rsrc.ac) - acIndex) +
                          macroLines[geoCodeInd:])
        else:
            macroLines = (macroLines[:actCodeInd + 1] +
                          ["none"] * (acIndex - 1) + [ac + '.'] +
                          ["none"] * (len(rsrc.ac) - acIndex) +
                          macroLines[geoCodeInd:])

        macro.content = "\n".join(macroLines)
        macro.write()

        # Boolean raster marking where there is data for any of the substances
        if 'CLRTAP' in formats:
            dataMarker = Raster(Xll=out_xll, Yll=out_yll, Ncols=out_ncols,
                                Nrows=out_nrows, Cellsize=out_cellsize,
                                Nodata=-9999, init=0)
        rasterDict = {}
        substancesWithData = []

        for substance in substances:
            log.debug("Substance %s" % substance)
            substanceIndex = substDict[substance]
            macro.setParam("ELEMENT :", substanceIndex)
            macro.write()
            command = "xrepedb -i " + macro.name
            (returnCode, errMsg, outMsg) = utilities.execute(command)

            tmp = outMsg.split()[10:-2]
            tmp.sort()
            if tmp[0] == '0.000000E+00' and tmp[-1] == '0.000000E+00':
                print "ERROR: The field for " + substance + " is empty!"
                continue

            emisRast = string2rast(outMsg, edbRast)
            emisSum = emisRast.sum()

            outRast = Raster()
            outRast.assign(outRastTemplate)

            rec = ET.SubElement(node, "Record")
            rec.attrib["substance"] = substance
            rec.attrib["emission"] = str(emisSum)

            if emisSum > 0 and writeGrids:
                if substance not in substancesWithData:
                    substancesWithData.append(substance)

                if fromProj != toProj:
                    exportRast = transcoord.transformEmisRaster(
                        emisRast, outRast, fromProj, toProj,
                        tmpDir=dmn.tmpDir())
                else:
                    exportRast = emisRast

                if 'CLRTAP' in formats:
                    dataMarker.data = numpy.where(exportRast.data > 0, 1,
                                                  dataMarker.data)
                    rasterDict[substance] = exportRast

                categoryDirPath = path.join(outputDir, ac)
                if not path.isdir(categoryDirPath):
                    os.mkdir(categoryDirPath)

                if 'ESRI Ascii grid' in formats:
                    fileName = path.join(categoryDirPath, substance + ".asc")
                    exportRast.write(fileName)
                    log.debug("Grid for " + substance +
                              " written to outputDir for category: " + ac)

        summaryTable = acTree.createTable(writeAll=True)
        summaryTable.sortRows()
        tableFile = open(path.join(outputDir, "summaryTable.txt"), 'w')
        summaryTable.write(tableFile)

        if len(rasterDict) > 0 and 'CLRTAP' in formats:
            # Create substance header in the same order as the substances
            # in the template
            header = "i\tj\t"
            # headerList=["SO2","NOx","NH3","NMVOC","CO","TSP","PM10","PM25","Pb ","Cd","Hg","As","Cr","Cu","Ni","Se","Zn","Aldrin","Chlordane","Chlordecone","Dieldrin","Endrin","Heptachlor","Hexabromobiphenyl","Mirex","Toxaphene","HCH","DDT","PCB","DIOX","PAH","HCB","PCP","SCCP"]
            for s in substancesWithData:
                header += s + "\t"
            # Replace the tab after the last column with a newline
            header = header[:-1] + "\n"

            # Create file for EMEP data
            fileName = "CLRTAP_" + ac + ".txt"
            categoryDirPath = path.join(outputDir, ac)
            if not path.isdir(categoryDirPath):
                os.mkdir(categoryDirPath)
            fid = open(path.join(categoryDirPath, fileName), 'w')
            fid.write(header)

            total = 0
            # Write indices and data for all non-zero elements
            for row in range(dataMarker.nrows):
                for col in range(dataMarker.ncols):
                    if dataMarker.data[row, col] > 0:
                        (i, j) = dataMarker.getCentreCoords(row, col)
                        fid.write(str(i) + "\t" + str(j) + "\t")
                        for substWithData in substancesWithData[:-1]:
                            fid.write(str(rasterDict[substWithData].data[row, col]) + "\t")
                            total += rasterDict[substWithData].data[row, col]
                        fid.write(str(rasterDict[substancesWithData[-1]].data[row, col]) + "\n")
                        total += rasterDict[substancesWithData[-1]].data[row, col]
            fid.close()
            log.info("Wrote emissions to CLRTAP file: " +
                     path.join(categoryDirPath, fileName))

    log.info("Finished")
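# A minimal sketch of the controlfile the export script above expects. The
# key names are taken from the cf.find* calls in main(); all values are
# hypothetical and only illustrate the format:
#
#   substances:   NOx SO2
#   outputDir:    /tmp/export
#   acIndex:      1
#   xrepedbMacro: /path/to/export.macro
#   fromProj:     RT90
#   toProj:       RT90
#   formats:      CLRTAP
#   units:        ton/year
#   writeGrids:   1
#   edb_xll:      1190000
#   edb_yll:      6100000
#   edb_ncols:    100
#   edb_nrows:    150
#   edb_cellsize: 1000.0
#
# out_xll, out_yll, out_ncols, out_nrows and out_cellsize are only read
# when fromProj and toProj differ.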
def main():
    actionList = [
        'addSubstance', 'removeSubstance', 'mapSubstances',
        'addCodeTree', 'removeCodeTree', 'moveCodeTree',
        'extractByGeocode', 'uniteGrids', 'scale'
    ]

    # Setting up parser
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    utils.add_standard_command_options(parser)
    parser.add_argument("-d", "--doc",
                        action="store_true", dest="doc",
                        help="Prints more detailed documentation and exits")
    parser.add_argument("-u", "--user",
                        action="store", dest="user", default=None,
                        help="Specify user manually")
    parser.add_argument("-e", "--edb",
                        action="store", dest="edb", default=None,
                        help="Name of target edb")
    parser.add_argument("-s", "--substance",
                        action="store", dest="substance",
                        help="Name of substance to add or remove")
    parser.add_argument("--substanceFactorFile",
                        action="store", dest="substanceFactorFile",
                        help="File with substance factors")
    parser.add_argument("--substanceMapping",
                        action="store", dest="substanceMapping",
                        help="File with mapping for substance indices")
    parser.add_argument("--codeMapping",
                        action="store", dest="codeMapping",
                        help="File with mapping from old codes to new")
    parser.add_argument("--edbMapping",
                        action="store", dest="edbMapping",
                        help="File with mapping from gc to edb")
    parser.add_argument("-f", "--force",
                        action="store_true", dest="force", default=False,
                        help="Start the process without confirming the domain")
    parser.add_argument("--factor",
                        action="store", type=float, dest="factor",
                        help="Factor used to scale emissions")
    parser.add_argument(
        "-n", "--names",
        action="store", dest="sourceNames", default=['.*'],
        nargs='*',
        help="Space separated list of regular expressions"
    )
    parser.add_argument(
        "--sourceTypes",
        action="store", dest="sourceTypes",
        nargs='*',
        default=['griddb', 'sources'],
        choices=["griddb", "sources", "subgrpdb", "svehdb", "vehdb"],
        help="Source types to process"
    )
    parser.add_argument(
        "--codeType",
        action="store", dest="codeType", default='ac',
        help="Code type to process"
    )
    parser.add_argument(
        "--codeIndex",
        action="store", dest="codeIndex", default=1, type=int,
        help="Index of code tree to use in processing"
    )
    parser.add_argument(
        "--newIndex",
        action="store", dest="newIndex", default=None,
        help="New index when moving an already defined code tree"
    )
    parser.add_argument(
        "--codeLevel",
        action="store", dest="codeLevel", default=1, type=int,
        help="Level of code tree to use in processing")
    parser.add_argument(
        "--actions",
        action="store", dest="actions",
        nargs='*',
        choices=actionList,
        help="Action list"
    )

    args = parser.parse_args()

    if args.doc:
        print doc
        sys.exit()

    # if len(args) > 0:
    #     log.error("Incorrect number of arguments")
    #     sys.exit(1)

    if args.edb is None:
        log.error("Need to specify edb")
        sys.exit(1)
    if args.user is None:
        log.error("Need to specify user")
        sys.exit(1)

    dmn = Domain()
    edb = Edb(dmn, args.user, args.edb)
    subdb = Subdb(edb)
    subdb.read()
    rsrc = Rsrc(edb.rsrcPath())

    # Check if edb exists
    if not edb.exists():
        log.error("Edb " + args.edb + " does not exist for user " +
                  args.user + " in domain " + dmn.name)
        sys.exit(1)

    if args.codeType.lower() == "ac":
        codeType = "ACTIVITYCODE"
    elif args.codeType.lower() == "gc":
        codeType = "GEOCODE"
    else:
        parser.error("codeType should be either 'gc' or 'ac'")

    if args.codeIndex < 1:
        raise ValueError("Minimum code index is 1")
    if args.codeIndex > 1 and codeType == "GEOCODE":
        raise OSError("Multiple geocode trees not implemented")

    for a in args.actions:
        if a not in actionList:
            parser.error("Unknown action %s" % a)

    log.info("User: " + args.user)
    log.info("Edb: " + args.edb)

    loadGridData = False

    # Set values in argDict
    argDict = {
        "edb": edb,
        "user": args.user,
        "domain": dmn,
        "grids": [],
        "filters": args.sourceNames,
        "codeType": codeType,
        "codeIndex": args.codeIndex,
        "codeLevel": args.codeLevel,
        "sources": None,
        "griddb": None,
        "subgrpdb": None,
        "roaddb": None,
        "vehdb": None,
        "svehdb": None,
        "rsrc": rsrc
    }

    # ----- Reading sources from edb -------------------------------
    if 'griddb' in args.sourceTypes:
        grids = []
        gridNames = edb.listGrids()
        for name in gridNames:
            grd = egrid.Egrid(edb, name)
            grd.readAsc()
            grids.append(grd)
        argDict["grids"] = grids

    if 'sources' in args.sourceTypes:
        argDict["sources"] = list(ModelReader(SourceStream(edb, mode='r')))

    if 'roaddb' in args.sourceTypes:
        log.error("Not implemented with new roaddb-structure")
        sys.exit(1)

    if 'subgrpdb' in args.sourceTypes:
        subgrpdb = Subgrpdb(edb)
        subgrpdb.read()
        argDict["subgrpdb"] = subgrpdb

    # emfacdb = Emfacdb(edb)
    # emfacdb.read()
    # argDict["subgrpdb"] = subgrpdb

    if 'svehdb' in args.sourceTypes:
        svehdb = Svehdb(edb)
        svehdb.read()
        argDict["svehdb"] = svehdb

    # TODO: add option for vehdb

    # Parse additional args
    # -----addSubstance--------------------------------------
    if "addSubstance" in args.actions:
        if args.substance is None:
            parser.error("Action addSubstance needs " +
                         "--substance to be specified")
        if args.substanceFactorFile is None:
            parser.error("Action addSubstance also needs " +
                         "--substanceFactorFile to be specified")
        if args.substance not in subdb.substIndices:
            raise ValueError(
                "Substance %s not in substance list" % args.substance)
        substanceNameFactorDict = parseMapping(
            path.abspath(args.substanceFactorFile),
            valueType=float)
        substanceFactorDict = {}
        # Convert mapping between substance name and factor to
        # mapping between substance index and factor
        for name, factor in substanceNameFactorDict.iteritems():
            if name not in subdb.substIndices:
                raise KeyError(
                    "Substance: %s not found in substance list" % name
                )
            ind = subdb.substIndices[name]
            substanceFactorDict[ind] = factor
        argDict["substanceFactorDict"] = substanceFactorDict
        argDict["substance"] = subdb.substIndices[args.substance]

    # ----removeSubstance------------------------------------
    if "removeSubstance" in args.actions:
        if args.substance is None:
            parser.error("Action removeSubstance needs " +
                         "--substance to be specified")
        if args.substance not in subdb.substIndices:
            log.error("Substance %s not in substance list" % args.substance)
            sys.exit(1)
        argDict["substance"] = subdb.substIndices[args.substance]

    # ----mapSubstances--------------------------------------
    if "mapSubstances" in args.actions:
        if args.substanceMapping is None:
            parser.error("Action mapSubstances needs " +
                         "--substanceMapping to be specified")
        substanceMappingFile = path.abspath(args.substanceMapping)
        substanceMapping = parseMapping(
            substanceMappingFile,
            keyType=int,
            valueType=int
        )
        argDict["substanceMapping"] = substanceMapping

    # ----addCodeTree-----------------------------------------
    if "addCodeTree" in args.actions:
        if args.codeMapping is None:
            parser.error("Action addCodeTree needs " +
                         "--codeMapping to be specified")
        if args.newIndex is None:
            parser.error("Action addCodeTree needs " +
                         "--newIndex to be specified")
        codeMappingFile = path.abspath(args.codeMapping)
        codeMapping = parseMapping(codeMappingFile)
        argDict["codeMapping"] = codeMapping
        argDict["newIndex"] = int(args.newIndex)

    # ----removeCodeTree-------------------------------------
    # Only arguments with default values needed, no validation is needed

    # ----moveCodeTree---------------------------------------
    if "moveCodeTree" in args.actions:
        if args.newIndex is None:
            parser.error("Action moveCodeTree needs " +
                         "--newIndex to be specified")
        try:
            argDict["newIndex"] = int(args.newIndex)
        except ValueError:
            log.error("newIndex must be an integer value")
            sys.exit(1)

    # ----extractByGeocode-----------------------------------
    if "extractByGeocode" in args.actions:
        if args.edbMapping is None:
            parser.error("Action extractByGeocode needs " +
                         "--edbMapping to be specified")
        log.info("Slicing edb by geocode")
        edbMappingFile = path.abspath(args.edbMapping)
        # Geocode has to be int
        edbMapping = parseMapping(edbMappingFile, keyType=int)
        argDict["edbMapping"] = edbMapping

    # ----scale-----------------------------------------------
    if 'scale' in args.actions:
        if args.factor is None:
            log.error("Must specify --factor")
            sys.exit(1)
        if args.substance is None:
            log.error("Must specify --substance")
            sys.exit(1)
        argDict['substance'] = subdb.substIndices[args.substance]
        argDict['factor'] = args.factor

    # Processing actions
    for action in args.actions:
        log.info("Running action: %s" % action)
        argDict = eval(action)(argDict)

    # Write each processed grid to edb
    if loadGridData:
        for grd in argDict["grids"]:
            if not match(grd["NAME"], argDict["filters"]) or grd.hasSubgrp():
                continue
            else:
                log.debug(
                    "Wrote grid %s to edb: %s, user: %s" % (
                        grd.par["NAME"].val, grd.edb.name, grd.user)
                )
                grd.load()
    else:
        for grd in argDict["grids"]:
            if not match(grd["NAME"], argDict["filters"]) or grd.hasSubgrp():
                continue
            else:
                log.debug(
                    "Wrote .asc-file for grid %s to edb: %s, user: %s" % (
                        grd.par["NAME"].val, grd.edb.name, grd.user)
                )
                grd.writeAsc(grd.getAscPath())

    # Write each processed database to edb
    if argDict["sources"] is not None:
        with SourceStream(edb, mode="w") as stream:
            model_writer = ModelWriter(stream)
            for src in argDict["sources"]:
                log.debug('Wrote source %s' % src.NAME)
                model_writer.write(src)

    for dbName in ["roaddb", "subgrpdb", "svehdb", "vehdb"]:
        if argDict[dbName] is not None:
            argDict[dbName].write()
            log.info(
                "Wrote %s to edb: %s, user: %s" % (
                    dbName, edb.name, edb.user)
            )
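# Hypothetical invocations of the script above (the script name editEdb.py
# is made up; the options and action names are the ones defined in the
# argument parser):
#
#   editEdb.py -u sven -e base2010 --actions scale \
#       --substance NOx --factor 0.5
#
#   editEdb.py -u sven -e base2010 --actions extractByGeocode \
#       --edbMapping gc2edb.txt --codeType gc --codeLevel 1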
def extractByGeocode(argDict):
    """Extract sources by geocode and save them to edb:s for other users,
    specified in a file with a user, and optionally an edb, given for each
    geocode."""

    edb = argDict["edb"]
    mapping = argDict["edbMapping"]
    codeLevel = argDict["codeLevel"]
    codeIndex = argDict["codeIndex"]

    # Processing grid sources
    for grd in argDict["grids"]:
        if not match(grd.par["NAME"].val, argDict["filters"]):
            continue

        # dict with (user, edb) as key and a list of gc to be included as value
        subEdbs = {}
        allFound = True
        for gc in mapping:
            # The mapping may contain only a user for each geocode; in that
            # case the user is stored as the value and the original edb name
            # is used as default. If both user and edb name are given, the
            # values are stored in a list for each gc.
            if isinstance(mapping[gc], list):
                tmp_user = mapping[gc][0]
                tmp_edb = mapping[gc][1]
            else:
                tmp_user = mapping[gc]
                tmp_edb = edb.name
            subEdb = Edb(argDict["domain"], tmp_user, tmp_edb)
            if (tmp_user, tmp_edb) in subEdbs:
                subEdbs[(tmp_user, tmp_edb)].append(gc)
            else:
                subEdbs[(tmp_user, tmp_edb)] = [gc]
            # Check if the edb exists, and if so, if the grid exists
            # (to save time when updating)
            if subEdb.exists():
                if grd.name not in subEdb.listGrids():
                    allFound = False
                    continue
            else:
                allFound = False

        if allFound:
            log.info(
                "Split grid %s already exists for all" % grd.name +
                " geocodes, remove to update")
            continue

        log.debug("Reading grid: %s" % grd.name)
        grd.readData()

        for user_edb, gcList in subEdbs.iteritems():
            subEdb = Edb(argDict["domain"], user_edb[0], user_edb[1])
            subEdb.rsrc = edb.rsrc
            # Extract data from grid that matches any of the geocodes in gcList
            subGrid = grd.sliceByGeoCode(
                codeLevel - 1,
                map(int, gcList),
                subEdb
            )
            if subGrid is not None:
                if not subEdb.exists():
                    subEdb.create(edbRsrc=edb.rsrcPath())
                    log.debug("Created edb for user %s" % subEdb.user)
                log.debug(
                    "Writing grid: %s to edb %s for user %s" % (
                        grd.name, subEdb.name, subEdb.user)
                )
                subGrid.load()

    if argDict["sources"] is not None:
        sub_source_writers = {}
        # subCompanydbs = {}
        # subFacilitydbs = {}

        # Read substance groups
        subgrpdb = Subgrpdb(edb)
        subgrpdb.read()
        facilitydb = Facilitydb(edb)
        facilitydb.read()
        companydb = Companydb(edb)
        companydb.read()

        for src in argDict["sources"]:
            if not match(src.NAME, argDict["filters"]):
                continue
            # gc = int(src.GEOCODE[codeIndex - 1][codeLevel - 1])
            gc = int(src.GEOCODE[codeIndex - 1].split(".")[codeLevel - 1])
            if src.PX is not None and src.PX != 0:
                facility = facilitydb.index[(src.PX, src.PY)]
                company = companydb.index[facility["COMPANY"]]

            # If no mapping is provided for a gc it is ignored
            if gc not in mapping:
                continue

            # Create sub-edb and corresponding db:s
            if gc not in sub_source_writers:
                tmp_user = mapping[gc]
                subEdb = Edb(argDict["domain"], tmp_user, edb.name)
                # If a sub-edb does not exist, create a new one with
                # edb.rsrc from the original edb
                if not subEdb.exists():
                    subEdb.create(edbRsrc=edb.rsrcPath())
                sub_source_writers[gc] = ModelWriter(
                    SourceStream(subEdb, mode='w')
                )
                # subCompanydbs[gc] = Companydb(subEdb)
                # subFacilitydbs[gc] = Facilitydb(subEdb)

                # Write substance groups to subEdb
                subgrpdb.setEdb(subEdb)
                log.info("Writing subgrpdb for user %s" % subEdb.user)
                subgrpdb.write()

            # subCompanydbs[gc].append(company, force=True)
            # subFacilitydbs[gc].append(facility, force=True)
            sub_source_writers[gc].write(src)  # write all sources

        for gc in sub_source_writers:
            # log.info("Writing companies to edb for gc %i" % gc)
            # subCompanydbs[gc].write()
            # log.info("Writing facilities to edb for gc %i" % gc)
            # subFacilitydbs[gc].write()
            log.info("Writing sources to edb for gc %i" % gc)

    return argDict
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)
    parser.add_argument("controlfile", metavar='CONTROLFILE',
                        action="store",
                        help="Controlfile for topdown processing")
    parser.add_argument("-t", "--template", metavar='TEMPLATEFILE',
                        action="store", dest="cf", default=None,
                        help="Generate default controlfile")
    args = parser.parse_args()

    if args.cf is not None:
        generateCf(args.cf)
        log.info("Wrote default controlfile")
        sys.exit(0)

    log.info("Starting topdown processing")

    # Opening controlfile
    cf = ControlFile(args.controlfile)
    dmn = Domain()

    log.info("Reading topdown table")
    tdTableName = cf.findExistingPath("topDownTable:")
    tdTable = DataTable()
    tdTable.keys.append("Code")
    tdTable.read(tdTableName, delimiter=";")

    log.info("Reading national totals table")
    natTotalTableName = cf.findExistingPath("nationalTotalTable:")
    natTable = DataTable(desc=[{"id": "Code", "type": unicode},
                               {"id": "description", "type": unicode}])
    natTable.keys.append("Code")
    natTable.read(natTotalTableName, units=True, defaultType=str)

    notationKeys = ["NE", "NO", "NA", "IE"]

    log.debug("Removing notation keys from national totals table")
    for row in natTable.data:
        for i in range(len(row)):
            if row[i] in notationKeys:
                row[i] = None

    log.debug("Converting all emission columns in national totals to float")
    for colId in natTable.listIds():
        if colId not in ["Code", "description"]:
            natTable.convertCol(colId, float)

    log.debug("Storing units from national totals for each substance in dict")
    natUnits = {}
    for col in natTable.desc:
        if col.get("units", None) is not None:
            natUnits[col["id"]] = col["units"]

    log.debug("Reading remaining data from control file")
    bottomupEdbName = cf.findString("bottomUpEdb:")
    topDownEdbName = cf.findString("topDownEdb:")
    emissionsEdbName = cf.findString("emissionsEdb:")
    userName = cf.findString("user:")
    year = cf.findInt("year:")

    # Initialize edb objects
    buEdb = Edb(dmn, userName, bottomupEdbName)
    tdEdb = Edb(dmn, userName, topDownEdbName)
    eEdb = Edb(dmn, userName, emissionsEdbName)

    log.info("Reading/preparing EDB:s")

    log.info("Reading subdb")
    subdb = Subdb(eEdb)
    subdb.read()

    log.info("Reading subgrpdb")
    subgrpdb = SubgrpStream(buEdb)
    subgrpdb.read()

    log.info("Reading facilitydb")
    facilityIn = FacilityStream(buEdb)

    log.info("Reading companydb")
    companyIn = CompanyStream(buEdb)

    facilityOut = FacilityStream(eEdb, mode="w")
    companyOut = CompanyStream(eEdb, mode="w")

    log.info("Writing company db to result edb")
    companyOut.write(companyIn.read())

    log.info("Writing facility db to result edb")
    facilityOut.write(facilityIn.read())

    if not buEdb.exists():
        log.error("Edb " + buEdb.name + " does not exist for user " +
                  userName + " in domain " + dmn.name)
        sys.exit(1)
    if not tdEdb.exists():
        log.error("Edb " + tdEdb.name + " does not exist for user " +
                  userName + " in domain " + dmn.name)
        sys.exit(1)
    if not eEdb.exists():
        log.error("Edb " + eEdb.name + " does not exist for user " +
                  userName + " in domain " + dmn.name)
        sys.exit(1)

    keys = tdEdb.listGrids()
    msg = "%i keys found in edb: %s" % (len(keys), tdEdb.name)
    log.info(msg)

    # sourcedb from bottom-up edb
    with SourceStream(buEdb, mode='rb') as source_instream:
        source_reader = ModelReader(source_instream)
        bu_sources = list(source_reader)

    log.info(
        "%i point sources found in edb: %s" % (
            len(bu_sources), buEdb.name)
    )

    # Empty sourcedb of the result edb
    if cf.findBoolean("emptyEmissionSourcedb:"):
        eEdb.empty_sourcedb()
        e_sources = []
        log.info("Removed point sources from edb: %s" % (eEdb.name))
    else:
        # sourcedb from emission edb (result edb)
        with SourceStream(eEdb, mode='rb') as source_instream:
            source_reader = ModelReader(source_instream)
            e_sources = list(source_reader)

        msg = "%i point sources found in edb: %s" % (len(e_sources), eEdb.name)
        log.info(msg)

    if not path.exists(eEdb.rsrcPath()):
        log.error("No edb.rsrc exists for emission edb")
        sys.exit()
    else:
        rsrc = Rsrc(eEdb.rsrcPath())

    acIndex = cf.findInt("acIndex:")
    codeDepth = rsrc.ac[acIndex - 1].depth

    substances = cf.findStringList("substances:")
    for subst in substances:
        if subst not in subdb.substIndices:
            log.error("Substance: " + subst + " not in Airviro substance list")
            sys.exit()

    # Initialize trace for debug and additional logging
    if cf.findBoolean("trace:") == True:
        log.info("Initializing trace for detailed logging")
        trace = TraceDef(
            active=True,
            substances=cf.findStringList("trace.substances:"),
            logfile=cf.findString("trace.logfile:"),
            regdefgc=cf.findIntList("trace.regdef.gc:",
                                    optional=True, default=None),
            gcDefRaster=cf.findExistingPath("trace.gcraster:")
        )
    else:
        trace = TraceDef(active=False)

    log.info("Initializing result table")
    resTablePath = cf.findString("resTable:")
    resTable = DataTable(desc=[{"id": "Code", "type": unicode}])
    resTable.keys.append("Code")
    for subst in substances:
        resTable.addCol({"id": subst, "type": float, "unit": "%"})

    # Create emission grid template (with geocodes)
    log.info("Reading emission grid template")
    eGridTemplatePath = cf.findExistingPath("emisGridTemplatePath:")
    eGridTemplate = Egrid(eEdb, "name")
    if eGridTemplatePath[-4:] == ".asc":
        eGridTemplatePath = eGridTemplatePath[:-4]
    eGridTemplate.readData(eGridTemplatePath)
    eGridTemplate.substances = {}
    eGridTemplate.par["SUBSTANCE"].val = []

    dd = {"key": None,
          "regstat": None,
          "regdef": None,
          "bu_sources": bu_sources,
          "psIndices": [],
          "units": natUnits,
          "rsrc": rsrc,
          "subdb": subdb,
          "trace": trace,
          "subgrpdb": subgrpdb
          }

    # Process all rows in the topdown table
    for row in tdTable.data:
        code = row[tdTable.colIndex["Code"]]
        active = row[tdTable.colIndex["Active"]]
        statType = row[tdTable.colIndex["Stat_type"]]
        if active == "no":
            continue
        log.info("Code: " + code)

        distributed = False

        # Add '-' to the code to reach max length (fix for a GUI bug)
        airviroCode = code
        # while len(airviroCode.split(".")) < codeDepth:
        #     airviroCode += ".-"

        tdrow = tdTable.data[tdTable.rowIndex([code])]
        nrow = natTable.data[natTable.rowIndex([code])]

        # Create a resTable row to fill with data
        resrow = [None] * resTable.ncols
        resrow[0] = code

        # Check if national totals are non-zero
        nonZero = False
        for val in nrow:
            if val is not None:
                if val > 0:
                    nonZero = True
                    break

        # Filter out indices for point sources with the current ac.
        # Sources coded with sub-codes are also included, which makes it
        # possible to estimate top-down emissions on a higher code level.
        psIndices = []
        for i, ps in enumerate(bu_sources):
            codeMatch = False

            for emis in ps.EMISSION:
                # It is assumed that the first code is used while processing topdown
                ac = emis.ACTCODE[0]
                if ac[-1] == ".":
                    ac = ac[:-1]
                # if ac[:len(code)] == code:
                if ac == code:
                    codeMatch = True
                    break

            if not codeMatch:
                for emis in ps.SUBGRP:
                    # It is assumed that the first code is used while processing topdown
                    ac = emis.ACTCODE[0]
                    if ac[:len(code)] == code:
                        codeMatch = True
                        break

            if codeMatch:
                psIndices.append(i)

        dd["psIndices"] = psIndices

        keyName = row[tdTable.colIndex["Key"]]

        # If no distribution key is specified and there are no point
        # sources in the bottom-up edb, continue with the next code
        if keyName is None and psIndices == []:
            log.debug("No key and no point sources found for code: %s, skipping..."
                      % code)
            resTable.addRow(resrow)
            continue

        if psIndices != []:
            msg = "--Found %i pointsources" % len(psIndices)
            log.info(msg)

        if keyName is not None:
            if keyName not in keys:
                log.error("No such key: " + keyName)
                sys.exit()

            msg = "--Key: %s" % keyName
            log.info(msg)
            keyGrid = Egrid(tdEdb, keyName)
            keyGrid.readData()
            log.debug("Read key: " + keyName + " from topdownEdb")

            # Create emission grid to store distributed emissions
            eGrid = deepcopy(eGridTemplate)
            eGrid.name = code.replace(".", "_")
            eGrid.par["NAME"].val = code
            eGrid.par["INFO2"].val = "Distribution key: " + keyGrid.par["NAME"].val
            eGrid.par["ACTIVITYCODE"].val = [airviroCode.split(".")]

        regstatName = row[tdTable.colIndex["Regstat"]]
        regdefName = row[tdTable.colIndex["Regdef"]]

        if regstatName is not None:
            if regdefName is None:
                log.error("No region definition given for regional statistics: " +
                          regstatName)
                sys.exit(1)

            regstatPath = path.join(dmn.domainPath(), "topdown", "regstat", regstatName)
            regstat = DataTable()
            log.info("regstatPath: " + regstatPath)
            regstat.read(regstatPath, units=True, defaultType=float, delimiter=";")

            if "Geocode" not in regstat.listIds():
                log.error("No Geocode column found in regstat")
                sys.exit(1)

            regstat.convertCol("Geocode", int)
            regstat.keys.append("Geocode")  # Make Geocode the primary key

            # Create list of unique geocodes
            geocodes = [row[regstat.colIndex["Geocode"]] for row in regstat.data]
            geocodes = unique(geocodes)

            for colId in regstat.listIds():
                if colId.lower() == "year":
                    rows = []
                    regstat.convertCol(colId, int)
                    # Make it possible to accumulate year
                    regstat.setKeys(regstat.keys + [colId])

                    # Calculate the total emission for each geocode,
                    # in case there are multiple rows for different fuels etc.
                    colsToSum = regstat.listIds()
                    colsToSum.remove(colId)
                    colsToSum.remove("Geocode")
                    for gc in geocodes:
                        # Sum all numeric values in colsToSum for rows
                        # matching row id [gc, year]; an accumulated row is
                        # returned and appended to rows
                        rowId = regstat.dict2RowId({"Geocode": gc, colId: year})
                        rows.append(regstat.accumulate(rowId, "sum", colsToSum))
                    regstat.data = rows  # Replace original rows with accumulated rows
                    regstat.keys.remove(colId)
                    break

            regdef = Raster()
            regdefPath = path.join(dmn.domainPath(), "topdown", "regdef", regdefName)
            regdef.read(regdefPath)

            dd["regstat"] = regstat
            dd["regdef"] = regdef
        else:
            dd["regstat"] = None
            dd["regdef"] = None

        if dd["regstat"] is not None and len(bu_sources) > 0 and statType == "fixed":
            log.info("--Regionalizing pointsources")
            dd = regionalizePS(dd, code)

        if keyName is not None and nonZero:
            regionalizedDefault = False
            # Spatial distribution of emissions
            for subst in substances:
                sInd = subdb.substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]

                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # TODO: should give reference to subgrps to include emis from them
                    pstot += source.get_emis(
                        sInd,
                        toUnit,
                        eEdb,
                        actcodes=[code]
                    )

                if ntot is None or ntot == 0:
                    if pstot > 0:
                        # 9999 is used as a marker for missing national total
                        resrow[resTable.colIndex[subst]] = 9999.0
                        log.warning(
                            "Nattot is 0 but ps tot is: %f %s" % (pstot, toUnit))
                    continue

                nrest = ntot - pstot

                resrow[resTable.colIndex[subst]] = 100.0
                if abs(nrest / ntot) < 0.0001:
                    nrest = 0
                    log.info(
                        "--Rest is < 0.01 % of national total, rounded to zero"
                    )
                    continue
                elif nrest < 0:
                    log.warning(
                        "--National rest is below zero, %4.2f proc for %s" % (
                            -1 * nrest / ntot * 100, subst)
                    )
                    dd["trace"].write()
                    # continue

                log.info(
                    "---Substance: " + subst + ", rest is: " + str(nrest) +
                    toUnit + " = " + str(nrest / ntot * 100.0) + "%"
                )

                try:
                    keyRast = keyGrid.substances[sInd]
                except KeyError:
                    keyRast = keyGrid.substances[subdb.substIndices["all"]]

                dd["key"] = keyRast
                if dd["regstat"] is not None:
                    if (subst not in regstat.colIndex and
                            sInd not in keyGrid.substances and
                            not regionalizedDefault):
                        dd = regionalizeKey(dd, subst, code)
                        regionalizedDefault = True
                    else:
                        dd = regionalizeKey(dd, subst, code)

                emisRast = distribute(dd["key"], nrest)
                emisRast = emisRast * unitConvFac(toUnit, "ton/year")
                eGrid.addData(emisRast, dd["subdb"].substIndices[subst])
                distributed = True
        else:
            # resTable is filled.
            # In case all national totals are zero but there are ps
            for subst in substances:
                sInd = dd["subdb"].substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # subgrps are not used!
                    pstot += source.get_emis(sInd, toUnit, buEdb,
                                             actcodes=[code])
                if ntot != 0 and ntot is not None:
                    resrow[resTable.colIndex[subst]] = pstot / ntot * 100.0
                else:
                    resrow[resTable.colIndex[subst]] = -999.0

        if len(dd["psIndices"]) > 0:
            tmp_sources = (bu_sources[i] for i in dd["psIndices"])
            with SourceStream(eEdb, mode='wb') as out_source_stream:
                source_writer = ModelWriter(out_source_stream)
                for source in tmp_sources:
                    source_writer.write(source)
            log.debug("Wrote ps to emission edb")

        if distributed:
            eGrid.load()
            log.debug("Wrote emission grid to emission edb")

        dd["trace"].write()
        resTable.addRow(resrow)

    resTableFile = open(resTablePath, "w")
    resTable.write(resTableFile)

    log.info("Finished topdown process")
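# The controlfile keys read by the topdown script above, collected from its
# cf.find* calls (all values are hypothetical):
#
#   topDownTable:          /path/to/topdown.csv
#   nationalTotalTable:    /path/to/nattot.csv
#   bottomUpEdb:           buEdb
#   topDownEdb:            tdEdb
#   emissionsEdb:          resultEdb
#   user:                  sven
#   year:                  2010
#   emptyEmissionSourcedb: 0
#   acIndex:               1
#   substances:            NOx SO2
#   trace:                 0
#   resTable:              /path/to/resTable.txt
#   emisGridTemplatePath:  /path/to/template.asc
#
# The topdown table is expected to contain at least the columns Code,
# Active, Stat_type, Key, Regstat and Regdef, as indexed in the row loop.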
def main():
    # -----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)
    parser.add_option("-l", "--loglevel",
                      action="store", dest="loglevel", default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    parser.add_option("-t", "--template",
                      action="store", dest="cf", default=None,
                      help="Generate default controlfile")
    parser.add_option("-f", "--force",
                      action="store_true", dest="force", default=False,
                      help="Start the process without confirming the domain")
    parser.add_option("-n", "--nproc",
                      action="store", dest="nproc", default=1,
                      help="Number of gifmap processes to run at a time")
    (options, args) = parser.parse_args()

    # ------------Setting up logging capabilities -----------
    rootLogger = logger.RootLogger(int(options.loglevel))
    log = rootLogger.getLogger(sys.argv[0])

    if options.cf is not None:
        controlfile.generateCf(path.abspath(options.cf), controlFileTemplate)
        print("Wrote default controlfile")
        sys.exit()

    if len(args) != 1:
        parser.error("Incorrect number of arguments")

    domainName = os.environ["AVDBNAME"]
    dmn = domain.Domain(domainName)

    if not options.force:
        answer = raw_input("Chosen dbase is: " + domainName + ", continue (y/n)? ")
        if answer == "y":
            dmn = domain.Domain()
        else:
            sys.exit("Interrupted by user")

    # ---Opening controlfile and retrieving data----
    cf = controlfile.ControlFile(fileName=path.abspath(args[0]))

    # Get search parameters from control file
    substances = cf.findStringList("substances:")
    outputDir = cf.findExistingPath("outputDir:")
    baseSearches = cf.findStringList("searches:")
    fromProj = cf.findString("fromProj:")
    toProj = cf.findString("toProj:")
    resample = cf.findBoolean("resample:")

    dmn = domain.Domain(os.environ.get('AVDBNAME'))
    substDict = dmn.listSubstanceIndices()

    if resample:
        # Get proj4 projection definitions from aliases
        try:
            fromProj = transcoord.proj4Dict[fromProj]
        except KeyError:
            print("Projection %s not found in proj4Dict in transCoord.py" % fromProj)
        try:
            toProj = transcoord.proj4Dict[toProj]
        except KeyError:
            print("Projection %s not found in proj4Dict in transCoord.py" % toProj)

        # Get output grid parameters from controlfile
        out_xll = cf.findFloat("out_xll:")
        out_yll = cf.findFloat("out_yll:")
        out_ncols = cf.findInt("out_ncols:")
        out_nrows = cf.findInt("out_nrows:")
        out_cellsize = cf.findFloat("out_cellsize:")

    # Build list with search definitions
    searches = []
    for search_id in baseSearches:
        prefix = cf.findString("search." + search_id + ".prefix:")
        macroPath = cf.findString("search." + search_id + ".macro:")
        starttime = cf.findString("search." + search_id + ".starttime:")
        endtime = cf.findString("search." + search_id + ".endtime:")

        parVals = cf.findParam("search." + search_id + ".par.", findAll=True)
        parDict = {}
        for p in parVals:
            ind = p.index(":")
            par = p[:ind + 1]
            val = p[ind + 1:].strip()
            parDict[par] = val

        alobVals = cf.findParam("search." + search_id + ".alob.", findAll=True)
        alobDict = {}
        for a in alobVals:
            ind = a.index(":")
            key = a[:ind]
            val = a[ind + 1:].strip()
            alobDict[key] = val

        macro = controlfile.ControlFile(fileName=macroPath, removeComments=False)
        unitIndex = macro.findInt("edb.unit:")
        edbName = macro.findString("edb.edb:")
        userName = macro.findString("edb.user:")
        macro.setParam("FROM :", starttime)
        macro.setParam("TO :", endtime)
        for key, val in parDict.iteritems():
            macro.setParam(key, val)
        for key, val in alobDict.iteritems():
            macro.setParam(key, "R" + val, addSpace=False)

        edb = Edb(dmn.name, userName, edbName)
        rsrc = Rsrc(edb.rsrcPath())
        unitName = rsrc.search[unitIndex]

        for substance in substances:
            filename = path.join(outputDir,
                                 prefix + "_" + substance.replace(" ", "_") + ".asc")
            searches.append(
                {'id': search_id,
                 'macro': macro,
                 'prefix': prefix,
                 'substance': substance,
                 'unit': unitName,
                 'filename': filename
                 }
            )

    if resample:
        summaryTable = datatable.DataTable(
            desc=[{"id": 'search_id', "type": unicode},
                  {"id": 'substance', "type": unicode},
                  {"id": 'macro', "type": unicode},
                  {"id": 'filename', "type": unicode},
                  {"id": 'sum', "type": float},
                  {"id": 'proj_sum', "type": float},
                  {"id": 'unit', "type": unicode}],
            keys=["search_id", "substance"])
    else:
        summaryTable = datatable.DataTable(
            desc=[{"id": 'search_id', "type": unicode},
                  {"id": 'substance', "type": unicode},
                  {"id": 'macro', "type": unicode},
                  {"id": 'filename', "type": unicode},
                  {"id": 'sum', "type": float},
                  {"id": 'unit', "type": unicode}],
            keys=["search_id", "substance"])

    # Initialising parallel run
    nproc = int(options.nproc)  # number of processes to run in parallel
    running_proc = 0            # counter for number of running processes
    pids = {}                   # dictionary to store process output and info
    todo = len(searches)        # number of searches left to start
    searchInd = 0               # index of the next search to start

    def countRunning(pids):
        """Count the number of running processes."""
        for pid, props in pids.iteritems():
            props['ret_val'] = props['proc'].poll()
        return len([pid for pid in pids if pids[pid]['ret_val'] is None])

    while todo + running_proc > 0:  # loop until all processes have finished
        if running_proc == nproc or todo == 0:
            print("Running gifmap...")
            # When nproc processes are started, the program waits here
            # until one is finished before adding a new one
            while countRunning(pids) == running_proc:
                time.sleep(5)

            # Mark first found finished process as done
            for pid, props in pids.iteritems():
                if props['ret_val'] is not None and not props['done']:
                    props['done'] = True
                    break

            command = props["cmd"]
            if props['ret_val'] != 0:
                errMsg = pids[pid]["proc"].stderr.read()
                print("Error while running command: %s\n%s" % (pids[pid], errMsg))
                sys.exit(1)

            print("Finished search %s" % props['search_id'])

            # Find search in search list
            running_proc -= 1
            for f_search in searches:
                if (f_search["id"] == props['search_id'] and
                        f_search['substance'] == props['substance']):
                    break
            f_search['done'] = True

            try:
                f_search['res'] = res2rast(f_search['filename'])
                # Store original result from search
                f_search['sum'] = f_search['res'].sum()
            except:
                f_search['res'] = None
                output = pids[pid]["proc"].stdout.read()
                f_search['sum'] = re.compile("#EMIS (\S*).*").search(output).group(1)
                print "Could not extract result raster from %s" % f_search['filename']
                print "Using total from gifmap log"

            if resample:
                outRast = raster.Raster(Xll=out_xll, Yll=out_yll, Ncols=out_ncols,
                                        Nrows=out_nrows, Cellsize=out_cellsize,
                                        Nodata=-9999)
                # Run post-processing of result for finished processes
                if f_search['sum'] > 0:
                    print("Projecting result to target CRS")
                    f_search['res'] = transcoord.transformEmisRaster(
                        f_search['res'], outRast,
                        fromProj, toProj,
                        tmpDir=dmn.tmpDir())
                    # Store projected result from search
                    f_search['proj_sum'] = f_search['res'].sum()
                else:
                    f_search['proj_sum'] = 0

            # Write result to file
            if f_search['res'] is not None:
                f_search['res'].write(f_search['filename'])

            # Add total emission to summary table
            if resample:
                summaryTable.addRow(
                    [f_search['id'],
                     f_search['substance'],
                     f_search['macro'].name,
                     f_search['filename'],
                     f_search['sum'],
                     f_search['proj_sum'],
                     f_search['unit']])
            else:
                summaryTable.addRow(
                    [f_search['id'],
                     f_search['substance'],
                     f_search['macro'].name,
                     f_search['filename'],
                     f_search['sum'],
                     f_search['unit']])

        # Add another search process
        elif todo > 0:
            c_search = searches[searchInd]
            substance = c_search["substance"]
            substanceIndex = substDict[substance]
            c_search['macro'].setParam("ELEMENT :", substanceIndex)
            c_search['macro'].write()
            command = ("gifmap -T -i " + c_search['macro'].name +
                       " -o " + c_search['filename'])
            p = subprocess.Popen(command, stderr=subprocess.PIPE,
                                 stdout=subprocess.PIPE, shell=True)

            # Add info and handles to process dictionary
            pids[p.pid] = {"proc": p,
                           "search_id": c_search['id'],
                           "substance": c_search['substance'],
                           "cmd": command,
                           'ret_val': None,
                           'done': False}
            print(
                "Started search %s, substance %s" % (
                    c_search['id'], c_search['substance']))
            running_proc += 1
            todo -= 1
            searchInd += 1
            # Each process needs some time to read
            # the macro before it is changed
            time.sleep(10)

    # summaryTable.sortRows()
    tableFile = open(path.join(outputDir, "summaryTable.txt"), 'w')
    summaryTable.write(tableFile)
    print("Finished")
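# A hypothetical search definition in the controlfile for the parallel
# gifmap runs above (key names from the cf.find* calls, values made up):
#
#   searches:                s1
#   search.s1.prefix:        urban
#   search.s1.macro:         /path/to/search.macro
#   search.s1.starttime:     2010 01 01 00
#   search.s1.endtime:       2010 12 31 23
#   search.s1.par.SOMEPAR:   value      (optional, repeatable)
#   search.s1.alob.SOMEALOB: value      (optional, repeatable)
#
# Each search is expanded to one gifmap process per substance, and at most
# nproc processes run concurrently.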
def main():
    # Setting up parser
    parser = OptionParser(usage=usage, version=version)
    (options, args) = parser.parse_args()

    # Setting up logging environment
    logLevel = os.environ.get("LOG_LEVEL")
    if logLevel is None or logLevel == "":
        logLevel = 2
    logLevels = {0: logging.NOTSET,
                 1: logging.WARNING,
                 2: logging.INFO,
                 3: logging.DEBUG}
    rootLogger = logging.getLogger("")
    logLevelObj = logLevels[int(logLevel)]
    # info is written to stderr
    # errors are written to stdout
    rootLogger.setLevel(logLevelObj)
    infoHandler = logging.StreamHandler(sys.stderr)
    infoHandler.setLevel(logLevelObj)
    errHandler = logging.StreamHandler(sys.stdout)
    errHandler.setLevel(logging.ERROR)
    formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s")
    infoHandler.setFormatter(formatter)
    errHandler.setFormatter(formatter)
    rootLogger.addHandler(infoHandler)
    rootLogger.addHandler(errHandler)
    logger = logging.getLogger("trendReport")

    if len(args) != 3:
        parser.error("Incorrect number of arguments")

    metadatafile = path.abspath(args[0])  # file with meta data for trend report
    infile = path.abspath(args[1])        # file with emission data per code
    outfile = path.abspath(args[2])       # path for output excel report

    if not path.exists(metadatafile):
        logger.error("Input meta data file does not exist")
        sys.exit(-1)
    if not path.exists(infile):
        logger.error("Input emission data file does not exist")
        sys.exit(-1)

    # Reading emission data file
    f = open(infile, "r")
    dlines = f.readlines()
    f.close()

    # Reading meta-data file
    f = codecs.open(metadatafile, "r", "HP Roman8")
    mdlines = f.readlines()
    f.close()

    # Parsing meta-data file
    name = mdlines.pop(0)[5:].strip()
    nrmacros = int(mdlines.pop(0).split()[1])
    nrsubstances = int(mdlines.pop(0).split()[1])
    units = mdlines.pop(0)[5:].strip()
    macros = []
    for mIndex in range(nrmacros):
        edbuser = mdlines.pop(0).split()[1]
        edbname = mdlines.pop(0).split()[1]
        label = mdlines.pop(0).split()[1]
        macros.append({"edbuser": edbuser,
                       "edbname": edbname,
                       "label": label})

    # Finding path to default edb.rsrc file in domain
    domainPath = os.environ["DBAS_PATH"]
    domainName = os.environ["AVDBNAME"]
    rsrcPath = path.join(domainPath, "dba/edb/edb.rsrc")

    # Creating codeTable object
    # (for look-ups of activity code names and geocode names)
    try:
        cTable = codetable.CodeTable(rsrcPath)
    except:
        logger.error("Could not build code tree from .../dba/edb/edb.rsrc")
        sys.exit(-1)

    # Parsing emission data file
    macroIndex = 0
    emissions = []
    substances = []
    for line in dlines:
        data = line.split()
        if data[0] == "#MACRO":
            # If header, update substance and macro index
            vals = line.split('"')
            macroIndex = int(data[1])
            subst = vals[1]
            if subst not in substances:
                substances.append(subst)
        else:
            # Else get codes and corresponding names
            ac = data[1]
            ac = ac.split(".")
            if ac[0] == "<all>":
                # Add "all" to get the right depth in the dictionary
                ac = ["all", "all"]
            elif len(ac) == 1:
                ac.append("all")

            gc = data[3]
            gc = gc.split(".")
            if gc[0] == "<all>":
                gc = ["all", "all"]
            elif len(gc) == 1:
                gc.append("all")

            gcLev1Name = cTable.gc[gc[0]]["name"]
            if gc[1] != "all":
                gcLev2Name = cTable.gc[gc[0]][gc[1]]["name"]
            else:
                gcLev2Name = "Alla"

            acLev1Name = cTable.ac[ac[0]]["name"]
            if ac[1] != "all":
                acLev2Name = cTable.ac[ac[0]][ac[1]]["name"]
            else:
                acLev2Name = "Alla"

            val = data[4]
            emissions.append(
                {
                    "label": label,
                    "substance": subst,
                    "gcLev1": gcLev1Name,
                    "gcLev2": gcLev2Name,
                    "acLev1": acLev1Name,
                    "acLev2": acLev2Name,
                    "val": val,
                    "macroIndex": macroIndex,
                }
            )

    # Create style objects for excel output
    header1Style = xlwt.easyxf(
        "font: name Times New Roman, color-index black, bold on",
        num_format_str="0.000E+00")
    normalStyle = xlwt.easyxf(
        "font: name Times New Roman, color-index black, bold off",
        num_format_str="0.000E+00")

    excelBook = xlwt.Workbook()

    # Creating info sheet
    infoWs = excelBook.add_sheet("Info")
    infoWs.col(0).width = 256 * 20
    infoWs.col(1).width = 256 * 25
    infoWs.col(2).width = 256 * 20
    infoWs.col(3).width = 256 * 200
    infoWs.write(0, 0, u"Rapportnamn:", header1Style)
    infoWs.write(0, 1, name, header1Style)
    infoWs.write(1, 0, u"Beskrivning av dataunderlaget", header1Style)
    infoWs.write(3, 0, u"Makron (specificerar utsökningar ur databasen)", header1Style)
    infoWs.write(4, 0, u"Etikett", header1Style)
    infoWs.write(4, 1, u"Ägare till EDB", header1Style)
    infoWs.write(4, 2, u"EDB (emissiondatabas)", header1Style)
    infoWs.write(4, 3, u"Beskrivning", header1Style)

    for m in range(nrmacros):
        infoWs.write(5 + m, 0, macros[m]["label"])
        infoWs.write(5 + m, 1, macros[m]["edbuser"])
        infoWs.write(5 + m, 2, macros[m]["edbname"])
        # Reading edb description file (if it exists)
        edb = Edb(domainName, macros[m]["edbuser"], macros[m]["edbname"])
        infoWs.write(5 + m, 3, edb.desc().replace("\n", " "))

    # Split substances into greenhouse gases and air quality related
    ghgList = [s for s in substances if s in ghgs]
    aqList = [s for s in substances if s not in ghgs]

    # Write air quality headers
    firstRow = 3
    if len(aqList) > 0:
        aqWs = excelBook.add_sheet(u"Luftföroreningar")
        aqWs.col(0).width = 256 * 25
        aqWs.col(1).width = 256 * 30
        aqWs.col(2).width = 256 * 20
        aqWs.col(3).width = 256 * 15
        for col in range(nrsubstances * nrmacros):
            aqWs.col(col + 4).width = 256 * 15

        aqWs.write(0, 0, u"Rapportnamn:", header1Style)
        aqWs.write(0, 1, name, header1Style)
        aqWs.write(1, 0, u"Emissioner av luftföroreningar", header1Style)
        aqWs.write(1, 1, u"Enhet: " + units, header1Style)
        aqWs.write(firstRow, 0, "Huvudsektor", header1Style)
        aqWs.write(firstRow, 1, "Undersektor", header1Style)
        aqWs.write(firstRow, 2, u"Län", header1Style)
        aqWs.write(firstRow, 3, "Kommun", header1Style)

    # Write ghg headers
    if len(ghgList) > 0:
        ghgWs = excelBook.add_sheet(u"Växthusgaser")
        ghgWs.col(0).width = 256 * 25
        ghgWs.col(1).width = 256 * 30
        ghgWs.col(2).width = 256 * 20
        ghgWs.col(3).width = 256 * 15
        for col in range(nrsubstances * nrmacros):
            ghgWs.col(col + 4).width = 256 * 15

        ghgWs.write(0, 0, u"Rapportnamn:", header1Style)
        ghgWs.write(0, 1, name, header1Style)
        ghgWs.write(1, 0, u"Emissioner av Växthusgaser", header1Style)
        ghgWs.write(2, 0, "Uttryckt i CO2-ekvivalenter", header1Style)
        ghgWs.write(1, 1, u"Enhet: " + units, header1Style)
        ghgWs.write(firstRow, 0, "Huvudsektor", header1Style)
        ghgWs.write(firstRow, 1, "Undersektor", header1Style)
        ghgWs.write(firstRow, 2, u"Län", header1Style)
        ghgWs.write(firstRow, 3, "Kommun", header1Style)

    def getColInd(nmacros, substances, macroInd, subst):
        # Gets the column index in the excel file
        sInd = substances.index(subst)
        return 4 + macroInd + sInd * nmacros

    # Write macro labels and substance headers for air quality sheet
    for sInd, subst in enumerate(aqList):
        for mInd, macro in enumerate(macros):
            col = getColInd(nrmacros, aqList, mInd, subst)
            aqWs.write(firstRow - 1, col, macro["label"], header1Style)
            aqWs.write(firstRow, col, subst, header1Style)

    # Write macro labels and substance headers for ghg sheet
    for sInd, subst in enumerate(ghgList):
        for mInd, macro in enumerate(macros):
            col = getColInd(nrmacros, ghgList, mInd, subst)
            ghgWs.write(firstRow - 1, col, macro["label"], header1Style)
            ghgWs.write(firstRow, col, subst, header1Style)

    # Loop over all emissions, writing them to the correct column and row
    ghgRow = []
    aqRow = []
    for m in range(nrmacros * nrsubstances + 4):
        ghgRow.append(firstRow + 1)
    for m in range(nrmacros * nrsubstances + 4):
        aqRow.append(firstRow + 1)

    for emis in emissions:
        subst = emis["substance"]
        emisVal = emis["val"]
        macroInd = emis["macroIndex"]

        if subst in ghgList:
            col = getColInd(nrmacros, ghgList, macroInd, subst)
            row = ghgRow[col]
            if ghgRow[0] <= row:
                ghgWs.write(row, 0, emis["acLev1"], normalStyle)
                ghgWs.write(row, 1, emis["acLev2"], normalStyle)
                ghgWs.write(row, 2, emis["gcLev1"], normalStyle)
                ghgWs.write(row, 3, emis["gcLev2"], normalStyle)
                ghgRow[0] += 1
            # Convert the emission to CO2 equivalents
            ghgWs.write(row, col,
                        float(emisVal) * float(ekvFactors[subst]),
                        normalStyle)
            ghgRow[col] += 1
        else:
            col = getColInd(nrmacros, aqList, macroInd, subst)
            row = aqRow[col]
            if aqRow[0] <= row:
                aqWs.write(row, 0, emis["acLev1"], normalStyle)
                aqWs.write(row, 1, emis["acLev2"], normalStyle)
                aqWs.write(row, 2, emis["gcLev1"], normalStyle)
                aqWs.write(row, 3, emis["gcLev2"], normalStyle)
                aqRow[0] += 1
            aqWs.write(row, col, float(emisVal), normalStyle)
            aqRow[col] += 1

    excelBook.save(outfile)
def main():
    # -----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)
    parser.add_option(
        '-v',
        action='store_const', dest='loglevel',
        const=logging.DEBUG, default=get_loglevel(),
        help='produce verbose output'
    )
    parser.add_option("-t", "--template",
                      action="store", dest="cf", default=None,
                      help="Generate default controlfile")
    (options, args) = parser.parse_args()

    # ------------Setting up logging capabilities -----------
    logging.basicConfig(
        format='%(levelname)s:%(name)s: %(message)s',
        level=options.loglevel,
    )
    global log
    log = logging.getLogger(parser.prog)

    if options.cf is not None:
        generateCf(path.abspath(options.cf))
        log.info("Wrote default controlfile")
        sys.exit()

    if len(args) != 1:
        parser.error("Incorrect number of arguments")

    # Opening controlfile
    cf = ControlFile(fileName=path.abspath(args[0]))

    domain = Domain()
    edbName = cf.findString("edb:")
    userName = cf.findString("user:")
    edb = Edb(domain, userName, edbName)
    if not edb.exists():
        log.error("Edb " + edbName + " does not exist for user " +
                  userName + " in domain " + domain.name)
        sys.exit()

    year = cf.findInt("year:")
    substList = cf.findStringList("substances:")

    # Remove duplicates from substance list
    substances = {'nat': []}
    for subst in substList:
        if subst not in substances['nat']:
            substances['nat'].append(subst)

    # Make dictionary of substance-unit pairs
    unitDict = {}
    defaultUnit = cf.findString("nationalTotals.defaultUnit:")
    unitList = cf.findStringList("nationalTotals.units:")
    for i in range(len(unitList)):
        unitSubstances = cf.findStringList("nationalTotals.unit." + str(i) + ":")
        for s in unitSubstances:
            unitDict[s] = unitList[i]
    for subst in substances['nat']:
        if subst not in unitDict.keys():
            unitDict[subst] = defaultUnit

    nationalTotalsTablePath = cf.findString("nationalTotalsTable:")
    nationalTotalsFile = codecs.open(nationalTotalsTablePath, 'w',
                                     encoding="latin6")
    nationalTotalsTreePath = cf.findString("nationalTotalsTree:")
    nationalTotalsTreePath = path.abspath(nationalTotalsTreePath)
    nationalTotalTreeFile = open(nationalTotalsTreePath, 'w')

    log.info("Reading emissions from CRF")
    CRFTree = CodeEmisTree(name="CRF")
    CRFPath = cf.findExistingPath("CRF.path:")
    CRFTree.parseCRFXML(CRFPath, year)
    CRFTree.calculateSums(byAttribute="Category")
    # CRFTree.write(sys.stdout)
    CRFTable = CRFTree.createTable(keys=["Category", "Classification", "AWMS"])

    # Global warming potentials used to aggregate the fluorinated gases
    # into HFC and PFC totals
    HFC = {"HFC-23": 11700,
           "HFC-32": 650,
           "HFC-41": 150,
           "HFC-43-10 mee": 1300,
           "HFC-125": 2800,
           "HFC-134": 1000,
           "HFC-134a": 1300,
           "HFC-152a": 140,
           "HFC-143": 300,
           "HFC-143a": 3800,
           "HFC-227ea": 2900,
           "HFC-236fa": 6300,
           "HFC-245ca": 560}

    PFC = {"CF4": 6500,
           "C2F6": 9200,
           "C3F8": 7000,
           "C4F10": 7000,
           "c-C4F8": 8700,
           "C5F12": 7500,
           "C6F14": 7400}

    CRFTable.aggregateCols({"id": "HFC-tot", "type": "str", "units": "Gg"}, HFC)
    CRFTable.aggregateCols({"id": "PFC-tot", "type": "str", "units": "Gg"}, PFC)
    CRFTable.sortRows()
    CRFTableKeys = cf.findStringList("CRF.tableKeys:")
    CRFTable.keys = CRFTableKeys
    # CRFTable.write(sys.stdout)

    log.info("Reading emissions from NFR")
    NFRTree = CodeEmisTree(name="NFR")
    NFRPath = cf.findExistingPath("NFR.path:")
    NFRTree.parseNFRTable(filename=NFRPath)
    NFRTree.calculateSums()
    NFRTable = NFRTree.createTable()
    NFRTableKeys = cf.findStringList("NFR.tableKeys:")
    NFRTable.keys = NFRTableKeys
    colIndex = NFRTable.getColIndex("Code")
    for row in NFRTable.data:
        if isinstance(row[colIndex], str):
            row[colIndex] = row[colIndex].replace(" ", "")

    log.info("Reading emissions from Mobile")
    MobilePath = cf.findExistingPath("Mobile.path:")
    MobileTable = DataTable()
    MobileTable.read(MobilePath, units=True)
    MobileTable = MobileTable.filtered({"Year": str(year)})
    MobileTableKeys = cf.findStringList("Mobile.tableKeys:")
    MobileTable.keys = MobileTableKeys

    # Remove biogenic emissions of CO2.
    # Remove GHG from Bunkers, except for international aviation LTO.
    for row in MobileTable.data:
        # if "Bio" in row[MobileTable.getColIndex("Codename")]:
        if "Bio" in row[MobileTable.getColIndex("Fueltype")]:
            row[MobileTable.getColIndex("CO2")] = None
        # if row[MobileTable.getColIndex("Localcodename")] == "Bunkers" and row[MobileTable.getColIndex("Localsubcodename")] != "LTO":
        if (row[MobileTable.getColIndex("Subsector")] == "Bunkers" and
                row[MobileTable.getColIndex("Region")] != "LTO"):
            row[MobileTable.getColIndex("CO2")] = None
            row[MobileTable.getColIndex("CH4")] = None
            row[MobileTable.getColIndex("N2O")] = None

    log.info("Reading emissions from Manual")
    ManualPath = cf.findExistingPath("Manual.path:")
    ManualTable = DataTable()
    ManualTable.read(ManualPath, units=True)
    ManualTableKeys = cf.findStringList("Manual.tableKeys:")
    ManualTable.keys = ManualTableKeys

    srcTables = {'NFR': NFRTable,
                 'CRF': CRFTable,
                 'Mobile': MobileTable,
                 'Manual': ManualTable}

    for tKey in srcTables.keys():
        substList = cf.findStringList(tKey + ".substances:")
        substances[tKey] = substList

    log.info("Reading dataSourceTable")
    dsPath = cf.findExistingPath("sourceTable:")
    dsTable = DataTable()
    dsTable.read(dsPath)

    acIndex = cf.findInt("nationalTotals.acIndex:")
    log.info("Reading activity codes from edb")
    natTree = CodeEmisTree("National totals", "Gg")
    natTree.readActivityCodes(path.join(edb.edbPath(), "edb.rsrc"), acIndex)

    log.info("Identifying column indices for sources in dataSourceTable")
    # codeHeaders = {"Mobile": "Mobile CRFnodeprefix",
    #                "NFR": "NFR Code",
    #                "CRF": "CRF Code",
    #                "nat": "Code",
    #                "Manual": "Code"}
    codeHeaders = {"Mobile": "Mobile UNFCCC",
                   "NFR": "NFR Code",
                   "CRF": "CRF Code",
                   "nat": "Code",
                   "Manual": "Code"}
    codeInd = {}
    for key, colName in codeHeaders.iteritems():
        try:
            codeInd[key] = dsTable.getColIndex(colName)
        except DataTableException:
            log.error("Could not find '" + colName + "' in dataSourceTable")
            sys.exit()

    log.info("Collecting data from data sources")
    prevNatCode = ""
    for row in dsTable.data:
        rowId = {}
        srcRow = {}
        for key in srcTables:
            rowId[key] = []
            srcRow[key] = srcTables[key].ncols * [0]

        natCode = row[codeInd['nat']]
        if natCode != prevNatCode:
            log.debug("\nProcessing: " + natCode)
            prevNatCode = natCode

        # Get row-id for each src table
        for tKey in srcTables.keys():
            colInd = codeInd[tKey]
            if row[colInd] is not None:
                for key in srcTables[tKey].keys:
                    try:
                        idPart = row[dsTable.getColIndex(tKey + " " + key)]
                    except ValueError:
                        log.error("No column named '" + tKey + " " + key +
                                  "' found in dataSourceTable")
                        sys.exit()
                    rowId[tKey].append(idPart)

            # If all key values are None, there is no row-id for this table
            nonNones = [val for val in rowId[tKey] if val is not None]
            if len(nonNones) == 0:
                rowId[tKey] = None
                srcRow[tKey] = None
            else:
                try:
                    srcRow[tKey] = srcTables[tKey].accumulate(rowId[tKey])
                except DataTableException, err:
                    log.error("While trying to get data from " + tKey +
                              " table: " + str(err))
                    sys.exit()

        # Create code tree path for the current code
        codeParts = natCode.split(".")
        natPath = ""
        for i in range(1, len(codeParts) + 1):
            natPath += ".".join(codeParts[:i])
            if i < len(codeParts):
                natPath += "/"
        natNode = natTree.root.find(natPath)
        if natNode is None:
            log.error("Could not find national code path: " + natCode)
            sys.exit()

        for subst in substances['nat']:
            log.debug("Substance: " + subst)
            storedSubstances = []
            foundSubstances = []
            rec = None

            # Loop over all source tables
            for tKey in srcTables:
                st = srcTables[tKey]
                if rowId[tKey] is not None and subst in substances[tKey]:
                    try:
                        colIndex = st.getColIndex(subst)
                    except DataTableException:
                        log.error("Could not find column %s in table %s" %
                                  (subst, tKey))
                    emis = srcRow[tKey][colIndex]
                    unit = st.desc[colIndex].get('units', None)
                    storedSubstances.append(subst)
                    nodeExists = False
                    if emis != 0 and emis is not None:
                        if subst not in foundSubstances:
                            foundSubstances.append(subst)
                        else:
                            log.error("Substance found in multiple " +
                                      "datasources for " + natCode)
                            sys.exit()
                        for node in natNode:
                            if (node.tag == "Record" and
                                    node.attrib.get("substance", None) == subst):
                                rec = node
                                nodeExists = True
                                break
                        if not nodeExists:
                            rec = ET.SubElement(natNode, "Record")
                        break
                    if unit is None:
                        log.error("No units set for substance " + subst +
                                  " in source table " + tKey)
                        sys.exit()

            if storedSubstances == []:
                log.error("No data source specified for substance " + subst)
                sys.exit()

            if rec is not None:
                if emis is not None and emis != 0:
                    try:
                        emis = convertUnits(fromUnit=unit,
                                            toUnit=unitDict[subst],
                                            val=emis)
                    except TypeError:
                        emis = 0
                    if not nodeExists:
                        rec.attrib["substance"] = subst
                    else:
                        emis += float(rec.attrib["emission"])
                    rec.attrib["emission"] = str(emis)
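# The dataSourceTable read above is expected to have one column per code
# system, matching the codeHeaders dict: "Code" (the national code),
# "NFR Code", "CRF Code", "Mobile UNFCCC" and "Manual Code", plus one
# "<table> <key>" column for each key of each source table. A hypothetical
# row, mapping a national code to an NFR code:
#
#   Code    NFR Code    CRF Code    Mobile UNFCCC    Manual Code
#   1.1     1.A.1.a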