def main():
    """Import emission rasters into Airviro grid sources.

    Each directory matched by --inputdir becomes one grid. Raster files
    are classified by filename prefix (``substance__``, ``subgrp__`` or
    ``dynamic__``); dynamic parameter rasters may also be shared between
    grids via --dynamicdir. Exits non-zero on any validation error.
    """
    domain = Domain()

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)
    parser.add_argument(
        "-t", "--templatedir",
        action="store", dest="templatedir",
        help="Generate grid .asc template in specified dir"
    )
    parser.add_argument(
        "-i", "--inputdir",
        action="store", dest="inputdir",
        help="Directory containing input files"
    )
    parser.add_argument(
        "-u", "--user",
        action="store", dest="user",
        help="User name (to fetch substance group info)"
    )
    parser.add_argument(
        "-e", "--edb",
        action="store", dest="edb",
        help="EDB name (to fetch substance group info)"
    )
    parser.add_argument(
        "-d", "--dynamicdir",
        action="store", dest="dynamicdir",
        help="Directory with grids for dynamic parameters"
    )
    parser.add_argument(
        "--intensity",
        action="store_true", dest="intensity",
        help="If input rasters are given as ton/(year*km2)"
    )
    args = parser.parse_args()

    # Template generation is a stand-alone action: write and exit
    if args.templatedir is not None:
        generateTemplate(
            path.join(args.templatedir, "grid_template.asc")
        )
        log.info("Wrote default grid template")
        sys.exit()

    substances = domain.listSubstanceIndices()
    log.debug(
        "Using substance list of current domain: " + domain.name
    )

    # Substance group definitions are only available when both user and
    # edb are given; otherwise fall back to a default index later on
    if args.user is not None and args.edb is not None:
        edb = Edb(domain, args.user, args.edb)
        with SubgrpStream(edb, mode='r') as subgrpstream:
            subgrp_reader = ModelReader(subgrpstream)
            subgrps = list(subgrp_reader)
    else:
        subgrps = None

    dirs = glob.glob(args.inputdir)
    msg = "directories:" + str([path.basename(d) for d in dirs])
    log.info(msg)

    # Dynamic parameter rasters shared by all grids. BUGFIX: these were
    # previously stored in 'dynamic_rasters', which was reset inside the
    # per-directory loop (losing them) and then appended to while being
    # iterated. Keep them in a separate list instead.
    dyndir = args.dynamicdir
    if dyndir is not None:
        dyndir = path.abspath(dyndir)
        shared_dynamic_rasters = glob.glob(path.join(dyndir, "*.txt"))
    else:
        shared_dynamic_rasters = []

    for d in dirs:
        log.debug("Processing: " + d)
        raster_paths = glob.glob(path.join(d, "*.txt"))
        if len(raster_paths) == 0:
            log.error("No rasters in directory: " + d)
            sys.exit()
        log.debug(
            "Rasters in directory: " +
            str([path.basename(r) for r in raster_paths])
        )

        # Classify rasters in this directory by filename prefix
        substance_rasters = []
        subgrp_rasters = []
        dynamic_rasters = []
        for rp in raster_paths:
            gridname = path.basename(rp)
            nameparts = gridname.split("__")
            log.debug("Raster prefix: " + nameparts[0])
            if len(nameparts) < 3:
                # invalid raster name
                msg = ("Not able to extract prefix (substance, " +
                       "substance group or dynamic) from raster " +
                       "filename %s" % path.basename(rp))
                log.error(msg)
                sys.exit()
            if nameparts[0] == "subgrp":
                subgrp_rasters.append(rp)
            elif nameparts[0] == "substance":
                substance_name = nameparts[1]
                try:
                    substance_index = substances[substance_name]
                except KeyError:
                    # The name may also be a numeric substance index
                    try:
                        substance_index = int(substance_name)
                    except ValueError:
                        log.error(
                            "Substance: " + substance_name +
                            " not found in subdb of current domain: " +
                            domain.name
                        )
                        sys.exit(1)
                substance_rasters.append(rp)
            elif nameparts[0] == "dynamic":
                # Parse name and level only to validate the filename.
                # BUGFIX: strip the .txt extension before int() - the
                # last name part still carries it here (the sort key
                # below strips it with [:-4]).
                try:
                    dyn_name = nameparts[1]
                    dyn_level = int(path.splitext(nameparts[2])[0])
                except (IndexError, ValueError):
                    log.error(
                        "Could not extract name of dynamic " +
                        "parameter and level for raster: " + gridname
                    )
                    sys.exit(1)
                dynamic_rasters.append(rp)
            else:
                log.error(
                    "Prefix of raster: " + nameparts[0] + " is unvalid"
                )
                sys.exit(1)

        # Add the shared dynamic rasters from --dynamicdir (if given)
        if args.dynamicdir is not None:
            for rp in shared_dynamic_rasters:
                gridname = path.splitext(path.basename(rp))[0]
                nameparts = gridname.split("__")
                if nameparts[0] == "dynamic":
                    try:
                        dyn_name = nameparts[1]
                        dyn_level = int(nameparts[2])
                    except (IndexError, ValueError):
                        log.error(
                            "Could not extract name of dynamic " +
                            "parameter and level for raster: " + gridname
                        )
                        sys.exit(1)
                    dynamic_rasters.append(rp)

        if len(subgrp_rasters) > 1:
            log.error("There can be only one subgrp raster per grid source")
            sys.exit(1)
        if len(subgrp_rasters) > 0 and len(substance_rasters) > 0:
            log.error(
                "Both subgrp rasters and substance rasters " +
                "in the same grid/directory is not allowed"
            )
            sys.exit(1)

        # Use a per-directory template if present, otherwise generate one
        asc_path = path.join(d, "grid_template.asc")
        if not path.exists(asc_path):
            msg = (
                "Could not find "
                "%s, using default template .asc file" % asc_path
            )
            log.warning(msg)
            generateTemplate(asc_path)

        grid = EmissionGrid()
        grid.read_meta_from_file(asc_path)

        # Read the first raster to get the grid geometry
        rast = raster.Raster()
        if len(substance_rasters) > 0:
            log.debug("Reading substance raster: " + substance_rasters[0])
            try:
                rast.read(substance_rasters[0])
            except IOError as e:
                log.error(e)
                sys.exit(1)
            rast.nodataToZero()
        elif len(subgrp_rasters) > 0:
            log.debug("Reading subgrp raster: " + subgrp_rasters[0])
            try:
                rast.read(subgrp_rasters[0])
            except IOError as e:
                log.error(e)
                sys.exit(1)
            rast.nodataToZero()
        else:
            log.error(
                "Not possible to create grid without any substance " +
                "rasters or subgrp raster"
            )
            sys.exit(1)

        grid.X = int(rast.xll)
        grid.Y = int(rast.yll)
        grid.DX = int(rast.cellsize)
        grid.DY = int(rast.cellsize)
        grid.NX = rast.ncols
        grid.NY = rast.nrows

        if len(subgrp_rasters) > 0:
            subgrp_name = path.basename(subgrp_rasters[0]).split("__")[1]
            if subgrps is None:
                subgrp_index = 1
            else:
                subgrp_index = next(
                    (s.INDEX for s in subgrps if s.NAME == subgrp_name),
                    None
                )
                if subgrp_index is None:
                    log.warning(
                        "Could not find subgrp named " +
                        "%s in edb, using default index 1" % subgrp_name
                    )
                    subgrp_index = 1
            grid.FUEL = subgrp_index
            log.debug("Adding subgrp raster to grid")
            rast.read(subgrp_rasters[0])
            rast.nodataToZero()
            if args.intensity:
                # Convert from ton/(year*km2) to ton/year
                rast.data *= rast.cellsize * rast.cellsize / 1.0e6
            grid.add_field_from_raster(rast, subgrp_index, subgrp=True)
        else:
            for rp in substance_rasters:
                rast.read(rp)
                rast.nodataToZero()
                if args.intensity:
                    # Convert from ton/(year*km2) to ton/year
                    rast.data *= rast.cellsize * rast.cellsize / 1.0e6
                subst = path.basename(rp).split("__")[1]
                try:
                    substance_index = substances[subst]
                except KeyError:
                    substance_index = int(subst)
                log.debug(
                    "Adding substance " + subst + " to grid")
                grid.add_field_from_raster(rast, substance_index)

        # Group dynamic rasters by parameter name
        dynamic_raster_dict = {"GEOCODE": [], "ACTIVITYCODE": []}
        for dr in dynamic_rasters:
            nameparts = path.basename(dr).split("__")
            if nameparts[1] == "GEOCODE":
                dynamic_raster_dict["GEOCODE"].append(dr)
            elif nameparts[1] == "ACTIVITYCODE":
                dynamic_raster_dict["ACTIVITYCODE"].append(dr)
            else:
                dynamic_raster_dict[nameparts[1]] = [dr]

        # Sorting the code rasters after code level
        # Key function returns the code level from the raster name
        # ([: -4] strips the '.txt' extension)
        cmpfunc = lambda x: int(path.basename(x)[: -4].split("__")[2])
        dynamic_raster_dict["GEOCODE"].sort(key=cmpfunc)
        dynamic_raster_dict["ACTIVITYCODE"].sort(key=cmpfunc)
        for dp in dynamic_raster_dict.keys():
            if len(dynamic_raster_dict[dp]) > 0:
                log.debug(
                    "Adding dynamic raster for " + dp + ": " +
                    str(dynamic_raster_dict[dp])
                )
                grid.addDynamicRasters(dp, dynamic_raster_dict[dp])

        # The grid is written next to its input rasters, named after the
        # directory itself
        grid_dir = path.abspath(d)
        grid_name = path.basename(grid_dir)
        grid_path = path.join(grid_dir, grid_name)
        grid.write_data_to_file(grid_path)
        msg = "Wrote grid for %s" % path.basename(d)
        log.info(msg)
    log.info("Finished successfully")
def main(): #opening error log file log = open(os.path.join(os.environ["AVPATH"],"tmp","loAirviro.tmp"),"a") #-----------Parsing and validating arguments----------- #Order of arguments #action,domain,user,edb,db args=sys.argv[1:] action=args[0] validActions=["read","write","listEdbs","listDomains"] if action not in validActions: log.write("loAirviroService: Invalid action: %s\n" %action) return 1 domains=listDomains() if len(args)>1: domainName=args[1] if domainName in domains: dmn=Domain(domainName) dmn.setDbp() else: log.write("loAirviroService: Invalid domain %s\n" %domainName) return 1 if len(args)>2: edbDict=dmn.listEdbs() userName=args[2] if userName not in edbDict: log.write("loAirviroService: No edb's exist for user %s\n" %userName) return 1 else: userName=None if len(args)>3: edbName=args[3] if edbName not in edbDict[userName]: log.write("loAirviroService: edb %s does not exist for user %s\n" %(edbName,userName)) return 1 else: edbName=None if len(args)>4: db=args[4] validDbs=["subdb_0", "subdb_1","subdb_2","subdb_3","subdb_4","subdb_5","sourcedb","roaddb","roadtypedb","timevardb_point","timevardb_road","subgrpdb","emfacdb"] if db not in validDbs: log.write("loAirviroService.py: Invalid database requested: %s\n" %db) return 1 else: db=None #----------Performing requested action-------------------- if action=="listDomains": if len(args)!=1: print "Invalid number of arguments, usage:\n%s" %usage return 1 for d in domains: print d return 0 elif action =="listEdbs": if len(args)!=3: print "Invalid number of arguments, usage:\n%s" %usage return 1 for userName, edbList in edbDict.items(): sys.stdout.write(userName+":") for edbName in edbList: sys.stdout.write(" "+edbName) sys.stdout.write("\n") sys.stdout.flush() return 0 elif action =="read": if len(args)!=5: sys.stdout.write("Invalid number of arguments, usage:\n%s" %usage) return 1 if "subdb" in db: db,key = db.split("_") proc=getSubdbReader(dmn.name,user=userName,edb=edbName,searchkey=int(key)) elif 
"timevar" in db: db,key=db.split("_") try: proc=getTimevardbReader(dmn.name,userName,edbName,key) except PyAirviroValueException: log.write("loAirviroService.py: Invalid source type requested: %s\n" %key) return 1 elif db in ["subgrpdb","emfacdb","sourcedb"]: proc=getDbReader(db,dmn.name,userName,edbName) else: print "Invalid request" return 1 _procstderr=codecs.getreader("HP Roman8")(proc.stderr) _procstdout=codecs.getreader("HP Roman8")(proc.stdout) out=codecs.getwriter("HP Roman8")(sys.stdout) line = _procstdout.readline() while line or proc.poll(): out.write(line) line= _procstdout.readline() _procstdout.flush() errMsg=_procstderr.read() if errMsg!="": log.write("loAirviroService.py:"+errMsg+"\n") elif action=="write": if len(args)!=5: sys.stdout.write("Invalid number of arguments, usage:\n%s" %usage) return 1 if "subdb" in db: db,key = db.split("_") proc=getSubdbWriter(dmn.name,user=userName,edb=edbName,searchkey=int(key)) elif "timevar" in db: db,key=db.split("_") try: proc=getTimevardbWriter(dmn.name,userName,edbName,key) except PyAirviroValueException: log.write("loAirviroService.py: Invalid source type requested: %s\n" %key) return 1 elif db in ["subgrpdb","emfacdb","sourcedb","roaddb"]: proc=getDbWriter(db,dmn.name,userName,edbName) else: print "Invalid request" return 1 _procstderr=codecs.getreader("HP Roman8")(proc.stderr) _procstdout=codecs.getreader("HP Roman8")(proc.stdout) _procstdin= codecs.getwriter("HP Roman8")(proc.stdin) inStream=codecs.getreader("UTF-8")(sys.stdin) line = inStream.readline() while line: _procstdin.write(line) inStream.readline() _procstdin.write(line) errMsg=_procstderr.read() outMsg=_procstdout.read() if errMsg!="": log.write("loAirviroService.py:"+errMsg+"\n")
def main(): domain = Domain() # Parse command line arguments parser = argparse.ArgumentParser(description=__doc__) utils.add_standard_command_options(parser) parser.add_argument( '--site', action='store', dest='site', help='APUB-site', ) # parser.add_argument( # '-i', required=True, # action='store', dest='res', # help='.RES file of the input result', # ) # parser.add_argument( # '--ext', required=True, # action='store', dest='ext', # help='ext for the result field to be exported', # ) # parser.add_argument( # '-d', '--date', # action="store", dest='date', # type=lambda s: datetime.datetime.strptime(s, '%y%m%d%H'), # help="Time stamp to show 'YYMMDDhh, " + # "default is first timestamp of result'" # ) # parser.add_argument( # '--substance', # action='store', dest='substance', # help='Substance/ext for the result field to be imported', # ) parser.add_argument( '--areaid', action='store', dest='areaid', help='Area id' ) args = parser.parse_args() site = os.environ.get("SITE", None) or args.site if site is None: log.error("No apub site specified") sys.exit(1) with open(path.join('/var/www/html', site, 'gmapgridoverlay.htm')) as html_template: template = Template(html_template.read()) rf = ControlFile( path.join(os.environ["RSRCDIR"], "apub." + site + ".gmapgridoverlay.rf"), "HP Roman8" ) form = cgi.FieldStorage() viewports = domain.listViewports() areaid = form.getfirst('areaid', None) if areaid is not None: areaid = cgi.escape(areaid) areaid = areaid or \ args.areaid or \ viewports[0] viewport = ViewPort() viewport.read(code=areaid) proj = get_proj4(viewport.proj) data = get_latlon_bounds( viewport.xmin(), viewport.ymin(), viewport.xmax(), viewport.ymax(), proj) print(template.substitute(data))
def main(): #-----------Setting up and unsing option parser----------------------- parser=OptionParser(usage= usage, version=version) parser.add_option("-u",'--user', action="store",dest="user", help="Name of target edb user") parser.add_option("-e","--edb", action="store",dest="edb", help="Name of target edb") parser.add_option("-y","--year", action="store",dest="year", help="Cut out for given year") parser.add_option("-f","--file", action="store",dest="file", help="File to get data from") parser.add_option("-l", "--loglevel", action="store",dest="loglevel",default=2, help="Sets the loglevel (0-3 where 3=full logging)") (options, args) = parser.parse_args() #--------------------Init logger----------------------- rootLogger = logger.RootLogger(level=options.loglevel) global log log = rootLogger.getLogger(sys.argv[0]) #-----------------Validating options------------------- if options.user is None: log.error("Need to specify -u <user>") return 1 if options.edb is None: log.error("Need to specify -e <edb>") return 1 if options.year is None: log.error("Need to specify -y <year>") return 1 if options.file is None: log.error("Need to specify -f <file>") return 1 # if len(options.suffix)>4: # log.error("Limit your suffix length to 4 characters") # return 1 if len(options.year)!=4: log.error("Year should be given with four digits") return 1 dmn=Domain() edb=Edb(dmn,options.user,options.edb) subgrpdb=Subgrpdb(edb) rf = codecs.open(options.file,"r","ISO-8859-15") species={} tmp=tempfile.NamedTemporaryFile(suffix=".tmp",dir=dmn.tmpDir()) emisKey={} try: cmd="subdb -0 -o "+tmp.name returnCode,outMsg,errMsg=utilities.execute(cmd) if returnCode!=0: raise IOError("Error while running:\n" + cmd + "\nstdout: "+outMsg+"\nstderr: "+errMsg) f=codecs.open(tmp.name,"r","HP Roman8") # content=f.read() for line in f.read().split('\n'): if line == "": break line=line.split() if line[1] == '""' or line[1] == '"-"': continue emisKey[line[1][1:-1].lower()] = line[0] f.close() except IOError: 
msg="stdout: "+outMsg+"\nstderr: "+errMsg raise IOError("Could not run: "+cmd+"\n"+msg) for line in rf: if "Average of em" in line: line = line.split("\t") line[-1]=line[-1][:-1] i=0 for spec in line[4:]: i+=1 if len(spec) < 13: continue if spec[13:].lower() == "pm25": species[i]="pm2.5" elif spec[13:].lower() == "bap": species[i]="benzo_a_pyrene" elif spec[13:].lower() == "diox": species[i]="dioxine" elif spec[13:].lower() == "pah": species[i]="pah-4" else: species[i]=spec[13:].lower() continue line = line[:-1].split('\t') if line[3] != options.year: continue name=line[0] i=0 substances={} for emFactor in line[4:]: i+=1 if emFactor == "": continue if species[i] in emisKey: # print "Now doing species "+species[i] index=int(emisKey[species[i]]) emFactor = float(emFactor) if species[i] == "pah-4" or species[i] == "benzo_a_pyrene": emFactor *= 10** -6 if species[i]=="dioxine": emFactor *= 10** -9 if emFactor < 10** -5: unit="g/TJ" emFactor*= 10**6 elif emFactor < 10** -2: unit="kg/TJ" emFactor*= 10**3 elif emFactor < 10: unit="ton/TJ" else: unit="Gg/TJ" emFactor*= 10** -3 substances[index]={"slope":emFactor,"offset":0,"unit":unit} else: print species[i] + " missing" subgrpdb.add_subgrp(name,substances) # subgrpdb.read() subgrpdb.write_to_file("test.txt")
def main():
    """Generate an emission trend report from one or more edb:s.

    Runs an xrepedb macro per edb/substance combination and writes the
    aggregated result as raw text, csv, or a formatted excel workbook
    (greenhouse gases and air quality substances on separate sheets).
    """
    # setting up parser
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)
    parser.add_argument(
        "-e", "--edbs",
        action="store", dest="edbList",
        help="List of 'user/edb' pairs separated by :"
    )
    parser.add_argument(
        "-L", "--labels",
        action="store", dest="labels",
        help="List of edb labels separated by :"
    )
    parser.add_argument(
        "-s", "--substances",
        action="store", dest="substances",
        help="List of substance names separated by :"
    )
    parser.add_argument(
        "-t", "--title",
        action="store", dest="title",
        help="Report title"
    )
    parser.add_argument(
        "-g", "--gc-filter",
        action="store", dest="gcfilter",
        help="Filter on Geo codes, separated by :"
    )
    parser.add_argument(
        "-o", "--outfile",
        action="store", dest="outfile",
        help="Output filename"
    )
    parser.add_argument(
        "-f", "--format",
        action="store", dest="format",
        help="Output in 'excel','csv' or 'raw' " +
        "(Excel-format requires xlwt python module)"
    )
    parser.add_argument("--substMapping",
                        action="store", dest="substMapping",
                        help="File with tab separated mappings of substance names")
    parser.add_argument("--markerTable",
                        action="store", dest="markerTable",
                        help="Table of codes to be formatted and commented")
    parser.add_argument(
        "macro", metavar="MACRO",
        help="A macro to use"
    )
    args = parser.parse_args()

    # Optional table flagging uncertain values in the excel output
    if args.markerTable is not None:
        keys = ["Year", "GC", "AC", "note_1", "note_2"]
        markerTable = DataTable(
            keys=keys,
            desc=[{"id": "Year", "type": str},
                  {"id": "GC", "type": str},
                  {"id": "AC", "type": str},
                  {"id": "note_1", "type": str},
                  {"id": "note_2", "type": str}])
        markerTable.read(args.markerTable)
    else:
        markerTable = None

    # Optional mapping from airviro substance names to report names
    substMapping = {}
    if args.substMapping is not None:
        with codecs.open(args.substMapping,
                         encoding="HP Roman8", mode="r") as f:
            for line in f:
                oldName, newName = line.split(":")
                substMapping[oldName.strip()] = newName.strip()

    dmn = Domain()
    if args.gcfilter is not None:
        args.gcfilter = args.gcfilter.split(":")

    # Read original macro
    with codecs.open(args.macro, encoding="HP Roman8", mode="r") as f:
        originalContent = f.read()

    # Create a tmp copy of the macro, write content from the original macro
    macroTempFile = tempfile.NamedTemporaryFile(
        suffix=".sedb",
        dir=dmn.tmpDir()
    )
    tmpMacro = codecs.open(
        macroTempFile.name, encoding="HP Roman8", mode="w"
    )
    tmpMacro.write(originalContent)
    tmpMacro.flush()

    # Create a ControlFile obj to simplify reading and modifying macro
    macro = ControlFile(macroTempFile.name, removeComments=False)
    edb = macro.findString("edb.edb:")
    user = macro.findString("edb.user:")
    gcIndex = int(macro.findString("edb.reportgeocode:")[-1])
    acIndex = int(macro.findString("edb.reportactcode:")[-1])

    # Default to the edb referenced by the macro itself.
    # BUGFIX: was assigned to a misspelled name ('ebds'), leaving 'edbs'
    # and 'nedbs' undefined when --edbs was not given.
    if args.edbList is None:
        edbs = [[user, edb]]
    else:
        edbs = args.edbList.split(":")
        edbs = [e.split("/") for e in edbs]
    nedbs = len(edbs)

    if args.labels is None:
        labels = ["No label"] * len(edbs)
    else:
        labels = args.labels.split(":")
        if len(labels) != nedbs:
            log.error("Number of labels specified should match number of edb:s")
            sys.exit(1)

    if args.substances is None:
        log.error("Need to specify substances")
        sys.exit(1)
    else:
        substances = args.substances.split(":")

    if args.format not in ('excel', 'csv', 'raw'):
        # BUGFIX: '%' previously bound only to the second string literal,
        # raising TypeError instead of formatting the message
        log.error(
            ("Invalid format specifier : %s, should be one of 'excel'" +
             ", 'csv' or 'raw'") % args.format
        )
        sys.exit(1)
    elif args.format == "excel":
        try:
            import xlwt
        except ImportError:
            log.error(
                "trendReport.py requires python module xlwt to write excel-files")
            sys.exit(1)

    # first edb
    edb = Edb(dmn, edbs[0][0], edbs[0][1])
    # assume same code definitions in all edbs to be processed, read from first
    rsrc = edb.rsrc
    nrsubstances = len(substances)
    unitIndex = int(macro.findString("UNIT :"))
    units = rsrc.search[unitIndex]
    subdb = Subdb(edb)
    subdb.read()

    # decode input title using stdin encoding
    title = args.title.decode(sys.stdin.encoding)

    rawOutput = ""
    rawMeta = u"name: %s\nnrmacros: %i\nnrsub: %i\nunit: %s\n" % (
        title, nedbs, nrsubstances, units)

    emissions = []
    for ind, edbUser in enumerate(edbs):
        label = labels[ind]
        userName = edbUser[0]
        edbName = edbUser[1]
        # Point both macro variants of the user/edb parameters at the
        # current edb
        macro.setParam("edb.user:", userName)
        macro.setParam("edb.edb:", edbName)
        macro.setParam("USER :", userName)
        macro.setParam("EDB :", edbName)
        rawMeta += "macro.%i.edbuser: %s\n" % (ind, userName)
        rawMeta += "macro.%i.edbname: %s\n" % (ind, edbName)
        rawMeta += "macro.%i.desc: %s\n" % (ind, label)
        for subst in substances:
            log.info(
                "User: %s, edb: %s, substance %s" % (
                    userName, edbName, subst)
            )
            substanceIndex = subdb.substIndex(subst)
            macro.setParam("ELEMENT :", substanceIndex)
            macro.write()
            command = "xrepedb -i " + macro.name
            log.info("Running xrepedb for substance %s" % subst)
            # NOTE(review): unpack order (rc, err, out) contradicts how
            # utilities.execute is unpacked elsewhere (rc, out, err) -
            # verify against the utilities module
            (returnCode, errMsg, outMsg) = utilities.execute(command)
            if returnCode != 0:
                log.error("Could not run %s\nstdout: %s\nstderr:%s" % (
                    command, outMsg, errMsg))
                sys.exit(1)
            if len(outMsg) < 10:
                log.error("Invalid output from xrepedb: %s" % outMsg)
                sys.exit(1)
            rawOutput += "#MACRO %i \"%s\" \"%s\"\n" % (ind, subst, labels[ind])
            rawOutput += outMsg
            lines = outMsg.split("\n")[:-1]
            for lineInd, line in enumerate(lines):
                vals = line.split()
                ac = vals[1].split(".")
                gc = vals[3].split(".")
                # Expand codes to (level1, level2); '<all>' becomes 'alla'
                if len(ac) == 1:
                    if ac[0] == "<all>":
                        acLev1 = "alla"
                    else:
                        acLev1 = ac[0]
                    acLev2 = "alla"
                else:
                    acLev1 = ac[0]
                    acLev2 = ac[1]
                if len(gc) == 1:
                    if gc[0] == "<all>":
                        gcLev1 = "alla"
                    else:
                        gcLev1 = gc[0]
                    gcLev2 = "alla"
                else:
                    gcLev1 = gc[0]
                    gcLev2 = gc[1]
                emis = float(vals[4])
                # Translate code values to readable names via the code trees
                if acLev1 == "alla":
                    acLev1Name = "alla"
                    acLev2Name = "alla"
                else:
                    node = rsrc.ac[acIndex - 1].root.find(acLev1)
                    acLev1Name = node.attrib["name"]
                    if acLev2 == "alla":
                        acLev2Name = "alla"
                    else:
                        node = rsrc.ac[acIndex - 1].root.find(
                            acLev1 + "/" + acLev2
                        )
                        acLev2Name = node.attrib["name"]
                if gcLev1 == "alla":
                    gcLev1Name = "alla"
                    gcLev2Name = "alla"
                else:
                    node = rsrc.gc[gcIndex - 1].root.find(gcLev1)
                    gcLev1Name = node.attrib["name"]
                    if gcLev2 == "alla":
                        gcLev2Name = "alla"
                    else:
                        node = rsrc.gc[gcIndex - 1].root.find(
                            gcLev1 + "/" + gcLev2
                        )
                        gcLev2Name = node.attrib["name"]
                if args.gcfilter is not None:
                    if gc[0] not in args.gcfilter:
                        continue
                emissions.append({"label": label,
                                  "substance": subst,
                                  "ac": '.'.join(ac),
                                  "gc": '.'.join(gc),
                                  "gcLev1": gcLev1Name,
                                  "gcLev2": gcLev2Name,
                                  "acLev1": acLev1Name,
                                  "acLev2": acLev2Name,
                                  "acLev1Code": acLev1,
                                  "acLev2Code": acLev2,
                                  "val": emis,
                                  "edbIndex": ind})

    # Close tempfile to automatically remove it
    tmpMacro.close()

    if args.format == "raw":
        outfile = codecs.open(args.outfile, "w", "HP Roman8")
        outfile.write(rawMeta)
        outfile.write(rawOutput)
        outfile.close()
    elif args.format == "csv":
        outfile = open(args.outfile, "w")
        desc = [
            {'id': 'gc', 'type': unicode},
            {'id': 'ac', 'type': unicode},
            {'id': 'label', 'type': unicode},
            {'id': 'user', 'type': unicode},
            {'id': 'edb', 'type': unicode}
        ]
        for subst in substances:
            desc.append({'id': subst, 'type': float})
        keys = ['gc', 'ac', 'label']
        table = DataTable(desc=desc, keys=keys)
        log.info("Adding emissions to csv-table")
        for emis in emissions:
            row = [None] * len(desc)
            user = edbs[emis['edbIndex']][0]
            edb = edbs[emis['edbIndex']][1]
            row[table.colIndex['gc']] = emis['gc']
            row[table.colIndex['ac']] = emis['ac']
            row[table.colIndex['label']] = emis['label']
            row[table.colIndex['user']] = user
            row[table.colIndex['edb']] = edb
            row[table.colIndex[emis['substance']]] = emis['val']
            # data is appended to the correct row, or a new row is added if the
            # table keys do not match any existing row
            log.debug(
                "Adding row for substance %s, gc %s, ac %s" % (
                    emis['substance'], emis['gc'], emis['ac'])
            )
            table.addRow(row, append=True)
        table.write(outfile)
        outfile.close()
    else:
        # Create style objects for excel output
        header1Style = xlwt.easyxf(
            'font: name Times New Roman,color-index black, bold on',
            num_format_str='0.000E+00'
        )
        markerStyle1 = xlwt.easyxf(
            'font: name Times New Roman,color-index red, bold off, italic on',
            num_format_str='0.000E+00')
        markerStyle2 = xlwt.easyxf(
            'font: name Times New Roman,color-index orange, bold off, italic on',
            num_format_str='0.000E+00')
        normalStyle = xlwt.easyxf(
            'font: name Times New Roman,color-index black, bold off',
            num_format_str='0.000E+00'
        )
        excelBook = xlwt.Workbook()
        # Creating info sheet
        infoWs = excelBook.add_sheet("Info")
        infoWs.col(0).width = 256 * 20
        infoWs.col(1).width = 256 * 25
        infoWs.col(2).width = 256 * 20
        infoWs.col(3).width = 256 * 200
        infoWs.write(0, 0, u"Rapportnamn:", header1Style)
        infoWs.write(0, 1, title, header1Style)
        infoWs.write(1, 0, u"Beskrivning av dataunderlaget", header1Style)
        infoWs.write(3, 0, u"Makron (specificerar utsökningar ur databasen)", header1Style)
        infoWs.write(4, 0, u"Etikett", header1Style)
        infoWs.write(4, 1, u"Ägare till EDB", header1Style)
        infoWs.write(4, 2, u"EDB (emissiondatabas)", header1Style)
        infoWs.write(4, 3, u"Beskrivning", header1Style)
        for i, edbUser in enumerate(edbs):
            userName = edbUser[0]
            edbName = edbUser[1]
            label = labels[i]
            infoWs.write(5 + i, 0, label)
            infoWs.write(5 + i, 1, userName)
            infoWs.write(5 + i, 2, edbName)
            # reading edb description file (if it exists)
            edb = Edb(dmn, userName, edbName)
            infoWs.write(5 + i, 3, edb.desc().replace("\n", " "))

        # split substances in green house gases and air quality related
        ghgList = [s for s in substances if s in ghgs]
        aqList = [s for s in substances if s not in ghgs]

        # Write air quality headers
        firstRow = 4
        # Add two rows for marker comments
        if markerTable is not None:
            firstRow += 2
        if len(aqList) > 0:
            aqWs = excelBook.add_sheet(u"Luftföroreningar")
            aqWs.col(0).width = 256 * 25
            aqWs.col(1).width = 256 * 30
            aqWs.col(2).width = 256 * 20
            aqWs.col(3).width = 256 * 15
            for col in range(nrsubstances * nedbs):
                aqWs.col(col + 4).width = 256 * 15
            aqWs.write(0, 0, u"Rapportnamn:", header1Style)
            aqWs.write(0, 1, title, header1Style)
            aqWs.write(1, 0, u"Emissioner av luftföroreningar", header1Style)
            aqWs.write(1, 1, u"Enhet: " + units, header1Style)
            if markerTable is not None:
                aqWs.write(2, 0, u"OBS! Röd kursiv text anger osäkra värden p.g.a. att en stor del av emissionen är fördelad med schabloner inom kommungruppen. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.", markerStyle1)
                aqWs.write(3, 0, u"OBS! Orange kursiv text anger osäkra värden p.g.a. att trenden varierar kraftigt och eventuellt felaktigt, ytterligare verifiering krävs. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.", markerStyle2)
            aqWs.write(firstRow, 0, "Huvudsektor", header1Style)
            aqWs.write(firstRow, 1, "Undersektor", header1Style)
            aqWs.write(firstRow, 2, u"Län", header1Style)
            aqWs.write(firstRow, 3, "Kommun", header1Style)

        # Write ghg headers
        if len(ghgList) > 0:
            ghgWs = excelBook.add_sheet(u"Växthusgaser")
            ghgWs.col(0).width = 256 * 25
            ghgWs.col(1).width = 256 * 30
            ghgWs.col(2).width = 256 * 20
            ghgWs.col(3).width = 256 * 15
            for col in range(nrsubstances * nedbs):
                ghgWs.col(col + 4).width = 256 * 15
            ghgWs.write(0, 0, u"Rapportnamn:", header1Style)
            ghgWs.write(0, 1, title, header1Style)
            ghgWs.write(1, 0, u"Emissioner av Växthusgaser", header1Style)
            ghgWs.write(2, 0, u"CO2-ekv. efter ämnesnamn innebär att emissionen är uttryckt i CO2-ekvivalenter", header1Style)
            if markerTable is not None:
                ghgWs.write(3, 0, u"OBS! Röd kursiv text anger osäkra värden p.g.a. att en stor del av emissionen är fördelad med schabloner inom kommungruppen. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.", markerStyle1)
                ghgWs.write(4, 0, u"OBS! Orange kursiv text anger osäkra värden p.g.a. att trenden varierar kraftigt och eventuellt felaktigt, ytterligare verifiering krävs. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.", markerStyle2)
            ghgWs.write(1, 1, u"Enhet: " + units, header1Style)
            ghgWs.write(firstRow, 0, "Huvudsektor", header1Style)
            ghgWs.write(firstRow, 1, "Undersektor", header1Style)
            ghgWs.write(firstRow, 2, u"Län", header1Style)
            ghgWs.write(firstRow, 3, "Kommun", header1Style)

        def getColInd(nmacros, substances, macroInd, subst):
            # gets the column index in excel file
            sInd = substances.index(subst)
            # Including extra columns to write CO2-equivalents
            nSubstWithCO2equivalents = 0
            for s in substances[:sInd + 1]:
                if s in doubleColumns:
                    nSubstWithCO2equivalents += 1
            return (4 + macroInd + sInd * nmacros +
                    nSubstWithCO2equivalents * (macroInd + 1))

        # write macro labels and substance headers for air quality sheet
        for sInd, subst in enumerate(aqList):
            for i, edbUser in enumerate(edbs):
                col = getColInd(nedbs, aqList, i, subst)
                aqWs.write(firstRow - 1, col, labels[i], header1Style)
                # If a substance name is given in mapping this is used,
                # otherwise the substance name from the airviro substance
                # list is used
                aqWs.write(firstRow, col,
                           substMapping.get(subst, subst), header1Style)

        # write macro labels and substance headers for ghg sheet
        for sInd, subst in enumerate(ghgList):
            for i, edbUser in enumerate(edbs):
                col = getColInd(nedbs, ghgList, i, subst)
                # If CO2-equivalents are calculated, an extra column is needed
                if subst in doubleColumns:
                    ghgWs.write(firstRow - 1, col - 1, labels[i], header1Style)
                ghgWs.write(firstRow - 1, col, labels[i], header1Style)
                # If CO2-equivalents are calculated, an extra column is needed
                if subst in doubleColumns:
                    ghgWs.write(firstRow, col - 1,
                                substMapping.get(subst, subst), header1Style)
                    ghgWs.write(firstRow, col,
                                substMapping.get(subst, subst) + "CO2-ekv.",
                                header1Style)
                elif subst in storedAsCO2equivalents:
                    ghgWs.write(firstRow, col,
                                substMapping.get(subst, subst) + "CO2-ekv.",
                                header1Style)
                else:
                    ghgWs.write(firstRow, col,
                                substMapping.get(subst, subst), header1Style)

        # looping over all emissions, writing them to the correct column
        # and row; index 0 of each row list tracks the next label row
        ghgRow = []
        aqRow = []
        for m in range(nedbs * nrsubstances + 4 + 3 * nedbs):
            ghgRow.append(firstRow + 1)
        for m in range(nedbs * nrsubstances + 4):
            aqRow.append(firstRow + 1)
        for emis in emissions:
            subst = emis["substance"]
            emisVal = emis["val"]
            edbInd = emis["edbIndex"]
            # Check if gc, ac and year can be found in the marker table to
            # select a highlighting style for uncertain values
            if markerTable is not None:
                TableRowInd = markerTable.rowIndices(
                    [labels[edbInd], emis["gc"], emis["ac"], "ja", "*"])
                if len(TableRowInd) > 0:
                    valueStyle = markerStyle1
                else:
                    TableRowInd = markerTable.rowIndices(
                        [labels[edbInd], emis["gc"], emis["ac"], "*", "ja"])
                    if len(TableRowInd) > 0:
                        valueStyle = markerStyle2
                    else:
                        valueStyle = normalStyle
            else:
                valueStyle = normalStyle
            if subst in ghgList:
                col = getColInd(nedbs, ghgList, edbInd, subst)
                row = ghgRow[col]
                if ghgRow[0] <= row:
                    ghgWs.write(row, 0, emis["acLev1"], normalStyle)
                    ghgWs.write(row, 1, emis["acLev2"], normalStyle)
                    ghgWs.write(row, 2, emis["gcLev1"], normalStyle)
                    ghgWs.write(row, 3, emis["gcLev2"], normalStyle)
                    ghgRow[0] += 1
                # converts the emission to CO2-ekquivalents
                if subst in doubleColumns:
                    ghgWs.write(row, col - 1, float(emisVal), valueStyle)
                    ghgWs.write(row, col,
                                float(emisVal) * float(ekvFactors[subst]),
                                valueStyle)
                else:
                    ghgWs.write(row, col, float(emisVal), valueStyle)
                ghgRow[col] += 1
            else:
                col = getColInd(nedbs, aqList, edbInd, subst)
                row = aqRow[col]
                if aqRow[0] <= row:
                    aqWs.write(row, 0, emis["acLev1"], normalStyle)
                    aqWs.write(row, 1, emis["acLev2"], normalStyle)
                    aqWs.write(row, 2, emis["gcLev1"], normalStyle)
                    aqWs.write(row, 3, emis["gcLev2"], normalStyle)
                    aqRow[0] += 1
                aqWs.write(row, col, float(emisVal), valueStyle)
                aqRow[col] += 1
        excelBook.save(args.outfile)
    log.info("Finished!")
def main():
    """Extract ship sources from a source edb into new per-viewport edbs.

    For each requested map viewport, a new edb named
    <viewport-code>_<year>_<suffix> is created and populated with the
    searchkeys, emission factors and all ship sources (as selected by
    includeShip) of the source edb.

    Returns 0 on success, 1 on invalid command-line options.
    """
    # ----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)

    parser.add_option("-u", '--user',
                      action="store", dest="user",
                      help="Name of target edb user")

    parser.add_option("-e", "--edb",
                      action="store", dest="edb",
                      help="Name of target edb")

    parser.add_option("-v", "--viewports",
                      action="store", dest="viewports",
                      help="Comma-separated list of area id's to be cut out, default is all")

    parser.add_option("-y", "--year",
                      action="store", dest="year",
                      help="Cut out for given year")

    parser.add_option("-l", "--loglevel",
                      action="store", dest="loglevel", default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")

    parser.add_option("-s", "--suffix",
                      action="store", dest="suffix", default="v1",
                      help="Sets suffix to names of generated edb's to support version management, default is 'v1'")

    (options, args) = parser.parse_args()

    # --------------------Init logger-----------------------
    rootLogger = logger.RootLogger(level=options.loglevel)
    global log
    log = rootLogger.getLogger(sys.argv[0])

    # -----------------Validating options-------------------
    if options.user is None:
        log.error("Need to specify -u <user>")
        return 1
    if options.edb is None:
        log.error("Need to specify -e <edb>")
        return 1
    if options.year is None:
        log.error("Need to specify -y <year>")
        return 1
    # Suffix is limited to 4 chars; it becomes part of the target edb name.
    if len(options.suffix) > 4:
        log.error("Limit your suffix length to 4 characters")
        return 1
    if len(options.year) != 4:
        log.error("Year should be given with four digits")
        return 1

    dmn = Domain()

    # Read the requested viewports; default is every viewport of the domain.
    viewports = []
    if options.viewports is not None:
        viewportIds = options.viewports.split(",")
    else:
        viewportIds = dmn.listViewports()

    for vpId in viewportIds:
        vp = ViewPort()
        # Viewport geometry is read from the domain's modell.par file.
        vp.read(path.join(dmn.wndPath(), "modell.par"), vpId)
        viewports.append(vp)

    edb = Edb(dmn, options.user, options.edb)

    # Read the databases of the source edb once; they are re-targeted
    # (setEdb) and written for every created viewport edb below.
    log.info("Reading sourcedb...")
    sourcedb = Sourcedb(edb)
    sourcedb.read()

    log.info("Reading emfacdb...")
    emfacdb = Emfacdb(edb)
    emfacdb.read()

    log.info("Reading subdb...")
    subdb = Subdb(edb)
    subdb.read()

    edbDotRsrc = edb.rsrcPath()

    for vpInd, vp in enumerate(viewports):
        targetEdbName = vp.code + "_" + options.year + "_" + options.suffix
        tEdb = Edb(dmn, options.user, targetEdbName)
        # Existing target edbs are left untouched; the user must remove
        # them manually to force a re-extraction.
        if tEdb.exists():
            log.info("Edb %s already exists, remove first to update" % targetEdbName)
            continue
        tEdb.create(edbRsrc=edbDotRsrc)
        log.info("Created empty edb %s" % targetEdbName)

        # Copy searchkeys and emission factors into the target edb.
        subdb.setEdb(tEdb)
        subdb.write()
        log.info("Wrote searchkeys")

        emfacdb.setEdb(tEdb)
        emfacdb.write()
        log.info("Wrote emfacdb")

        tSourcedb = Sourcedb(tEdb)
        log.info("Cutting out viewport %s (%i/%i)" % (vp.code, vpInd + 1, len(viewports)))
        for srcInd, src in enumerate(sourcedb.sources):
            # includeShip decides whether this ship source belongs in the
            # viewport for the given year (helper defined elsewhere in file).
            if includeShip(src, vp.code, src["Y1"], options.year):
                log.debug("Ship %i/%i included in %s" % (srcInd + 1, len(sourcedb.sources), tEdb.name))
                tSourcedb.sources.append(src)
        tSourcedb.write()
        log.info("Wrote exatracted sources to %s" % tEdb.name)
        tEdb.setDesc("This edb has been extracted from %s under user %s, " % (edb.name, edb.user) +
                     "and includes all ships that have visited the map area %s (%s) during %s\n" % (vp.code, vp.name, options.year))
    log.info("Finished!")
    return 0
def main():
    """Export per-activity-code emission grids from an edb via xrepedb.

    Drives an external `xrepedb` macro: for every node of the activity
    code tree and every requested substance, the macro is rewritten and
    executed, the returned field parsed into a raster, optionally
    reprojected, and written out as ESRI Ascii grids and/or CLRTAP text
    files. A summary table of emissions per code is also written.
    """
    # ----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)

    parser.add_option("-l", "--loglevel",
                      action="store", dest="loglevel", default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    parser.add_option("-u", "--user",
                      action="store", dest="user", default=None,
                      help="Specify user manually")
    parser.add_option("-e", "--edb",
                      action="store", dest="edb", default=None,
                      help="Name of target edb")
    parser.add_option("-t", "--template",
                      action="store", dest="cf", default=None,
                      help="Generate default controlfile")
    parser.add_option("-f", "--force",
                      action="store_true", dest="force", default=False,
                      help="To start the process without confirming the domain")

    (options, args) = parser.parse_args()

    # Setup logging
    logging.configure(terminal_level=logging.DEBUG)
    log = logging.getLogger(__name__)

    # -t writes a default controlfile template and exits.
    if options.cf != None:
        generateCf(path.abspath(options.cf))
        log.info("Wrote default controlfile")
        sys.exit()

    if len(args) != 1:
        parser.error("Incorrect number of arguments")

    if options.edb == None:
        parser.error("Need to specify edb using flag -e")
    if options.user == None:
        parser.error("Need to specify user using flag -u")

    dmn = Domain()

    # Interactive confirmation of the domain unless --force was given.
    if not options.force:
        answer = raw_input("Chosen dbase is: " + dmn.name + ",continue(y/n)?")
        if answer != "y":
            sys.exit("Interrupted by user")

    if not dmn.edbExistForUser(options.edb, options.user):
        log.error("Edb " + options.edb + " does not exist for user " +
                  options.user + " in domain " + dmn.name)
        sys.exit()

    # ---Creating edb and rsrc objects------------------
    edb = Edb(dmn, options.user, options.edb)
    rsrc = Rsrc(edb.rsrcPath())

    # ---retrieving data from control file----
    cf = ControlFile(fileName=path.abspath(args[0]))
    substances = cf.findStringList("substances:")
    outputDir = cf.findExistingPath("outputDir:")
    acIndex = cf.findInt("acIndex:")
    macroFileName = path.abspath(cf.findExistingPath("xrepedbMacro:"))
    fromProj = cf.findString("fromProj:")
    toProj = cf.findString("toProj:")

    # Resolve projection names to proj4 definitions.
    try:
        fromProj = transcoord.proj4Dict[fromProj]
    except KeyError:
        log.error("Projection %s not found in proj4Dictin transCoord.py" % fromProj)
    try:
        toProj = transcoord.proj4Dict[toProj]
    except KeyError:
        log.error("Projection %s not found in proj4Dictin transCoord.py" % toProj)

    formats = cf.findStringList("formats:")
    units = cf.findString("units:")
    writeGrids = cf.findBoolean("writeGrids:", optional=True, default=True)

    # Extent of the edb grid (source of the export).
    edb_xll = cf.findInt("edb_xll:")
    edb_yll = cf.findInt("edb_yll:")
    edb_ncols = cf.findInt("edb_ncols:")
    edb_nrows = cf.findInt("edb_nrows:")
    edb_cellsize = cf.findFloat("edb_cellsize:")

    # Output extent is only needed when a reprojection is performed.
    if fromProj != toProj:
        out_xll = cf.findFloat("out_xll:")
        out_yll = cf.findFloat("out_yll:")
        out_ncols = cf.findInt("out_ncols:")
        out_nrows = cf.findInt("out_nrows:")
        out_cellsize = cf.findFloat("out_cellsize:")

    # -----------------------------------------

    # Finds index of the requested search units in the edb resources.
    unitIndex = None
    for key, unit in rsrc.search.iteritems():
        if isinstance(key, int):
            if rsrc.search[key] == units:
                unitIndex = key
                break
    if unitIndex is None:
        log.error("Search units: %s not defined in edb.rsrc" % units)
        sys.exit()

    macro = ControlFile(fileName=macroFileName, removeComments=False)
    # preparing export macro
    macro.setParam("general.database:", dmn.name)
    xmin = edb_xll
    xmax = edb_xll + edb_ncols * edb_cellsize
    ymin = edb_yll
    ymax = edb_yll + edb_nrows * edb_cellsize
    macro.setParam("edb.mapopt.bounds:", "%i %i %i %i" % (xmin, xmax, ymin, ymax))
    # NOTE(review): the following four setParam calls were partially
    # redacted in the available source ("edb.user:"/"USER :" values were
    # scrubbed); reconstructed as edb.user / edb.name — confirm against VCS.
    macro.setParam("edb.user:", edb.user)
    macro.setParam("edb.edb:", edb.name)
    macro.setParam("REGION :", "%i %i %i %i" % (xmin, xmax, ymin, ymax))
    macro.setParam("USER :", edb.user)
    macro.setParam("EDB :", edb.name)
    macro.setParam("GRID :",
                   "%i %i %i %i %i %i" % (edb_xll, edb_yll, edb_ncols, edb_nrows, edb_cellsize, edb_cellsize))
    macro.setParam("edb.unit:", unitIndex)
    macro.setParam("UNIT :", unitIndex)
    # macro.setParam("NOACTCODE :",acIndex)
    macro.setParam("NOACTCODE :", len(rsrc.ac))

    # Get activity code tree
    acTree = codeemistree.CodeEmisTree("Activity codes", units=units)
    acTree.readActivityCodes(rsrc.path, acIndex)

    substDict = dmn.listSubstanceIndices()

    edbRast = Raster(Xll=edb_xll, Yll=edb_yll, Ncols=edb_ncols,
                     Nrows=edb_nrows, Cellsize=edb_cellsize,
                     Nodata=-9999, init=0)
    if fromProj != toProj:
        outRastTemplate = Raster(Xll=out_xll, Yll=out_yll, Ncols=out_ncols,
                                 Nrows=out_nrows, Cellsize=out_cellsize,
                                 Nodata=-9999)
    else:
        outRastTemplate = Raster()
        outRastTemplate.assign(edbRast)

    for node in acTree.root.getiterator():
        if node.tag == "root" or node.tag == "Record":
            continue
        ac = node.tag
        log.debug("Activity code: " + ac)

        # Finds row index for activity codes in macro.
        # Add a row with the current ac.
        # If there are many ac already given, these are
        # replaced by the current ac.
        macroLines = macro.content.split("\n")
        actCodeInd = None
        geoCodeInd = None
        for lineInd, line in enumerate(macroLines):
            if "NOACTCODE" in line:
                actCodeInd = lineInd
            if "NOGEOCODE" in line:
                geoCodeInd = lineInd

        # Codes above the tree's max depth are matched exactly; shallower
        # codes get a trailing '.' so sub-codes are included as well.
        if len(ac.split('.')) >= rsrc.ac[acIndex - 1].depth:
            macroLines = macroLines[:actCodeInd + 1] + ["none"] * (acIndex - 1) + [ac] + ["none"] * (len(rsrc.ac) - acIndex) + macroLines[geoCodeInd:]
        else:
            macroLines = macroLines[:actCodeInd + 1] + ["none"] * (acIndex - 1) + [ac + '.'] + ["none"] * (len(rsrc.ac) - acIndex) + macroLines[geoCodeInd:]
        macro.content = "\n".join(macroLines)
        macro.write()

        # boolean raster marking where there is data for any of the substances
        if 'CLRTAP' in formats:
            dataMarker = Raster(Xll=out_xll, Yll=out_yll, Ncols=out_ncols,
                                Nrows=out_nrows, Cellsize=out_cellsize,
                                Nodata=-9999, init=0)
        rasterDict = {}
        substancesWithData = []

        for substance in substances:
            log.debug("Substance %s" % substance)
            substanceIndex = substDict[substance]
            macro.setParam("ELEMENT :", substanceIndex)
            macro.write()
            # Run the external export and parse its stdout into a raster.
            command = "xrepedb -i " + macro.name
            (returnCode, errMsg, outMsg) = utilities.execute(command)

            # All-zero output means the field is empty for this substance.
            tmp = outMsg.split()[10:-2]
            tmp.sort()
            if tmp[0] == '0.000000E+00' and tmp[-1] == '0.000000E+00':
                print "ERROR: The field for " + substance + " is empty!"
                continue
            # pdb.set_trace()
            emisRast = string2rast(outMsg, edbRast)
            emisSum = emisRast.sum()

            outRast = Raster()
            outRast.assign(outRastTemplate)

            # Record the total emission on the activity-code tree node;
            # used later when building the summary table.
            rec = ET.SubElement(node, "Record")
            rec.attrib["substance"] = substance
            rec.attrib["emission"] = str(emisSum)

            if emisSum > 0 and writeGrids:
                if substance not in substancesWithData:
                    substancesWithData.append(substance)

                if fromProj != toProj:
                    exportRast = transcoord.transformEmisRaster(emisRast, outRast, fromProj, toProj, tmpDir=dmn.tmpDir())
                else:
                    exportRast = emisRast

                if 'CLRTAP' in formats:
                    dataMarker.data = numpy.where(exportRast.data > 0, 1, dataMarker.data)
                    rasterDict[substance] = exportRast

                categoryDirPath = path.join(outputDir, ac)
                if not path.isdir(categoryDirPath):
                    os.mkdir(categoryDirPath)

                if 'ESRI Ascii grid' in formats:
                    fileName = path.join(categoryDirPath, substance + ".asc")
                    exportRast.write(fileName)
                    log.debug("Grid for " + substance + "written to outputDir for category: " + ac)

        # Summary table is rewritten after each processed code; the tree
        # accumulates records, so the final file holds the complete table.
        summaryTable = acTree.createTable(writeAll=True)
        summaryTable.sortRows()
        tableFile = open(path.join(outputDir, "summaryTable.txt"), 'w')
        summaryTable.write(tableFile)

        if len(rasterDict) > 0 and 'CLRTAP' in formats:
            # creating substance header in the same order as the substances in the template
            header = "i\tj\t"
            for s in substancesWithData:
                header += s + "\t"
            # remove the tab after the last column and add a newline instead
            header = header[: - 1] + "\n"

            # Creating file for EMEP-data
            fileName = "CLRTAP_" + ac + ".txt"
            categoryDirPath = path.join(outputDir, ac)
            if not path.isdir(categoryDirPath):
                os.mkdir(categoryDirPath)
            fid = open(path.join(categoryDirPath, fileName), 'w')
            fid.writelines(header)
            sum = 0
            # Writing indexes and data for all non-zero elements
            for row in range(dataMarker.nrows):
                for col in range(dataMarker.ncols):
                    if dataMarker.data[row, col] > 0:
                        (i, j) = dataMarker.getCentreCoords(row, col)
                        fid.write(str(i) + "\t" + str(j) + "\t")
                        for substWithData in substancesWithData[:-1]:
                            fid.write(str(rasterDict[substWithData].data[row, col]) + "\t")
                            sum += rasterDict[substWithData].data[row, col]
                        fid.write(str(rasterDict[substancesWithData[-1]].data[row, col]) + "\n")
                        sum += rasterDict[substancesWithData[-1]].data[row, col]
            fid.close()
            log.info("wrote emissions to clrtap-file: " + path.join(categoryDirPath, fileName))
    log.info("Finished")
def main():
    """Top-down distribution of national emission totals.

    Reads a top-down control table and national totals, subtracts the
    bottom-up point-source emissions per activity code, and distributes
    the national rest over a distribution-key grid (optionally
    regionalized with regional statistics). Results are written to the
    emissions edb (point sources and emission grids) and a percentage
    result table.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument("controlfile", metavar='CONTROLFILE',
                        action="store",
                        help="Controlfile for topdown processing")
    parser.add_argument("-t", "--template", metavar='TEMPLATEFILE',
                        action="store", dest="cf", default=None,
                        help="Generate default controlfile")

    args = parser.parse_args()

    # -t writes a default controlfile template and exits.
    if args.cf is not None:
        generateCf(args.cf)
        log.info("Wrote default controlfile")
        sys.exit(0)

    log.info("Starting topdown processing")
    # Opening controlfile
    cf = ControlFile(args.controlfile)
    dmn = Domain()

    log.info("Reading topdown table")
    tdTableName = cf.findExistingPath("topDownTable:")
    tdTable = DataTable()
    tdTable.keys.append("Code")
    tdTable.read(tdTableName, delimiter=";")

    log.info("Reading national totals table")
    natTotalTableName = cf.findExistingPath("nationalTotalTable:")
    natTable = DataTable(desc=[{"id": "Code", "type": unicode},
                               {"id": "description", "type": unicode}])
    natTable.keys.append("Code")
    natTable.read(natTotalTableName, units=True, defaultType=str)

    # Notation keys mark non-numeric cells (Not Estimated etc.); they are
    # replaced by None so the numeric conversion below succeeds.
    notationKeys = ["NE", "NO", "NA", "IE"]
    log.debug("Remove notation keys from national totals table")
    for row in natTable.data:
        for i in range(len(row)):
            if row[i] in notationKeys:
                row[i] = None

    log.debug("Convert all emission columns in national totals to float")
    for colId in natTable.listIds():
        if colId not in ["Code", "description"]:
            natTable.convertCol(colId, float)

    log.debug("Store units from national totals for each substance in dict")
    natUnits = {}
    for col in natTable.desc:
        if col.get("units", None) != None:
            natUnits[col["id"]] = col["units"]

    log.debug("Read remaining data from control file")
    bottomupEdbName = cf.findString("bottomUpEdb:")
    topDownEdbName = cf.findString("topDownEdb:")
    emissionsEdbName = cf.findString("emissionsEdb:")
    # NOTE(review): the two reads below were partially redacted in the
    # available source; reconstructed as separate "user:" and "year:"
    # lookups — confirm against VCS.
    userName = cf.findString("user:")
    year = cf.findString("year:")

    # initialize edb objects
    buEdb = Edb(dmn, userName, bottomupEdbName)
    tdEdb = Edb(dmn, userName, topDownEdbName)
    eEdb = Edb(dmn, userName, emissionsEdbName)
    log.info("Reading/preparing EDB:s")

    log.info("Reading subdb")
    subdb = Subdb(eEdb)
    subdb.read()

    log.info("Reading subgrpdb")
    subgrpdb = SubgrpStream(buEdb)
    subgrpdb.read()

    log.info("Reading facilitydb")
    facilityIn = FacilityStream(buEdb)

    log.info("Reading companydb")
    companyIn = CompanyStream(buEdb)

    facilityOut = FacilityStream(eEdb, mode="w")
    companyOut = CompanyStream(eEdb, mode="w")

    # Facility and company registers are copied verbatim to the result edb.
    log.info("Writing company db to result edb")
    companyOut.write(companyIn.read())

    log.info("Writing facility db to result edb")
    facilityOut.write(facilityIn.read())

    if not buEdb.exists():
        log.error("Edb " + buEdb.name + " does not exist for user " +
                  userName + " in domain " + dmn.name)
        sys.exit(1)
    if not tdEdb.exists():
        log.error("Edb " + tdEdb.name + " does not exist for user " +
                  userName + " in domain " + dmn.name)
        sys.exit(1)
    if not eEdb.exists():
        log.error("Edb " + eEdb.name + " does not exist for user " +
                  userName + " in domain " + dmn.name)
        sys.exit(1)

    # Distribution keys are stored as grids in the top-down edb.
    keys = tdEdb.listGrids()
    msg = "%i keys found in edb: %s" % (len(keys), tdEdb.name)
    log.info(msg)

    # sourcedb from bottom-up edb
    with SourceStream(buEdb, mode='rb') as source_instream:
        source_reader = ModelReader(source_instream)
        bu_sources = list(source_reader)

    log.info(
        "%i point sources found in edb: %s" % (
            len(bu_sources),
            buEdb.name)
    )

    # Empty sourcedb of the result edb
    if cf.findBoolean("emptyEmissionSourcedb:"):
        eEdb.empty_sourcedb()
        e_sources = []
        log.info("Removed point sources from edb: %s" % (eEdb.name))
    else:
        # sourcedb from emission edb (result edb)
        with SourceStream(eEdb, mode='rb') as source_instream:
            source_reader = ModelReader(source_instream)
            e_sources = list(source_reader)
        msg = "%i point sources found in edb: %s" % (len(e_sources), eEdb.name)
        log.info(msg)

    if not path.exists(eEdb.rsrcPath()):
        log.error("No edb.rsrc exists for emission edb")
        sys.exit()
    else:
        rsrc = Rsrc(eEdb.rsrcPath())
    acIndex = cf.findInt("acIndex:")
    codeDepth = rsrc.ac[acIndex - 1].depth

    substances = cf.findStringList("substances:")
    for subst in substances:
        if subst not in subdb.substIndices:
            log.error("Substance: " + subst + " not in Airviro substance list")
            sys.exit()

    # Initialize trace for debug and additional logging
    if cf.findBoolean("trace:") == True:
        log.info("Initializing trace for detailed logging")
        trace = TraceDef(
            active=True,
            substances=cf.findStringList("trace.substances:"),
            logfile=cf.findString("trace.logfile:"),
            regdefgc=cf.findIntList("trace.regdef.gc:",
                                    optional=True,
                                    default=None),
            gcDefRaster=cf.findExistingPath("trace.gcraster:")
        )
    else:
        trace = TraceDef(active=False)

    log.info("Initializing result table")
    resTablePath = cf.findString("resTable:")
    resTable = DataTable(desc=[{"id": "Code", "type": unicode}])
    resTable.keys.append("Code")
    for subst in substances:
        resTable.addCol({"id": subst, "type": float, "unit": "%"})

    # Create emission grid template (with geocodes)
    log.info("Reading emission grid template")
    eGridTemplatePath = cf.findExistingPath("emisGridTemplatePath:")
    eGridTemplate = Egrid(eEdb, "name")
    if eGridTemplatePath[-4:] == ".asc":
        eGridTemplatePath = eGridTemplatePath[:-4]
    eGridTemplate.readData(eGridTemplatePath)
    # Template starts without substance layers; they are added per code.
    eGridTemplate.substances = {}
    eGridTemplate.par["SUBSTANCE"].val = []

    # Shared state passed to the regionalize* helpers.
    dd = {"key": None,
          "regstat": None,
          "regdef": None,
          "bu_sources": bu_sources,
          "psIndices": [],
          "units": natUnits,
          "rsrc": rsrc,
          "subdb": subdb,
          "trace": trace,
          "subgrpdb": subgrpdb
          }

    # Process all rows in the topdown table
    for row in tdTable.data:
        code = row[tdTable.colIndex["Code"]]
        active = row[tdTable.colIndex["Active"]]
        statType = row[tdTable.colIndex["Stat_type"]]
        if active == "no":
            continue
        log.info("Code: " + code)

        distributed = False

        # Add '-' to the code to reach max length (fix for a GUI bug)
        airviroCode = code
        # while len(airviroCode.split(".")) < codeDepth:
        #     airviroCode += ".-"

        tdrow = tdTable.data[tdTable.rowIndex([code])]
        nrow = natTable.data[natTable.rowIndex([code])]

        # Create a resTable row to fill with data
        resrow = [None] * resTable.ncols
        resrow[0] = code

        # Check if national totals are non-zero
        nonZero = False
        for val in nrow:
            if val != None:
                if val > 0:
                    nonZero = True
                    break

        # Filter out indices for pointsources with the current ac.
        # Also including sources coded with sub-codes.
        # This allows to estimate top-down emissions on a higher code-level.
        psIndices = []
        for i, ps in enumerate(bu_sources):
            codeMatch = False

            for emis in ps.EMISSION:
                # It is assumed that the first code is used while processing topdown
                ac = emis.ACTCODE[0]
                if ac[-1] == ".":
                    ac = ac[:-1]
                # if ac[:len(code)] == code:
                if ac == code:
                    codeMatch = True
                    break

            if not codeMatch:
                for emis in ps.SUBGRP:
                    # It is assumed that the first code is used while processing topdown
                    ac = emis.ACTCODE[0]
                    if ac[:len(code)] == code:
                        codeMatch = True
                        break

            if codeMatch:
                psIndices.append(i)

        dd["psIndices"] = psIndices

        keyName = row[tdTable.colIndex["Key"]]

        # If no distribution key specified and no ps in bottom-up edb - cont.
        if keyName is None and psIndices == []:
            log.debug("No key and no point sources found for code: %s, skipping..." % code)
            resTable.addRow(resrow)
            continue

        if psIndices != []:
            msg = "--Found %i pointsources" % len(psIndices)
            log.info(msg)

        if keyName is not None:
            if keyName not in keys:
                log.error("No such key: " + keyName)
                sys.exit()

            msg = "--Key: %s" % keyName
            log.info(msg)
            keyGrid = Egrid(tdEdb, keyName)
            keyGrid.readData()
            log.debug("Read key: " + keyName + " from topdownEdb")

            # create emission grid to store distributed emissions
            eGrid = deepcopy(eGridTemplate)
            eGrid.name = code.replace(".", "_")
            eGrid.par["NAME"].val = code
            eGrid.par["INFO2"].val = "Distribution key: " + keyGrid.par["NAME"].val
            eGrid.par["ACTIVITYCODE"].val = [airviroCode.split(".")]

        regstatName = row[tdTable.colIndex["Regstat"]]
        regdefName = row[tdTable.colIndex["Regdef"]]

        if regstatName is not None:
            if regdefName is None:
                log.error("No region definition given for regional statistics: " +
                          regstatName)
                sys.exit(1)
            regstatPath = path.join(dmn.domainPath(), "topdown", "regstat", regstatName)
            regstat = DataTable()
            log.info("regstatPath: " + regstatPath)
            regstat.read(regstatPath, units=True, defaultType=float, delimiter=";")
            if not "Geocode" in regstat.listIds():
                log.error("No Geocode column found in regstat")
                sys.exit(1)
            regstat.convertCol("Geocode", int)
            regstat.keys.append("Geocode")  # Making Geocode the primary key

            # create list of unique geo codes
            geocodes = [row[regstat.colIndex["Geocode"]] for row in regstat.data]
            geocodes = unique(geocodes)

            for colId in regstat.listIds():
                if colId.lower() == "year":
                    rows = []
                    regstat.convertCol(colId, int)
                    # Make it possible to accumulate year
                    regstat.setKeys(regstat.keys + [colId])

                    # Calculates the total emission for each geocode
                    # in case there are multiple rows for different fuels etc
                    colsToSum = regstat.listIds()
                    colsToSum.remove(colId)
                    colsToSum.remove("Geocode")
                    for gc in geocodes:
                        # sums all numeric values in colsToSum for
                        # rows matching row id [gc,year];
                        # returns an accumulated row and appends it to rows
                        rowId = regstat.dict2RowId({"Geocode": gc, colId: year})
                        rows.append(regstat.accumulate(rowId, "sum", colsToSum))
                    regstat.data = rows  # replace original rows with accumulated rows
                    regstat.keys.remove(colId)
                    break

            # dd["regstat"] = regstat
            regdef = Raster()
            regdefPath = path.join(dmn.domainPath(), "topdown", "regdef", regdefName)
            regdef.read(regdefPath)

            dd["regstat"] = regstat
            dd["regdef"] = regdef
        else:
            dd["regstat"] = None
            dd["regdef"] = None

        if dd["regstat"] is not None and len(bu_sources) > 0 and statType == "fixed":
            log.info("--Regionalizing pointsources")
            dd = regionalizePS(dd, code)

        if keyName is not None and nonZero:
            regionalizedDefault = False
            # Spatial distribution of emissions
            for subst in substances:
                sInd = subdb.substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # TODO: should give reference to subgrps to include emis from them
                    pstot += source.get_emis(
                        sInd,
                        toUnit,
                        eEdb,
                        actcodes=[code]
                    )

                if ntot is None or ntot == 0:
                    if pstot > 0:
                        # 9999 is used as marker for no national total
                        resrow[resTable.colIndex[subst]] = 9999.0
                        log.warning(
                            "Nattot is 0 but ps tot is: %f %s" % (pstot, toUnit))
                    continue

                # National rest = national total minus bottom-up point sources.
                nrest = ntot - pstot
                resrow[resTable.colIndex[subst]] = 100.0
                if abs(nrest / ntot) < 0.0001:
                    nrest = 0
                    log.info(
                        "--Rest is < 0.01 % of national total, rounded to zero"
                    )
                    continue
                elif nrest < 0:
                    log.warning(
                        "--National rest is below zero, %4.2f proc for %s" % (
                            -1 * nrest / ntot * 100, subst)
                    )
                    dd["trace"].write()
                    # continue
                log.info(
                    "---Substance: " + subst +
                    ", rest is: " + str(nrest) +
                    toUnit + " = " + str(nrest / ntot * 100.0) + "%"
                )

                # Fall back on the key's "all"-substance raster when there is
                # no substance-specific raster in the key grid.
                try:
                    keyRast = keyGrid.substances[sInd]
                except KeyError:
                    keyRast = keyGrid.substances[subdb.substIndices["all"]]
                dd["key"] = keyRast
                if dd["regstat"] is not None:
                    if (subst not in regstat.colIndex and
                            sInd not in keyGrid.substances and not regionalizedDefault):
                        dd = regionalizeKey(dd, subst, code)
                        regionalizedDefault = True
                    else:
                        dd = regionalizeKey(dd, subst, code)

                emisRast = distribute(dd["key"], nrest)
                emisRast = emisRast * unitConvFac(toUnit, "ton/year")
                eGrid.addData(emisRast, dd["subdb"].substIndices[subst])
                distributed = True
        else:
            # resTable is filled.
            # In case all national totals are zero but there are ps.
            for subst in substances:
                sInd = dd["subdb"].substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # subgrps are not used!
                    pstot += source.get_emis(sInd, toUnit, buEdb,
                                             actcodes=[code])

                if ntot != 0 and ntot is not None:
                    resrow[resTable.colIndex[subst]] = pstot / ntot * 100.0
                else:
                    resrow[resTable.colIndex[subst]] = -999.0

        # Point sources matching the current code are copied to the result edb.
        if len(dd["psIndices"]) > 0:
            tmp_sources = (bu_sources[i] for i in dd["psIndices"])
            with SourceStream(eEdb, mode='wb') as out_source_stream:
                source_writer = ModelWriter(out_source_stream)
                for source in tmp_sources:
                    source_writer.write(source)
            log.debug("Wrote ps to emission edb")

        if distributed:
            eGrid.load()
            log.debug("Wrote emission grid to emission edb")

        dd["trace"].write()
        resTable.addRow(resrow)

    resTableFile = open(resTablePath, "w")
    resTable.write(resTableFile)
    log.info("Finished topdown process")
def main():
    """Sum grid emissions per geocode region and print the result.

    Reads an emission grid, groups its cells by the geocode raster at
    the requested code level, and writes a tab-separated table to stdout
    with one row per geocode and one column per substance.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument("-g", "--grid",
                        action="store", dest="grid", default=None,
                        help="Name of grid or path to grid")
    parser.add_argument("-s", "--substance", type=int,
                        action="store", dest="substance",
                        help="Index of substance to extract")
    parser.add_argument("-c", "--codelevel", type=int,
                        action="store", dest="codelevel", default=None,
                        help="Level of geocodes to sum emissions for")

    args = parser.parse_args()

    domain = Domain()
    grid = egrid.Egrid()
    # Bug fix: the original referenced the undefined name 'options';
    # argparse stores parsed values on 'args'.
    grid.readData(args.grid)

    substances = domain.listSubstances()
    if args.substance is not None:
        # Validate that the index exists in the domain's substance list.
        try:
            substances[args.substance]
        except KeyError:
            # Bug fix: error message previously referenced
            # 'substanceIndices[0]' before substanceIndices was defined.
            parser.error("Invalid substance index given: " + str(args.substance))
        if not args.substance in grid.listSubstances():
            log.error("Substance does not exist in grid")
            sys.exit(1)
        substanceIndices = [args.substance]
    else:
        substanceIndices = grid.listSubstances()

    # Bug fix: 'codelevel' was an undefined bare name; use the parsed option.
    gcRast = grid.dynamics["GEOCODE"][args.codelevel]
    codes = gcRast.unique()

    # emisDict[geocode][substance index] accumulates the emission sum.
    emisDict = {}
    for code in codes:
        emisDict[code] = {}
        for i in substanceIndices:
            emisDict[code][i] = 0

    # Accumulate cell values per geocode, skipping nodata cells.
    for row in range(grid.nrows()):
        for col in range(grid.ncols()):
            for i in substanceIndices:
                code = gcRast.data[row, col]
                if grid.substances[i].data[row, col] != grid.substances[i].nodata:
                    emisDict[code][i] += grid.substances[i].data[row, col]

    # Write header row followed by one row per geocode, sorted by code.
    sys.stdout.write("Code")
    for i in substanceIndices:
        sys.stdout.write("\t" + substances[i])
    sys.stdout.write("\n")
    codes.sort()
    for code in codes:
        sys.stdout.write(str(int(code)))
        for i in substanceIndices:
            sys.stdout.write("\t" + str(emisDict[code][i]))
        sys.stdout.write("\n")