def main():
    # Create the domain
    domain = Domain()

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)
    utils.add_edb_command_options(parser)

    parser.add_argument(
        '-F',
        action="store_true",
        dest='force',
        help="Force - overwrite target edb"
    )
    parser.add_argument(
        '--delimiter',
        default='\t',
        action="store",
        dest='delimiter',
        help='Delimiter used in enviman csv-tables'
    )
    parser.add_argument(
        '-i',
        action="store",
        dest='indir',
        help=("Directory containing enviman tables in .csv format, " +
              "named the same as Enviman MS Excel sheets, " +
              "but with space replaced by '_'")
    )
    parser.add_argument(
        '--substancemap',
        action=MappingAction,
        dest='substancemap',
        help="File mapping substance indices from enviman to Airviro"
    )
    parser.add_argument(
        '--source_srs',
        metavar="EPSG",
        type=int,
        action='store',
        dest='source_srs',
        help='Coordinate system of enviman data'
    )
    parser.add_argument(
        '--target_srs',
        metavar="EPSG",
        type=int,
        action='store',
        dest='target_srs',
        help='Coordinate system of Airviro data'
    )

    args = parser.parse_args()
    edb = Edb(domain, args.user, args.edb)

    PAG_Source_path = path.join(args.indir, 'PAG_Source.csv')
    AnEm_Table_path = path.join(args.indir, 'AnEm_Table.csv')
    Mon_Table_path = path.join(args.indir, 'Mon_Table.csv')
    Hour_Table_path = path.join(args.indir, 'Hour_Table.csv')
    Comb_Table_path = path.join(args.indir, 'Comb_Table.csv')
    Comb_Def_path = path.join(args.indir, 'Comb_Def.csv')

    subgrp_ind = get_max_subgrp_index(edb) + 1
    timevar_ind = get_max_timevar_index(edb) + 1

    if path.exists(PAG_Source_path):
        sources = read_sources(PAG_Source_path, args.delimiter)
    else:
        sources = None
        log.warning('PAG_Source.csv not found')

    if args.source_srs is not None and args.target_srs is not None:
        log.debug("Preparing spatial reference transformation")
        target_srs = osr.SpatialReference()
        source_srs = osr.SpatialReference()
        target_srs.ImportFromEPSG(args.target_srs)
        source_srs.ImportFromEPSG(args.source_srs)
        srs_transform = osr.CoordinateTransformation(source_srs, target_srs)
        for sourceid, source in sources.iteritems():
            transform(source, srs_transform)
    else:
        log.debug("No spatial reference transformation will be performed")

    if path.exists(AnEm_Table_path) and sources is not None:
        read_emissions(
            sources,
            AnEm_Table_path,
            args.delimiter,
            args.substancemap
        )
    else:
        log.warning('AnEm_Table.csv not found')

    if path.exists(Comb_Table_path):
        subgrps = read_subgrps(
            Comb_Table_path,
            Comb_Def_path,
            args.delimiter,
            args.substancemap
        )
    else:
        subgrps = None
        log.warning('Comb_Table.csv not found')

    if path.exists(Hour_Table_path) and path.exists(Mon_Table_path):
        timevars = read_timevars(
            sources,
            Hour_Table_path,
            Mon_Table_path,
            args.delimiter
        )
    else:
        timevars = None

    if subgrps is not None:
        for key, subgrp in subgrps.iteritems():
            subgrp.INDEX = subgrp_ind
            subgrp_ind += 1

    if timevars is not None:
        for key, tvar in timevars.iteritems():
            tvar.INDEX = timevar_ind
            timevar_ind += 1

    # Flag to ensure the missing Comb Table warning is only emitted once
    unwritten_subgroups = False
    for src in sources.values():
        if timevars is not None:
            timevarid = '_'.join([src.ALOB['MonthlyID'],
                                  src.ALOB['HourlyID']])
            timevarind = timevars[timevarid].INDEX
        else:
            timevarind = 1

        if src.ALOB['Consumption'] != '0':
            if subgrps is not None:
                subgrp = subgrps[int(src.ALOB['ProcessID'])]
                src.add_subgrp(
                    SUBGRP=subgrp.INDEX,
                    ACTIVITY=float(src.ALOB['Consumption']),
                    TIMEVAR=timevarind,
                    UNIT='ton/year'
                )
            elif not unwritten_subgroups:
                log.warning(
                    "Could not add substance group emissions " +
                    "due to missing Comb Table"
                )
                unwritten_subgroups = True

        for emis in src.EMISSION:
            emis.TIMEVAR = timevarind

    if timevars is not None:
        with TimevarStream(edb, mode='w', sourcetype='point') as outstream:
            log.info('Writing timevars...')
            writer = ModelWriter(outstream)
            for tvar in timevars.values():
                log.debug('Writing timevar %i' % tvar.INDEX)
                writer.write(tvar)

    if subgrps is not None:
        with codecs.open(
                '/usr/airviro/data/SCAC/prod/industry/enviman_subgrps.txt',
                mode='w', encoding='HP Roman8') as outstream:
            # with SubgrpStream(edb, mode='w') as outstream:
            log.info('Writing subgrps...')
            writer = ModelWriter(outstream)
            for subgrp in subgrps.values():
                log.debug('Writing subgrp %i' % subgrp.INDEX)
                writer.write(subgrp)

    with codecs.open(
            '/usr/airviro/data/SCAC/prod/industry/enviman_sources.txt',
            mode='w', encoding='HP Roman8') as outstream:
        # with SourceStream(edb, mode='w') as outstream:
        log.info('Writing sources...')
        writer = ModelWriter(outstream)
        for src in sources.values():
            log.debug('Writing source %s' % src.NAME)
            writer.write(src)
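
# The --substancemap option above is parsed with action=MappingAction, but
# that action class is not part of this listing. Below is a minimal sketch of
# what such an argparse action could look like; the "key value" one-pair-per-
# line file layout and the class body are assumptions, not the actual Airviro
# implementation.
import argparse


class MappingAction(argparse.Action):
    """Argparse action reading a mapping file into a dict (sketch)."""

    def __call__(self, parser, namespace, values, option_string=None):
        mapping = {}
        with open(values) as f:
            for line in f:
                line = line.strip()
                # Skip blank lines and comments (assumed convention)
                if not line or line.startswith('#'):
                    continue
                key, value = line.split(None, 1)
                mapping[key] = value
        # Store the parsed dict instead of the raw filename
        setattr(namespace, self.dest, mapping)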
def main():
    actionList = [
        'addSubstance', 'removeSubstance', 'mapSubstances',
        'addCodeTree', 'removeCodeTree', 'moveCodeTree',
        'extractByGeocode', 'uniteGrids', 'scale'
    ]

    # Setting up parser
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    utils.add_standard_command_options(parser)

    parser.add_argument("-d", "--doc",
                        action="store_true", dest="doc",
                        help="Print more detailed documentation and exit")
    parser.add_argument("-u", "--user",
                        action="store", dest="user", default=None,
                        help="Specify user manually")
    parser.add_argument("-e", "--edb",
                        action="store", dest="edb", default=None,
                        help="Name of target edb")
    parser.add_argument("-s", "--substance",
                        action="store", dest="substance",
                        help="Name of substance to add or remove")
    parser.add_argument("--substanceFactorFile",
                        action="store", dest="substanceFactorFile",
                        help="File with substance factors")
    parser.add_argument("--substanceMapping",
                        action="store", dest="substanceMapping",
                        help="File with mapping for substance indices")
    parser.add_argument("--codeMapping",
                        action="store", dest="codeMapping",
                        help="File with mapping from old codes to new")
    parser.add_argument("--edbMapping",
                        action="store", dest="edbMapping",
                        help="File with mapping from gc to edb")
    parser.add_argument("-f", "--force",
                        action="store_true", dest="force", default=False,
                        help="Start the process without confirming the domain")
    parser.add_argument("--factor",
                        action="store", type=float, dest="factor",
                        help="Factor used to scale emissions")
    parser.add_argument(
        "-n", "--names",
        action="store", dest="sourceNames", default=['.*'],
        nargs='*',
        help="Space separated list of regular expressions"
    )
    parser.add_argument(
        "--sourceTypes",
        action="store", dest="sourceTypes",
        nargs='*',
        default=['griddb', 'sources'],
        choices=["griddb", "sources", "subgrpdb", "svehdb", "vehdb"],
        help="Source types to process"
    )
    parser.add_argument(
        "--codeType",
        action="store", dest="codeType", default='ac',
        help="Code type to process"
    )
    parser.add_argument(
        "--codeIndex",
        action="store", dest="codeIndex", default=1, type=int,
        help="Index of code tree to use in processing"
    )
    parser.add_argument(
        "--newIndex",
        action="store", dest="newIndex", default=None,
        help="New index when moving an already defined code tree"
    )
    parser.add_argument(
        "--codeLevel",
        action="store", dest="codeLevel", default=1, type=int,
        help="Level of code tree to use in processing"
    )
    parser.add_argument(
        "--actions",
        action="store", dest="actions",
        nargs='*',
        choices=actionList,
        help="Action list"
    )

    args = parser.parse_args()

    if args.doc:
        print doc
        sys.exit()

    # if len(args) > 0:
    #     log.error("Incorrect number of arguments")
    #     sys.exit(1)

    if args.edb is None:
        log.error("Need to specify edb")
        sys.exit(1)

    if args.user is None:
        log.error("Need to specify user")
        sys.exit(1)

    if args.actions is None:
        parser.error("Need to specify at least one action")

    dmn = Domain()
    edb = Edb(dmn, args.user, args.edb)
    subdb = Subdb(edb)
    subdb.read()
    rsrc = Rsrc(edb.rsrcPath())

    # Check if edb exists
    if not edb.exists():
        log.error("Edb " + args.edb + " does not exist for user " +
                  args.user + " in domain " + dmn.name)
        sys.exit(1)

    if args.codeType.lower() == "ac":
        codeType = "ACTIVITYCODE"
    elif args.codeType.lower() == "gc":
        codeType = "GEOCODE"
    else:
        parser.error("codeType should be either 'gc' or 'ac'")

    if args.codeIndex < 1:
        raise ValueError("Minimum code index is 1")
    if args.codeIndex > 1 and codeType == "GEOCODE":
        raise OSError("Multiple geocode trees not implemented")

    for a in args.actions:
        if a not in actionList:
            parser.error("Unknown action %s" % a)

    log.info("User: " + args.user)
    log.info("Edb: " + args.edb)

    loadGridData = False

    # Set values in argDict
    argDict = {
        "edb": edb,
        "user": args.user,
        "domain": dmn,
        "grids": [],
        "filters": args.sourceNames,
        "codeType": codeType,
        "codeIndex": args.codeIndex,
        "codeLevel": args.codeLevel,
        "sources": None,
        "griddb": None,
        "subgrpdb": None,
        "roaddb": None,
        "vehdb": None,
        "svehdb": None,
        "rsrc": rsrc
    }

    # ----- Reading sources from edb -------------------------------
    if 'griddb' in args.sourceTypes:
        grids = []
        gridNames = edb.listGrids()
        for name in gridNames:
            grd = egrid.Egrid(edb, name)
            grd.readAsc()
            grids.append(grd)
        argDict["grids"] = grids

    if 'sources' in args.sourceTypes:
        argDict["sources"] = list(ModelReader(SourceStream(edb, mode='r')))

    if 'roaddb' in args.sourceTypes:
        log.error("Not implemented with new roaddb-structure")
        sys.exit(1)

    if 'subgrpdb' in args.sourceTypes:
        subgrpdb = Subgrpdb(edb)
        subgrpdb.read()
        argDict["subgrpdb"] = subgrpdb

    # emfacdb = Emfacdb(edb)
    # emfacdb.read()
    # argDict["subgrpdb"] = subgrpdb

    if 'svehdb' in args.sourceTypes:
        svehdb = Svehdb(edb)
        svehdb.read()
        argDict["svehdb"] = svehdb

    # TODO: add option for vehdb

    # Parse additional args
    # -----addSubstance--------------------------------------
    if "addSubstance" in args.actions:
        if args.substance is None:
            parser.error("Action addSubstance needs " +
                         "--substance to be specified")
        if args.substanceFactorFile is None:
            parser.error("Action addSubstance also needs " +
                         "--substanceFactorFile to be specified")
        if args.substance not in subdb.substIndices:
            raise ValueError(
                "Substance %s not in substance list" % args.substance)

        substanceNameFactorDict = parseMapping(
            path.abspath(args.substanceFactorFile),
            valueType=float)
        substanceFactorDict = {}

        # Converts mapping between substance name and factor to
        # mapping between substance index and factor
        for name, factor in substanceNameFactorDict.iteritems():
            if name not in subdb.substIndices:
                raise KeyError(
                    "Substance: %s not found in substance list" % name)
            ind = subdb.substIndices[name]
            substanceFactorDict[ind] = factor

        argDict["substanceFactorDict"] = substanceFactorDict
        argDict["substance"] = subdb.substIndices[args.substance]

    # ----removeSubstance------------------------------------
    if "removeSubstance" in args.actions:
        if args.substance is None:
            parser.error("Action removeSubstance needs " +
                         "--substance to be specified")
        if args.substance not in subdb.substIndices:
            log.error("Substance %s not in substance list" % args.substance)
            sys.exit(1)
        argDict["substance"] = subdb.substIndices[args.substance]

    # ----mapSubstances--------------------------------------
    if "mapSubstances" in args.actions:
        if args.substanceMapping is None:
            parser.error("Action mapSubstances needs " +
                         "--substanceMapping to be specified")
        substanceMappingFile = path.abspath(args.substanceMapping)
        substanceMapping = parseMapping(
            substanceMappingFile,
            keyType=int,
            valueType=int
        )
        argDict["substanceMapping"] = substanceMapping

    # ----addCodeTree-----------------------------------------
    if "addCodeTree" in args.actions:
        if args.codeMapping is None:
            parser.error("Action addCodeTree needs " +
                         "--codeMapping to be specified")
        if args.newIndex is None:
            parser.error("Action addCodeTree needs " +
                         "--newIndex to be specified")
        codeMappingFile = path.abspath(args.codeMapping)
        codeMapping = parseMapping(codeMappingFile)
        argDict["codeMapping"] = codeMapping
        argDict["newIndex"] = int(args.newIndex)

    # ----removeCodeTree-------------------------------------
    # Only arguments with default values needed, no validation is needed

    # ----moveCodeTree---------------------------------------
    if "moveCodeTree" in args.actions:
        if args.newIndex is None:
            parser.error("Action moveCodeTree needs " +
                         "--newIndex to be specified")
        try:
            argDict["newIndex"] = int(args.newIndex)
        except ValueError:
            log.error("newIndex must be an integer value")
            sys.exit(1)

    # ----extractByGeocode-----------------------------------
    if "extractByGeocode" in args.actions:
        if args.edbMapping is None:
            parser.error("Action extractByGeocode needs " +
                         "--edbMapping to be specified")
        log.info("Slicing edb by geocode")
        edbMappingFile = path.abspath(args.edbMapping)
        # Geocode has to be int
        edbMapping = parseMapping(edbMappingFile, keyType=int)
        argDict["edbMapping"] = edbMapping

    # ----scale----------------------------------------------
    if 'scale' in args.actions:
        if args.factor is None:
            log.error("Must specify --factor")
            sys.exit(1)
        if args.substance is None:
            log.error("Must specify --substance")
            sys.exit(1)
        argDict['substance'] = subdb.substIndices[args.substance]
        argDict['factor'] = args.factor

    # Processing actions
    for action in args.actions:
        log.info("Running action: %s" % action)
        argDict = eval(action)(argDict)

    # Writes each processed grid to edb
    if loadGridData:
        for grd in argDict["grids"]:
            if not match(grd["NAME"], argDict["filters"]) or grd.hasSubgrp():
                continue
            else:
                log.debug(
                    "Wrote grid %s to edb: %s, user: %s" % (
                        grd.par["NAME"].val, grd.edb.name, grd.user))
                grd.load()
    else:
        for grd in argDict["grids"]:
            if not match(grd["NAME"], argDict["filters"]) or grd.hasSubgrp():
                continue
            else:
                log.debug(
                    "Wrote .asc-file for grid %s to edb: %s, user: %s" % (
                        grd.par["NAME"].val, grd.edb.name, grd.user))
                grd.writeAsc(grd.getAscPath())

    # Writes each processed database to edb
    if argDict["sources"] is not None:
        with SourceStream(edb, mode="w") as stream:
            model_writer = ModelWriter(stream)
            for src in argDict["sources"]:
                log.debug('Wrote source %s' % src.NAME)
                model_writer.write(src)

    for dbName in ["roaddb", "subgrpdb", "svehdb", "vehdb"]:
        if argDict[dbName] is not None:
            argDict[dbName].write()
            log.info("Wrote %s to edb: %s, user: %s" % (
                dbName, edb.name, edb.user))
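
# parseMapping is called above with keyType/valueType keyword arguments but is
# defined elsewhere in the package. A minimal sketch, assuming mapping files
# hold one whitespace-separated "key value" pair per line; the real Airviro
# helper may differ.
def parseMapping(filename, keyType=str, valueType=str):
    """Read a two-column text file into a dict, casting key and value."""
    mapping = {}
    with open(filename) as f:
        for line in f:
            line = line.strip()
            # Skip blank lines and comments (assumed convention)
            if not line or line.startswith('#'):
                continue
            key, value = line.split(None, 1)
            mapping[keyType(key)] = valueType(value)
    return mapping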
def main():
    # -----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)

    parser.add_option("-u", "--user",
                      action="store", dest="user",
                      help="Name of target edb user")
    parser.add_option("-e", "--edb",
                      action="store", dest="edb",
                      help="Name of target edb")
    parser.add_option("-i", "--infile",
                      action="store", dest="infile",
                      help="Input csv file")
    # parser.add_option("-y", "--year",
    #                   action="store", dest="year",
    #                   help="Only store sources for given year")
    parser.add_option("-v",
                      action="store_const", dest="loglevel",
                      default=get_loglevel(),
                      help="Produce verbose output")
    parser.add_option("-t", "--template",
                      action="store", dest="cf", default=None,
                      help="Generate default controlfile")
    parser.add_option("-o", "--outfile",
                      action="store", dest="outfile", default=None,
                      help="Name of outfiles (without extension)")
    parser.add_option("-d", "--delimiter",
                      action="store", dest="delimiter", default="\t",
                      help="Delimiter used in csv-file")
    parser.add_option("-c", "--filterCol",
                      action="store", dest="filterCol",
                      help="Header of column to use as filter")
    parser.add_option("-f", "--filterVal",
                      action="store", dest="filterVal",
                      help="Value to use in filter")
    # parser.add_option("-g", "--geocodeRasterDir",
    #                   action="store", dest="geocodeRasterDir", default=None,
    #                   help="Directory with geocode rasters")

    (options, args) = parser.parse_args()

    # --------------------Init logger-----------------------
    # rootLogger = logger.RootLogger(level=options.loglevel)
    logging.basicConfig(
        format='%(levelname)s:%(name)s: %(message)s',
        level=options.loglevel,
    )
    global log
    # log = rootLogger.getLogger(sys.argv[0])
    log = logging.getLogger(parser.prog)

    # -----------------Validating options-------------------
    if options.cf is not None:
        generateCf(path.abspath(options.cf), controlFileTemplate)
        log.info("Wrote default controlfile")
        return 1

    if options.user is None:
        log.error("Need to specify -u <user>")
        return 1
    if options.edb is None:
        log.error("Need to specify -e <edb>")
        return 1
    # if options.year is None:
    #     log.error("Need to specify -y <year>")
    #     return 1
    # if len(options.year) != 4:
    #     log.error("Year should be given with four digits")
    #     return 1

    if len(args) != 1:
        log.error("Controlfile should be given as argument")
        return 1

    dmn = Domain()
    edb = Edb(dmn, options.user, options.edb)
    if not edb.exists():
        log.error("Edb %s does not exist" % options.edb)
        return 1

    log.info("Parsing controlfile")
    cf = ControlFile(args[0])
    cdbPars = re.compile(r"companydb\.par\.(\w*?):").findall(cf.content)
    fdbPars = re.compile(r"facilitydb\.par\.(\w*?):").findall(cf.content)
    sdbPars = re.compile(r"sourcedb\.par\.(\w*?):").findall(cf.content)
    substEmisNr = re.compile(
        r"sourcedb\.subst_emis\.([0-9]*)\.emis").findall(cf.content)
    subgrpEmisNr = re.compile(
        r"sourcedb\.subgrp_emis\.([0-9]*)\.emis").findall(cf.content)

    cdbCols = {}
    cdbDefaults = {}
    for par in cdbPars:
        cdbCols[par] = cf.findString("companydb.par.%s:" % par)
        cdbDefaults[par] = cf.findString(
            "companydb.par.%s.default:" % par,
            optional=True, default=None)

    fdbCols = {}
    fdbDefaults = {}
    for par in fdbPars:
        fdbCols[par] = cf.findString("facilitydb.par.%s:" % par)
        fdbDefaults[par] = cf.findString(
            "facilitydb.par.%s.default:" % par,
            optional=True, default=None)

    sdbCols = {}
    sdbDefaults = {}
    for par in sdbPars:
        sdbCols[par] = cf.findString("sourcedb.par.%s:" % par)
        sdbDefaults[par] = cf.findString(
            "sourcedb.par.%s.default:" % par,
            optional=True, default=None)

    substEmisCols = {}
    substEmisDefaults = {}
    if substEmisNr is not None:
        for emisNr in substEmisNr:
            cols = {}
            defaults = {}
            emisPars = re.compile(
                r"sourcedb\.subst_emis\.%s\.(\w*?):" % emisNr
            ).findall(cf.content)
            emisDefaultPars = re.compile(
                r"sourcedb\.subst_emis\.%s\.(\w*?)\.default:" % emisNr
            ).findall(cf.content)
            if emisPars is not None:
                for par in emisPars:
                    cols[par] = cf.findString(
                        "sourcedb.subst_emis.%s.%s:" % (emisNr, par))
            if emisDefaultPars is not None:
                for par in emisDefaultPars:
                    defaults[par] = cf.findString(
                        "sourcedb.subst_emis.%s.%s.default:" % (emisNr, par),
                        optional=True, default=None)
            substEmisCols[emisNr] = cols
            substEmisDefaults[emisNr] = defaults

    subgrpEmisCols = {}
    subgrpEmisDefaults = {}
    if subgrpEmisNr is not None:
        for emisNr in subgrpEmisNr:
            cols = {}
            defaults = {}
            emisPars = re.compile(
                r"sourcedb\.subgrp_emis\.%s\.(\w*?):" % emisNr
            ).findall(cf.content)
            emisDefaultPars = re.compile(
                r"sourcedb\.subgrp_emis\.%s\.(\w*?)\.default:" % emisNr
            ).findall(cf.content)
            if emisPars is not None:
                for par in emisPars:
                    cols[par] = cf.findString(
                        "sourcedb.subgrp_emis.%s.%s:" % (emisNr, par))
            if emisDefaultPars is not None:
                for par in emisDefaultPars:
                    defaults[par] = cf.findString(
                        "sourcedb.subgrp_emis.%s.%s.default:" % (emisNr, par),
                        optional=True, default=None)
            subgrpEmisCols[emisNr] = cols
            subgrpEmisDefaults[emisNr] = defaults

    log.info("Reading subdb...")
    subdb = Subdb(edb)
    subdb.read()

    log.info("Reading companydb...")
    companydb = Companydb(edb)
    companydb.read()

    log.info("Opening source output stream...")
    # source_stream = SourceStream(edb, 'w')
    source_stream = open(options.outfile, 'w')
    source_writer = ModelWriter(source_stream, encoding="HP Roman8")

    log.info("Reading facilitydb...")
    facilitydb = Facilitydb(edb)
    facilitydb.read()

    log.info("Reading subgrpdb")
    subgrpdb = Subgrpdb(edb)
    subgrpdb.read()

    log.info("Reading edb.rsrc")
    rsrc = Rsrc(edb.rsrcPath())

    acCodeTables = []
    for i in range(rsrc.numberOfCodeTrees("ac")):
        acCodeTables.append(
            CodeTable(rsrc.path, codeType="ac", codeIndex=i + 1))

    gcCodeTables = []
    for i in range(rsrc.numberOfCodeTrees("gc")):
        gcCodeTables.append(
            CodeTable(rsrc.path, codeType="gc", codeIndex=i + 1))

    geocodeRasters = []
    rast1 = Raster()
    rast1.read("/usr/airviro/data/geo/topdown/dynamicRasters/"
               "dynamic__GEOCODE__1.txt")
    rast2 = Raster()
    rast2.read("/usr/airviro/data/geo/topdown/dynamicRasters/"
               "dynamic__GEOCODE__2.txt")
    geocodeRasters.append(rast1)
    geocodeRasters.append(rast2)

    log.info("Reading csv-file")
    table = DataTable()
    table.read(options.infile, delimiter=options.delimiter,
               encoding="ISO-8859-15")

    if options.filterCol is not None:
        if options.filterCol not in table.colIndex:
            log.error("Filter column header not found in table")
            sys.exit(1)

    invalid = False
    nFiltered = 0
    nRows = 0

    log.info("Processing rows")
    for rowInd, row in enumerate(table.data):
        nRows += 1
        if options.filterCol is not None:
            filterVal = row[table.colIndex[options.filterCol]]
            if options.filterVal != str(filterVal):
                nFiltered += 1
                continue

        comp = Company()
        for par in comp.parOrder:
            val = cdbDefaults.get(par, None)
            if par in cdbCols:
                colId = cdbCols[par]
                try:
                    tableVal = row[table.colIndex[colId]]
                except KeyError:
                    log.error(
                        "No column with header %s, columns: %s" % (
                            colId, str(table.listIds())))
                if tableVal is not None:
                    val = tableVal
            if val is not None:
                # Too long names are truncated
                if par == "NAME" and len(val) > 45:
                    val = val[:45]
                comp[par] = val

        fac = Facility()
        for par in fac.parOrder:
            val = fdbDefaults.get(par, None)
            if par in fdbCols:
                colId = fdbCols[par]
                tableVal = row[table.colIndex[colId]]
                if tableVal is not None:
                    val = tableVal
            if val is not None:
                # Too long names are truncated
                if par == "NAME" and len(val) > 45:
                    val = val[:45]
                fac[par] = val

        src = Source()
        for par in ["X1", "Y1", "X2", "Y2",
                    "PX", "PY", "NAME", "INFO", "INFO2", "DATE", "CHANGED",
                    "CHIMNEY HEIGHT", "GASTEMPERATURE", "GAS FLOW",
                    "SEARCHKEY1", "SEARCHKEY2", "SEARCHKEY3",
                    "SEARCHKEY4", "SEARCHKEY5", "CHIMNEY OUT", "CHIMNEY IN",
                    "HOUSE WIDTH", "HOUSE HEIGHT", "NOSEGMENTS",
                    "BUILD_WIDTHS", "BUILD_HEIGHTS", "BUILD_LENGTHS",
                    "BUILD_DISTFARWALL", "BUILD_CENTER", "GEOCODE",
                    "FORMULAMACRO", "ALOB"]:
            val = sdbDefaults.get(par, None)
            if par in sdbCols:
                colId = sdbCols[par]
                tableVal = row[table.colIndex[colId]]
                if tableVal is not None:
                    val = tableVal
            if val is not None:
                # Validate geo code
                if par == "GEOCODE":
                    gcList = val.split()
                    for codeIndex, code in enumerate(gcList):
                        if not gcCodeTables[codeIndex].hasCode(code):
                            log.error("Invalid geo code %s on row %i" %
                                      (code, rowInd))
                            invalid = True
                # Too long names are truncated
                if par == "NAME" and len(val) > 45:
                    val = val[:45]
                # Store in src object and convert to correct type
                src._fieldvalues[par] = lazy_parse(src, par, val)

        gc1 = geocodeRasters[0].getVal(src.get_coord()[0], src.get_coord()[1])
        gc2 = geocodeRasters[1].getVal(src.get_coord()[0], src.get_coord()[1])
        src.GEOCODE = [str(int(gc1)) + "." + str(int(gc2))]

        for emisNr, emis in substEmisCols.items():
            substEmis = {"unit": None, "ac": None,
                         "substance": None, "emis": None}
            for par in substEmis.keys():
                if par in emis:
                    substEmis[par] = row[table.colIndex[emis[par]]]
                else:
                    try:
                        substEmis[par] = substEmisDefaults[emisNr][par]
                    except KeyError:
                        log.error(
                            "Need to specify column or default value " +
                            "for subst emis %s" % emisNr)

            substInd = subdb.substIndex(substEmis["substance"])
            if substInd is None:
                log.error("Invalid substance name %s on row %i" % (
                    substEmis["substance"], rowInd))
                sys.exit(1)

            try:
                unit = rsrc.sub[substEmis["unit"]]
            except KeyError:
                log.error("Invalid unit name %s on row %i" %
                          (substEmis["unit"], rowInd))
                sys.exit(1)

            acList = substEmis["ac"].split('\\')[0].split()
            for codeIndex, code in enumerate(acList):
                # if code == "2.A.4.2":
                #     import pdb; pdb.set_trace()
                refCode = acCodeTables[codeIndex].checkCode(code)
                if refCode == "-":
                    log.error("Invalid activity code %s on row %i" %
                              (code, rowInd))
                    sys.exit(1)
                if refCode != code:
                    acList[codeIndex] = refCode
            substEmis["ac"] = acList

            if substEmis["emis"] is not None and substEmis["emis"] != "0":
                try:
                    emis = src.add_emission()
                    emis.UNIT = substEmis["unit"]
                    emis.ACTCODE = substEmis["ac"]  # needs re-formatting
                    emis.EMISSION = float(substEmis["emis"])
                    emis.SUBSTANCE = substInd
                    emis.auto_adjust_unit(edb)
                except Exception:
                    # print substEmis
                    # log.error("Invalid substance emission on row %i"
                    #           % rowInd)
                    invalid = True
                    # Remove the partially added emission
                    src.EMISSION = src.EMISSION[:-1]

        for emisNr, emis in subgrpEmisCols.items():
            subgrpEmis = {"unit": None, "ac": None,
                          "name": None, "emis": None}
            for par in subgrpEmis.keys():
                if par in emis:
                    subgrpEmis[par] = row[table.colIndex[emis[par]]]
                else:
                    try:
                        subgrpEmis[par] = subgrpEmisDefaults[emisNr][par]
                    except KeyError:
                        log.error(
                            "Need to specify column or default value " +
                            "for subgrp emis %s" % emisNr)

            # Validating subgrp name
            try:
                subgrp = subgrpdb.getByName(subgrpEmis["name"])
            except KeyError:
                log.error("Invalid subgrp name %s on row %i" %
                          (subgrpEmis["name"], rowInd))
                invalid = True

            # Validating subgrp emis unit
            try:
                unitFactor = rsrc.subGrpEm[subgrpEmis["unit"]]
            except KeyError:
                log.error("Invalid unit %s for subgrp emission on row %i" % (
                    subgrpEmis["unit"], rowInd))
                invalid = True

            # Validating subgrp activity code
            acList = subgrpEmis["ac"].split()
            for codeIndex, code in enumerate(acList):
                refCode = acCodeTables[codeIndex].checkCode(code)
                if refCode == "-":
                    log.error("Invalid activity code %s on row %i" %
                              (code, rowInd))
                    invalid = True
                    break
                if refCode != code:
                    acList[codeIndex] = refCode
            subgrpEmis["ac"] = acList

            try:
                src.addSubgrpEmis(subgrp.index,
                                  emis=subgrpEmis["emis"],
                                  unit=subgrpEmis["unit"],
                                  ac=subgrpEmis["ac"])
            except Exception:
                log.error("Invalid subgrp emission on row %i" % rowInd)
                invalid = True

        companydb.append(comp, force=True)
        facilitydb.append(fac, force=True)
        source_writer.write(src)
        # sourcedb.append(src)

    if invalid:
        log.info("No output written due to validation errors")
        sys.exit(0)

    if len(companydb.items) > 0:
        if options.outfile is None:
            log.info("Writing companydb")
        else:
            log.info("Writing companydb to file")
        companydb.write(filename=options.outfile + ".companydb")

    if len(facilitydb.items) > 0:
        if options.outfile is None:
            log.info("Writing facilitydb")
        else:
            log.info("Writing facilitydb to file")
        facilitydb.write(filename=options.outfile + ".facilitydb")

    # if len(sourcedb.sources) > 0:
    #     if options.outfile is None:
    #         log.info("Writing sourcedb")
    #     else:
    #         log.info("Writing sourcedb to file")
    #     sourcedb.write(filename=options.outfile + ".sourcedb")

    if options.filterCol is not None:
        log.info("Filtered out %i out of %i" % (nFiltered, nRows))
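
# The controlfile keys parsed above follow the patterns captured by the
# regular expressions. An illustrative snippet, derived from those patterns
# rather than from a real controlfile; all column names on the right-hand
# side are hypothetical:
#
#   companydb.par.NAME: company_name_column
#   companydb.par.NAME.default: Unknown company
#   facilitydb.par.NAME: facility_name_column
#   sourcedb.par.X1: x_coord_column
#   sourcedb.subst_emis.1.substance: substance_column
#   sourcedb.subst_emis.1.emis: emission_column
#   sourcedb.subst_emis.1.unit: unit_column
#   sourcedb.subst_emis.1.ac: activity_code_column
#   sourcedb.subgrp_emis.1.name: subgrp_name_column
#   sourcedb.subgrp_emis.1.emis: consumption_column
#
# Each "...par.<PAR>:" value names a csv column header; the optional
# "...default:" entries supply values for rows where the column is empty.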
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument("controlfile", metavar='CONTROLFILE',
                        action="store",
                        help="Controlfile for topdown processing")
    parser.add_argument("-t", "--template", metavar='TEMPLATEFILE',
                        action="store", dest="cf", default=None,
                        help="Generate default controlfile")

    args = parser.parse_args()

    if args.cf is not None:
        generateCf(args.cf)
        log.info("Wrote default controlfile")
        sys.exit(0)

    log.info("Starting topdown processing")

    # Opening controlfile
    cf = ControlFile(args.controlfile)
    dmn = Domain()

    log.info("Reading topdown table")
    tdTableName = cf.findExistingPath("topDownTable:")
    tdTable = DataTable()
    tdTable.keys.append("Code")
    tdTable.read(tdTableName, delimiter=";")

    log.info("Reading national totals table")
    natTotalTableName = cf.findExistingPath("nationalTotalTable:")
    natTable = DataTable(desc=[{"id": "Code", "type": unicode},
                               {"id": "description", "type": unicode}])
    natTable.keys.append("Code")
    natTable.read(natTotalTableName, units=True, defaultType=str)

    notationKeys = ["NE", "NO", "NA", "IE"]

    log.debug("Remove notation keys from national totals table")
    for row in natTable.data:
        for i in range(len(row)):
            if row[i] in notationKeys:
                row[i] = None

    log.debug("Convert all emission columns in national totals to float")
    for colId in natTable.listIds():
        if colId not in ["Code", "description"]:
            natTable.convertCol(colId, float)

    log.debug("Store units from national totals for each substance in dict")
    natUnits = {}
    for col in natTable.desc:
        if col.get("units", None) is not None:
            natUnits[col["id"]] = col["units"]

    log.debug("Read remaining data from control file")
    bottomupEdbName = cf.findString("bottomUpEdb:")
    topDownEdbName = cf.findString("topDownEdb:")
    emissionsEdbName = cf.findString("emissionsEdb:")
    userName = cf.findString("user:")
    year = cf.findInt("year:")

    # Initialize edb objects
    buEdb = Edb(dmn, userName, bottomupEdbName)
    tdEdb = Edb(dmn, userName, topDownEdbName)
    eEdb = Edb(dmn, userName, emissionsEdbName)

    log.info("Reading/preparing EDB:s")

    log.info("Reading subdb")
    subdb = Subdb(eEdb)
    subdb.read()

    log.info("Reading subgrpdb")
    subgrpdb = SubgrpStream(buEdb)
    subgrpdb.read()

    log.info("Reading facilitydb")
    facilityIn = FacilityStream(buEdb)

    log.info("Reading companydb")
    companyIn = CompanyStream(buEdb)

    facilityOut = FacilityStream(eEdb, mode="w")
    companyOut = CompanyStream(eEdb, mode="w")

    log.info("Writing company db to result edb")
    companyOut.write(companyIn.read())

    log.info("Writing facility db to result edb")
    facilityOut.write(facilityIn.read())

    if not buEdb.exists():
        log.error("Edb " + buEdb.name + " does not exist for user " +
                  userName + " in domain " + dmn.name)
        sys.exit(1)
    if not tdEdb.exists():
        log.error("Edb " + tdEdb.name + " does not exist for user " +
                  userName + " in domain " + dmn.name)
        sys.exit(1)
    if not eEdb.exists():
        log.error("Edb " + eEdb.name + " does not exist for user " +
                  userName + " in domain " + dmn.name)
        sys.exit(1)

    keys = tdEdb.listGrids()
    msg = "%i keys found in edb: %s" % (len(keys), tdEdb.name)
    log.info(msg)

    # sourcedb from bottom-up edb
    with SourceStream(buEdb, mode='rb') as source_instream:
        source_reader = ModelReader(source_instream)
        bu_sources = list(source_reader)

    log.info("%i point sources found in edb: %s" % (
        len(bu_sources), buEdb.name))

    # Empty sourcedb of the result edb
    if cf.findBoolean("emptyEmissionSourcedb:"):
        eEdb.empty_sourcedb()
        e_sources = []
        log.info("Removed point sources from edb: %s" % eEdb.name)
    else:
        # sourcedb from emission edb (result edb)
        with SourceStream(eEdb, mode='rb') as source_instream:
            source_reader = ModelReader(source_instream)
            e_sources = list(source_reader)
        msg = "%i point sources found in edb: %s" % (
            len(e_sources), eEdb.name)
        log.info(msg)

    if not path.exists(eEdb.rsrcPath()):
        log.error("No edb.rsrc exists for emission edb")
        sys.exit(1)
    else:
        rsrc = Rsrc(eEdb.rsrcPath())

    acIndex = cf.findInt("acIndex:")
    codeDepth = rsrc.ac[acIndex - 1].depth

    substances = cf.findStringList("substances:")
    for subst in substances:
        if subst not in subdb.substIndices:
            log.error("Substance: " + subst +
                      " not in Airviro substance list")
            sys.exit(1)

    # Initialize trace for debug and additional logging
    if cf.findBoolean("trace:"):
        log.info("Initializing trace for detailed logging")
        trace = TraceDef(
            active=True,
            substances=cf.findStringList("trace.substances:"),
            logfile=cf.findString("trace.logfile:"),
            regdefgc=cf.findIntList("trace.regdef.gc:",
                                    optional=True, default=None),
            gcDefRaster=cf.findExistingPath("trace.gcraster:")
        )
    else:
        trace = TraceDef(active=False)

    log.info("Initializing result table")
    resTablePath = cf.findString("resTable:")
    resTable = DataTable(desc=[{"id": "Code", "type": unicode}])
    resTable.keys.append("Code")
    for subst in substances:
        resTable.addCol({"id": subst, "type": float, "unit": "%"})

    # Create emission grid template (with geocodes)
    log.info("Reading emission grid template")
    eGridTemplatePath = cf.findExistingPath("emisGridTemplatePath:")
    eGridTemplate = Egrid(eEdb, "name")
    if eGridTemplatePath[-4:] == ".asc":
        eGridTemplatePath = eGridTemplatePath[:-4]
    eGridTemplate.readData(eGridTemplatePath)
    eGridTemplate.substances = {}
    eGridTemplate.par["SUBSTANCE"].val = []

    dd = {
        "key": None,
        "regstat": None,
        "regdef": None,
        "bu_sources": bu_sources,
        "psIndices": [],
        "units": natUnits,
        "rsrc": rsrc,
        "subdb": subdb,
        "trace": trace,
        "subgrpdb": subgrpdb
    }

    # Process all rows in the topdown table
    for row in tdTable.data:
        code = row[tdTable.colIndex["Code"]]
        active = row[tdTable.colIndex["Active"]]
        statType = row[tdTable.colIndex["Stat_type"]]
        if active == "no":
            continue
        log.info("Code: " + code)

        distributed = False

        # Add '-' to the code to reach max length (fix for a GUI bug)
        airviroCode = code
        # while len(airviroCode.split(".")) < codeDepth:
        #     airviroCode += ".-"

        tdrow = tdTable.data[tdTable.rowIndex([code])]
        nrow = natTable.data[natTable.rowIndex([code])]

        # Create a resTable row to fill with data
        resrow = [None] * resTable.ncols
        resrow[0] = code

        # Check if national totals are non-zero
        nonZero = False
        for val in nrow:
            if val is not None and val > 0:
                nonZero = True
                break

        # Filter out indices for point sources with the current ac.
        # Sources coded with sub-codes are also included, which makes it
        # possible to estimate top-down emissions on a higher code-level.
        psIndices = []
        for i, ps in enumerate(bu_sources):
            codeMatch = False
            for emis in ps.EMISSION:
                # It is assumed that the first code is used
                # while processing topdown
                ac = emis.ACTCODE[0]
                if ac[-1] == ".":
                    ac = ac[:-1]
                # if ac[:len(code)] == code:
                if ac == code:
                    codeMatch = True
                    break
            if not codeMatch:
                for emis in ps.SUBGRP:
                    # It is assumed that the first code is used
                    # while processing topdown
                    ac = emis.ACTCODE[0]
                    if ac[:len(code)] == code:
                        codeMatch = True
                        break
            if codeMatch:
                psIndices.append(i)

        dd["psIndices"] = psIndices

        keyName = row[tdTable.colIndex["Key"]]

        # If no distribution key is specified and there are no point
        # sources in the bottom-up edb, continue with the next code
        if keyName is None and psIndices == []:
            log.debug("No key and no point sources found for code: %s, "
                      "skipping..." % code)
            resTable.addRow(resrow)
            continue

        if psIndices != []:
            msg = "--Found %i pointsources" % len(psIndices)
            log.info(msg)

        if keyName is not None:
            if keyName not in keys:
                log.error("No such key: " + keyName)
                sys.exit(1)

            msg = "--Key: %s" % keyName
            log.info(msg)
            keyGrid = Egrid(tdEdb, keyName)
            keyGrid.readData()
            log.debug("Read key: " + keyName + " from topdownEdb")

            # Create emission grid to store distributed emissions
            eGrid = deepcopy(eGridTemplate)
            eGrid.name = code.replace(".", "_")
            eGrid.par["NAME"].val = code
            eGrid.par["INFO2"].val = ("Distribution key: " +
                                      keyGrid.par["NAME"].val)
            eGrid.par["ACTIVITYCODE"].val = [airviroCode.split(".")]

        regstatName = row[tdTable.colIndex["Regstat"]]
        regdefName = row[tdTable.colIndex["Regdef"]]

        if regstatName is not None:
            if regdefName is None:
                log.error("No region definition given for regional " +
                          "statistics: " + regstatName)
                sys.exit(1)
            regstatPath = path.join(dmn.domainPath(), "topdown",
                                    "regstat", regstatName)
            regstat = DataTable()
            log.info("regstatPath: " + regstatPath)
            regstat.read(regstatPath, units=True, defaultType=float,
                         delimiter=";")
            if "Geocode" not in regstat.listIds():
                log.error("No Geocode column found in regstat")
                sys.exit(1)
            regstat.convertCol("Geocode", int)
            # Making Geocode the primary key
            regstat.keys.append("Geocode")

            # Create list of unique geo codes
            geocodes = [row[regstat.colIndex["Geocode"]]
                        for row in regstat.data]
            geocodes = unique(geocodes)

            for colId in regstat.listIds():
                if colId.lower() == "year":
                    rows = []
                    regstat.convertCol(colId, int)
                    # Make it possible to accumulate year
                    regstat.setKeys(regstat.keys + [colId])

                    # Calculates the total emission for each geocode
                    # in case there are multiple rows for different fuels etc
                    colsToSum = regstat.listIds()
                    colsToSum.remove(colId)
                    colsToSum.remove("Geocode")
                    for gc in geocodes:
                        # Sums all numeric values in colsToSum for rows
                        # matching row id [gc, year]; returns an accumulated
                        # row and appends it to rows
                        rowId = regstat.dict2RowId({"Geocode": gc,
                                                    colId: year})
                        rows.append(regstat.accumulate(rowId, "sum",
                                                       colsToSum))
                    # Replace original rows with accumulated rows
                    regstat.data = rows
                    regstat.keys.remove(colId)
                    break

            regdef = Raster()
            regdefPath = path.join(dmn.domainPath(), "topdown",
                                   "regdef", regdefName)
            regdef.read(regdefPath)

            dd["regstat"] = regstat
            dd["regdef"] = regdef
        else:
            dd["regstat"] = None
            dd["regdef"] = None

        if (dd["regstat"] is not None and len(bu_sources) > 0 and
                statType == "fixed"):
            log.info("--Regionalizing pointsources")
            dd = regionalizePS(dd, code)

        if keyName is not None and nonZero:
            regionalizedDefault = False
            # Spatial distribution of emissions
            for subst in substances:
                sInd = subdb.substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]

                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # TODO: should give reference to subgrps to include
                    # emis from them
                    pstot += source.get_emis(
                        sInd,
                        toUnit,
                        eEdb,
                        actcodes=[code]
                    )

                if ntot is None or ntot == 0:
                    if pstot > 0:
                        # 9999 is used as marker for no national total
                        resrow[resTable.colIndex[subst]] = 9999.0
                        log.warning(
                            "Nattot is 0 but ps tot is: %f %s" % (
                                pstot, toUnit))
                    continue

                nrest = ntot - pstot
                resrow[resTable.colIndex[subst]] = 100.0

                if abs(nrest / ntot) < 0.0001:
                    nrest = 0
                    log.info("--Rest is < 0.01 % of national total, "
                             "rounded to zero")
                    continue
                elif nrest < 0:
                    log.warning(
                        "--National rest is below zero, %4.2f proc for %s" % (
                            -1 * nrest / ntot * 100, subst))
                    dd["trace"].write()
                    # continue

                log.info(
                    "---Substance: " + subst +
                    ", rest is: " + str(nrest) + " " + toUnit +
                    " = " + str(nrest / ntot * 100.0) + "%")

                try:
                    keyRast = keyGrid.substances[sInd]
                except KeyError:
                    keyRast = keyGrid.substances[subdb.substIndices["all"]]

                dd["key"] = keyRast
                if dd["regstat"] is not None:
                    if (subst not in regstat.colIndex and
                            sInd not in keyGrid.substances and
                            not regionalizedDefault):
                        dd = regionalizeKey(dd, subst, code)
                        regionalizedDefault = True
                    else:
                        dd = regionalizeKey(dd, subst, code)

                emisRast = distribute(dd["key"], nrest)
                emisRast = emisRast * unitConvFac(toUnit, "ton/year")
                eGrid.addData(emisRast, dd["subdb"].substIndices[subst])
                distributed = True
        else:
            # resTable is filled.
            # In case all national totals are zero but there are ps
            for subst in substances:
                sInd = dd["subdb"].substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # subgrps are not used!
                    pstot += source.get_emis(sInd, toUnit, buEdb,
                                             actcodes=[code])
                if ntot is not None and ntot != 0:
                    resrow[resTable.colIndex[subst]] = pstot / ntot * 100.0
                else:
                    resrow[resTable.colIndex[subst]] = -999.0

        if len(dd["psIndices"]) > 0:
            tmp_sources = (bu_sources[i] for i in dd["psIndices"])
            with SourceStream(eEdb, mode='wb') as out_source_stream:
                source_writer = ModelWriter(out_source_stream)
                for source in tmp_sources:
                    source_writer.write(source)
            log.debug("Wrote ps to emission edb")

        if distributed:
            eGrid.load()
            log.debug("Wrote emission grid to emission edb")

        dd["trace"].write()
        resTable.addRow(resrow)

    with open(resTablePath, "w") as resTableFile:
        resTable.write(resTableFile)

    log.info("Finished topdown process")
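
# unique() is used above when collapsing the regional statistics but is not
# defined in this listing. A minimal order-preserving sketch; this is an
# assumption, the Airviro helper may behave differently (e.g. sort its
# output).
def unique(values):
    """Return the unique items of a sequence, preserving first-seen order."""
    seen = set()
    result = []
    for v in values:
        if v not in seen:
            seen.add(v)
            result.append(v)
    return result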