def main(): #setting up parser parser=OptionParser(usage= usage, version=version) parser.add_option("-l", "--loglevel", action="store",dest="loglevel",default=2, help="Sets the loglevel (0-3 where 3=full logging)") parser.add_option("-s", "--substgroups", action="store",dest="substgroups", help="Path to substance group file") parser.add_option("-t", "--templateCf", action="store",dest="cf", help="Path to generate controlfile template") (options, args) = parser.parse_args() rootLogger = logger2.RootLogger(options.loglevel) logger=rootLogger.getLogger("createScbEf") if options.cf!=None: controlFile.generateCf(path.abspath(options.cf),controlFileTemplate) logger.info("Wrote default controlfile") sys.exit() if len(args)!=1: parser.error("Wrong number of arguments") cf=controlFile.controlFile(args[0]) airviroTable_90= open(cf.findString("airviroTable_90:"),'w') airviroTable_07= open(cf.findString("airviroTable_07:"),'w') table_brslag_koder = dataTable.DataTable() table_brslag_koder.read(cf.findExistingPath("table_brslag_koder:")) table_brslag_koder.keys.append("BRSLAG") table_brslag_koder.convertCol("BRSLAG",int) table_brslag_units = dataTable.DataTable( desc=[{"id":"Brslag","type":int}], keys=["Brslag"]) table_brslag_units.read(cf.findExistingPath("table_brslag_units:")) table_ef_units = dataTable.DataTable() table_ef_units.read(cf.findExistingPath("table_ef_units:")) table_ef_units.keys.append("Substance") table_fstalle_90 = dataTable.DataTable( desc=[{"id":"Fstalle","type":int}], keys=["Fstalle"]) table_fstalle_90.read(cf.findExistingPath("table_fstalle_90:")) table_fstalle_07 = dataTable.DataTable( desc=[{"id":"Fstalle","type":int}], keys=["Fstalle"]) table_fstalle_07.read(cf.findExistingPath("table_fstalle_07:")) table_pkbio = dataTable.DataTable( desc=[{"id":"Pkbio","type":int}], keys=["Pkbio"]) table_pkbio.read(cf.findExistingPath("table_pkbio:")) table_ef_other_90 = dataTable.DataTable() table_ef_other_90.read(cf.findExistingPath("table_ef_other_90:")) 
table_ef_NOx_SOx_90 = dataTable.DataTable() table_ef_NOx_SOx_90.read(cf.findExistingPath("table_ef_NOx_SOx_90:")) table_ef_07 = dataTable.DataTable() table_ef_07.read(cf.findExistingPath("table_ef_07:")) substances = cf.findStringList("substances:") #processing 90-06 efTable07_desc=[ {"id":"Fuel_id","type":int}, {"id":"Sector","type":unicode}, {"id":"Technology_id","type":int}, {"id":"Technology_bio_id","type":int}, {"id":"Facility_id","type":int}, {"id":"Facility_name","type":unicode}, {"id":"Year","type":int}, {"id":"Airviro_name","type":str}] efTable90_desc=[ {"id":"Fuel_id","type":int}, {"id":"Sector","type":unicode}, {"id":"Technology_id","type":int}, {"id":"Technology_bio_id","type":int}, {"id":"Facility_id","type":int}, {"id":"Facility_name","type":unicode}, {"id":"Year","type":int}, {"id":"Airviro_name","type":str}] for subst in substances: #Presumes that units are in the second column unitColId=table_ef_units.desc[1]["id"] ef_unit=table_ef_units.lookup(unitColId,[subst]) efTable90_desc.append({"id":subst,"type":float,"units":ef_unit}) efTable07_desc.append({"id":subst,"type":float,"units":ef_unit}) efTableKeys=["Fuel_id","Sector","Technology_id","Technology_bio_id","Facility_id","Year"] efTable90 = dataTable.DataTable(desc=efTable90_desc,keys=efTableKeys) efTable07 = dataTable.DataTable(desc=efTable07_desc,keys=efTableKeys) # t=table_ef_other_90 # for row in t.data: # sekt= row[t.colIndex("sekt")].upper() # fstalle=None # if sekt==u"EL": # fstalle=4 # newRow=[ # int(row[t.colIndex("BRSLAG")]), # sekt, # fstalle, #Technology id # None, #Technology bio id # 0, #Facility_id # None, #Facility_name # int(row[t.colIndex("AR")]), # None #Airviro name # ] # for subst in substances: # if subst!="NOx" and subst!="SOx": # ef=row[t.colIndex(subst)] # if not ef is None: # newRow.append(float(ef)) # else: # newRow.append(None) # else: # newRow.append(None) # efTable90.addRow(newRow) # t=table_ef_NOx_SOx_90 # for row in t.data: # fstalle = int(row[t.colIndex("fstalle")]) 
# facility_name = row[t.colIndex("FNAMN")] # facility_id = row[t.colIndex("ANLNR")] # if not facility_id is None: # facility_id=int(facility_id) # technology_bio_id = row[t.colIndex("pkbio")] # if not technology_bio_id is None: # technology_bio_id=int(technology_bio_id) # addFja=False # if fstalle==4: #Gas turbine implies electricity prod. # sector=u"EL" # elif fstalle==8: #Other sector # sector=u"OVR" # fstalle=None # elif fstalle==9: # Other use, default (industry, heat prod) # sector=u"IND" # fstalle=None # addFja=True # else: # sector=u"IND" #Industry # brslag=row[t.colIndex("BRSLAG")] # newRow=[ # int(brslag), # sector, # fstalle, #Technology id # technology_bio_id, #Technology bio id # facility_id, #Facility_id # facility_name, #Facility_name # int(row[t.colIndex("ar")]), # None # ] # for subst in substances: # if subst==u"NOx" or subst==u"SOx": # newRow.append(float(row[t.colIndex(subst)])) # else: # newRow.append(None) # existingRow=newRow[:] # existingRow[2]=None #fstalle is not specified # existingRow[3]=None # existingRow[4]=None # if existingRow[1]==u"EL": # existingRow[2]=4 #fstalle is Gas turbine # existingRowInd=efTable90.rowIndex(efTable90.getRowId(existingRow)) # efTable90.addRow(newRow,append=True) # #If fstalle is 09 - Other, this refers to either Ind or Fja, therefore both are added # if addFja: # newRow[1]=u"FJA" # #Add SOx and NOx factors to existing identical rows (if there are any) # efTable90.addRow(newRow,append=True) t=table_ef_07 for row in t.data: fstalle=row[t.colIndex("fstalle")] pkbio = row[t.colIndex("fstalle")] if not fstalle is None: fstalle=int(fstalle) if not pkbio is None: pkbio=int(pkbio) newRow=[ int(row[t.colIndex("brslag")]), row[t.colIndex("sekt")].upper(), fstalle, #Technology id None, #Technology bio id None, #Facility_id None, #Facility_name int(row[t.colIndex("ar")]), None #Airviro name ] for subst in substances: ef=row[t.colIndex(subst)] if not ef is None: newRow.append(float(ef)) else: newRow.append(None) 
efTable07.addRow(newRow) # for row in efTable90.data: # nameParts={"Fuel_id":row[0], # "Sector":row[1], # "Technology_id":row[2], # "Technology_bio_id":row[3], # "Facility_id":row[4], # "Year":row[6]} # if nameParts["Technology_id"] is None: # nameParts["Technology_id"]=0 # if nameParts["Technology_bio_id"] is None: # nameParts["Technology_bio_id"]=0 # if nameParts["Facility_id"] is None: # nameParts["Facility_id"]=0 # #use last two numbers for year # nameParts["Year"]=str(nameParts["Year"])[-2:] # airviroName= "f%(Fuel_id)02is%(Sector)st%(Technology_id)02ib%(Technology_bio_id)1if%(Facility_id)03iy%(Year)s" %nameParts # row[efTable90.colIndex("Airviro_name")]= airviroName # #outFile=open(outputTable,"w") # efTable90.join(table_pkbio,{"Pkbio":"Technology_bio_id"}) # efTable90.join(table_fstalle_90,{"Fstalle":"Technology_id"}) # efTable90.join(table_brslag_units,{"Brslag":"Fuel_id"}) # # efTable90.join(table_brslag_koder,{"BRSLAG":"Fuel_id"},addCols=["Klartext"]) # #Filter out unrelevant fuel types ignoreFuelTypes=cf.findIntList("ignoreFuelTypes:") # for fuelType in ignoreFuelTypes: # efTable90=efTable90.filtered({"Fuel_id":fuelType},invert=True) # efTable90.write(airviroTable_90) for row in efTable07.data: nameParts={"Fuel_id":row[0], "Sector":row[1], "Technology_id":row[2], "Technology_bio_id":row[3], "Facility_id":row[4], "Year":row[6]} if nameParts["Technology_id"] is None: nameParts["Technology_id"]=0 if nameParts["Technology_bio_id"] is None: nameParts["Technology_bio_id"]=0 if nameParts["Facility_id"] is None: nameParts["Facility_id"]=0 #use last two numbers for year nameParts["Year"]=str(nameParts["Year"])[-2:] #airviroName= "f%(Fuel_id)02is%(Sector)st%(Technology_id)02ib%(Technology_bio_id)1if%(Facility_id)03iy%(Year)s" %nameParts #Facility id not included airviroName= "f%(Fuel_id)02is%(Sector)st%(Technology_id)02ib%(Technology_bio_id)1iy%(Year)s" %nameParts row[efTable07.colIndex("Airviro_name")]= airviroName #Set all bio fuel CO2 emis factors to 0 try: if 
table_brslag_koder.lookup("branslegrupp",[nameParts["Fuel_id"]]) == "biomass": row[efTable07.colIndex("CO2")]=0 except: print "Warning - no branslegrupp found for brslag %i, assuming fossile" %(nameParts["Fuel_id"]) efTable07.join(table_pkbio,{"Pkbio":"Technology_bio_id"}) efTable07.join(table_fstalle_07,{"Fstalle":"Technology_id"}) efTable07.join(table_brslag_units,{"Brslag":"Fuel_id"}) #filter out unrelevant fuel types for fuelType in ignoreFuelTypes: efTable07=efTable07.filtered({"Fuel_id":fuelType},invert=True) efTable07.write(airviroTable_07)
def main(): #-----------Setting up and unsing option parser----------------------- parser=OptionParser(usage= usage, version=version) logger=logging.getLogger("exportEmisGrids.py") parser.add_option("-l", "--loglevel", action="store",dest="loglevel",default=2, help="Sets the loglevel (0-3 where 3=full logging)") parser.add_option("-u", "--user", action="store", dest="user", default=None, help="Specify user manually") parser.add_option("-e", "--edb", action="store", dest="edb", default=None, help="Name of target edb") parser.add_option("-t", "--template", action="store",dest="cf",default=None, help="Generate default controlfile") parser.add_option("-f", "--force", action="store_true",dest="force",default=False, help="To start the process without confirming the domain") parser.add_option("--aggregate", action="store_true",dest="aggregate",default=False, help="Aggregate twin-links for correct use of street canyon models") parser.add_option("-a", "--attributeFilter", action="store",dest="attributeFilter",default=None, help="Set to filter out roads with the specified attribute value, attribute field name is set in controlfile") parser.add_option("-r", "--region", action="store",dest="region",default=None, help="Determines the percentage of spiked tyres, possible region names are found in studdedTyreTable") parser.add_option("-y", "--year", action="store",dest="outYear",default=None, help="Year from which traffic update factors and vehicle compsition will be taken") parser.add_option("-o","--outfile", action="store",dest="outfile",default=None, help="Output road ascii file") parser.add_option("-g","--geofilter", action="store",dest="geoFilter",default=None, help="Filter out roads within polygons with field value matching the geoFilter, specify shapefile in controlfile") (options, args) = parser.parse_args() #------------Setting up logging capabilities ----------- rootLogger=logger2.RootLogger(int(options.loglevel)) logger=rootLogger.getLogger(sys.argv[0]) if 
options.cf!=None: #Checks if the file already exists, prompt the user if overwrite is wanted, create file controlFile.generateCf(path.abspath(options.cf),controlFileTemplate) logger.info("Wrote default controlfile") sys.exit() if len(args)!=1: parser.error("Incorrect number of arguments") if options.edb ==None: parser.error("Need to specify edb using flag -e") if options.user ==None: parser.error("Need to specify user using flag -u") if options.region==None: parser.error("Need to specify region") if options.outYear is None: parser.error("Need to specify output year by -y <year>") if options.attributeFilter is not None: attributeFilter=options.attributeFilter else: attributeFilter=None #Get vagverksregion (used to select percentage of studded tyres) region=options.region domainName=os.environ["AVDBNAME"] dmn = pyDomain.domain(domainName) if not options.force: answer=raw_input("Chosen dbase is: "+domainName+",continue(y/n)?") if answer=="y": dmn=pyDomain.domain() else: sys.exit(1) if not dmn.edbExistForUser(options.edb,options.user): logger.error("Edb "+options.edb+" does not exist for user "+ options.user+" in domain "+domainName) sys.exit(1) #Creating edb andedb.rsrc objects edb=pyEdb.edb(dmn.name,options.user,options.edb) #Creating a roaddb object roaddb=pyRoaddb.pyRoaddb(dmn,options.user,edb.name) #Creating a control-file object (simple parser) cf=controlFile.controlFile(fileName=path.abspath(args[0]),codec="latin6") #Retrieving data from control file shapeFilePath = cf.findExistingPath("shapeFile:") artemisToSimairTablePath = cf.findExistingPath("artemisToSimairTable:") nvdbToArtemisTablePath = cf.findExistingPath("nvdbToArtemisTable:") updateFactorTablePath = cf.findExistingPath("updateFactorTable:") vehCompTablePath = cf.findExistingPath("vehicleCompositionTable:") studdedTyreTablePath= cf.findExistingPath("studdedTyreTable:") attributeFilterFieldName=cf.findString("attributeFilterFieldName:") #Loads driver to read shape-files using ogr-library driver = 
ogr.GetDriverByName('ESRI Shapefile') #If option for geoFilter is used, the polygon defining the boundaries #of the area to be filtered is read from geoFilterShapeFile if options.geoFilter is not None: gfShapeFilePath=cf.findExistingPath("geoFilterShapeFile:") gfFieldName=cf.findString("geoFilterFieldName:") gfShapeFile = driver.Open(str(gfShapeFilePath), update=0) if gfShapeFile is None: logger.error("Could not open data source: " +gfShapeFilePath) sys.exit(1) gfLayer = gfShapeFile.GetLayer() logger.info("Found %i features in geocode shapefile" %gfLayer.GetFeatureCount()) geoFilterPolys=[] gfFeature = gfLayer.GetNextFeature() while gfFeature: geocode=gfFeature.GetFieldAsString(str(gfFieldName)) if geocode==options.geoFilter: geoFilterPoly=gfFeature.GetGeometryRef() geoFilterPolys.append(geoFilterPoly) gfFeature = gfLayer.GetNextFeature() if len(geoFilterPolys)==0: logger.error("Could not find any polygon with field value matching the specified geoFilter in shapeFile: %s, field: %s" %(gfShapeFilePath,gfFieldName)) sys.exit(1) inYear=cf.findInt("inYear:") outYear=int(options.outYear) #Creating lookup table for the ARTEMIS-code nvdbToArtemisTable=dataTable.DataTable() nvdbToArtemisTable.keys=["vaghallare","tatort","hastighet","vagklass","vagtyp"] nvdbToArtemisTable.read(nvdbToArtemisTablePath) missingTSTable=dataTable.DataTable( desc=[ {"id":"vaghallare","type":str}, {"id":"tatort","type":str}, {"id":"hastighet","type":str}, {"id":"vagklass","type":str}, {"id":"vagtyp","type":str}, {"id":"antal","type":int}], keys=["vaghallare","tatort","hastighet","vagklass","vagtyp"]) #If input and output year is the same, no update is needed made for flow if inYear!=outYear: #Creating lookup table for the updateFactors updateFactorTable=dataTable.DataTable() updateFactorTable.read(updateFactorTablePath) try: #Set columns used as unique row identifiers, raise error if they do not exist updateFactorTable.setKeys(["start_year","vehicle","roadtype"]) except DataTableException,msg: 
logger.error("Could not find column header in updateFactorTable:"+msg) sys.exit(1) #Filters out not needed rows from the updateFactorTable (makes lookups faster) updateFactorTable=updateFactorTable.filtered({"start_year":str(inYear)}) #year no longer necessary as a row identifier updateFactorTable.setKeys(["vehicle","roadtype"]) if unicode(outYear) not in updateFactorTable.listIds() and inYear!=outYear: logger.error("No update factors to year %i found in %s" %(outYear,updateFactorTablePath)) sys.exit(1)
def main():
    """Calculate point-source emissions from plant energy use.

    Reads plant energy-use data and emission factors from the tables listed
    in the controlfile (single positional argument), multiplies energy use
    per fuel with the matching emission factor for each substance, and
    writes the resulting table to the path given by "table_output:".
    """
    # ---- command line parsing -------------------------------------------
    parser = OptionParser(usage=usage, version=version)
    parser.add_option("-l", "--loglevel",
                      action="store", dest="loglevel", default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    parser.add_option("-t", "--templateCf",
                      action="store", dest="cf",
                      help="Path to generate controlfile template")
    (options, args) = parser.parse_args()

    rootLogger = logger2.RootLogger(options.loglevel)
    logger = rootLogger.getLogger("calcPSEmis.py")

    # Write a default controlfile template and exit when -t is given
    if options.cf is not None:
        controlFile.generateCf(path.abspath(options.cf), controlFileTemplate)
        logger.info("Wrote default controlfile")
        sys.exit()

    if len(args) != 1:
        parser.error("Wrong number of arguments")

    cf = controlFile.controlFile(args[0])

    # Emission factor table keyed on fuel/sector/technology/year
    table_ef = dataTable.DataTable()
    table_ef.read(cf.findExistingPath("table_ef:"), units=True)
    table_ef.keys = ["Fuel_id", "Sector", "Technology_id", "Year", "Technology_bio_id"]

    # Energy use per plant
    table_energy = dataTable.DataTable()
    table_energy.read(cf.findExistingPath("table_energy:"))

    # Fuel name -> fuel code mapping
    # (read only once here; the original created and read this table a
    # second time further down - duplicate removed)
    table_fuelmap = dataTable.DataTable()
    table_fuelmap.read(cf.findExistingPath("table_fuelmap:"))
    table_fuelmap.keys = ["Namn_ETS"]

    substances = cf.findStringList("substances:")
    fuels = cf.findStringList("fuels:")
    year = cf.findString("year:")

    # Add one emission column per substance; the emission unit is the mass
    # part of the emission factor unit (e.g. "kg/GJ" -> "kg")
    for subst in substances:
        colInd = table_ef.colIndex(unicode(subst))
        ef_unit = table_ef.desc[colInd]["units"]
        emis_unit = ef_unit.split("/")[0]
        table_energy.addCol({"id": subst, "type": float, "units": emis_unit})

    outputTablePath = cf.findString("table_output:")

    t = table_energy
    pg = utilities.ProgressBar(len(t.data), sys.stdout)
    energyEmisPlants = []
    for planti, plant in enumerate(t.data):
        pg.update(planti)
        # keep only plants for the requested year and prioritized sources
        if plant[t.colIndex("year")] != year:
            continue
        source = plant[t.colIndex("prio")]
        if source not in [u"ETS", u"SMP", u"MR"]:
            continue
        fstalle_org = plant[t.colIndex("fstalle")]
        sector = plant[t.colIndex("sector_corr")]
        ef_year = plant[t.colIndex("year")]
        # emission factors before 2007 fall back on the 2007 factors
        if int(year) <= 2007:
            ef_year = "2007"
        for fuel in fuels:
            try:
                energy = plant[t.colIndex(source + "_" + fuel)]
            except dataTable.DataTableException:
                # no such energy column for this source/fuel combination
                continue
            if energy is None:
                continue
            fstalle = fstalle_org
            fuel_code = None
            # ---- find out fstalle (combustion technology) ---------------
            if fstalle == "5":
                if fuel not in ["heavyoil", "othersolid"]:
                    fstalle = None
            if fstalle == "6":
                if fuel not in ["othersolid"]:
                    fstalle = None
                else:
                    fuel_code = 24
            if fstalle == "7":
                if fuel not in ["othersolid"]:
                    fstalle = None
                else:
                    fuel_code = 10
            if fstalle == "3" or fstalle == "4":
                if sector == "FJA":
                    fstalle = "3"
                elif sector == "EL":
                    fstalle = "4"
                if fuel == "othersolid":
                    fuel_code = "14"  # waste
                else:
                    fstalle = None  # Gasturbine
            if fuel in ["biogas", "othergases"]:
                if sector == "EL":
                    fstalle = "1"
                if sector == "FJA":
                    fstalle = "2"
            if fstalle is None:
                fstalle = "99"  # default value
            # NOTE(review): fuel_code is sometimes int (24, 10) and
            # sometimes str ("14", or whatever the fuelmap lookup returns);
            # verify that table_ef.lookup handles both consistently
            if fuel_code is None:
                fuel_code = table_fuelmap.lookup("brslag", [fuel])
            row_id = [fuel_code, sector, fstalle, ef_year, None]
            for subst in substances:
                ef = table_ef.lookup(subst, row_id)
                if ef is None:
                    continue
                # All energy values are assumed to be in TJ and all ef are
                # in mass/GJ, therefore the energy is converted to GJ
                if plant[t.colIndex(subst)] is None:
                    plant[t.colIndex(subst)] = 1000 * float(energy) * float(ef)
                else:
                    plant[t.colIndex(subst)] += 1000 * float(energy) * float(ef)
        energyEmisPlants.append(plant[:])

    table_energy.data = energyEmisPlants
    # bug fix: close the output file explicitly (the original leaked the
    # handle, risking unflushed output)
    outputFile = open(outputTablePath, "w")
    try:
        table_energy.write(outputFile)
    finally:
        outputFile.close()
    pg.finished()