Code example #1
0
File: copySrc.py — Project: SMHI-Apl/Airviro-tools
def main():
    #-----------Setting up and unsing option parser-----------------------
    parser=OptionParser(usage= usage, version=version)
    
    parser.add_option('--src_user',
                      action="store",dest="src_user",
                      help="Name of target edb user")

    parser.add_option("--src_edb",
                      action="store",dest="src_edb",
                      help="Name of target edb")

    parser.add_option('--tgt_user',
                      action="store",dest="tgt_user",
                      help="Name of target edb user")

    parser.add_option("--tgt_edb",
                      action="store",dest="tgt_edb",
                      help="Name of target edb")

    parser.add_option("--codeIndex",
                      action="store",dest="codeIndex",
                      help="Activity code index to use for filter")

    parser.add_option("--codeValue",
                      action="store",dest="codeValue",
                      help="Activity code value to filter by")

    parser.add_option("-l", "--loglevel",
                      action="store",dest="loglevel",default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")

    parser.add_option("-o", "--outfile",
                      action="store",dest="outfile",
                      help="Output filenam, leave out to write directly to target edb")

    (options, args) = parser.parse_args()
    
    #--------------------Init logger-----------------------
    rootLogger = logger.RootLogger(level=options.loglevel)
    global log
    log = rootLogger.getLogger(sys.argv[0])

    #-----------------Validating options-------------------

    if options.src_user is None:
        log.error("Need to specify --src_user <user>")
        return 1
    if options.src_edb is None:
        log.error("Need to specify --src_edb <edb>")
        return 1

    if options.tgt_user is None:
        log.error("Need to specify --tgt_user <user>")
        return 1
    if options.tgt_edb is None:
        log.error("Need to specify --tgt_edb <edb>")
        return 1


    if len(args)!=0:
        log.error("No argument expected")
        return 1

    dmn=Domain()
    src_edb=Edb(dmn,options.src_user,options.src_edb)
    tgt_edb=Edb(dmn,options.tgt_user,options.tgt_edb)

    if not src_edb.exists():
        log.error("Source edb %s does not exist") %options.src_edb
        return 1

    if not tgt_edb.exists():
        log.error("Target edb %s does not exist") %options.tgt_edb
        return 1

    if options.codeIndex is None:
        log.error("No code index specified")
        return 1

    if options.codeValue is None:
        log.error("No code value specified")
        return 1

    log.info("Reading subdb...")
    src_subdb=Subdb(src_edb)
    src_subdb.read()

    log.info("Reading sourcedb...")
    src_sourcedb=Sourcedb(src_edb)
    src_sourcedb.read()
    

    tgt_sourcedb=Sourcedb(tgt_edb)
    tgt_sourcedb.read()

    #log.info("Reading subgrpdb")
    #src_subgrpdb=Subgrpdb(src_edb)
    #src_subgrpdb.read()

    log.info("Reading edb.rsrc")
    src_rsrc=Rsrc(src_edb.rsrcPath())
    tgt_rsrc=Rsrc(tgt_edb.rsrcPath())

    log.info("Reading target sourcedb...")


    acCodeTables=[]
    for i in range(tgt_rsrc.numberOfCodeTrees("ac")):
        acCodeTables.append(CodeTable(tgt_rsrc.path,codeType="ac",codeIndex=i+1))

    gcCodeTables=[]
    for i in range(tgt_rsrc.numberOfCodeTrees("gc")):
        gcCodeTables.append(CodeTable(tgt_rsrc.path,codeType="gc",codeIndex=i+1))



    copiedSources=0
    readSources=0
    for src in src_sourcedb.sources:
        hasEmis=False
        src["PX"]=0
        src["PY"]=0
                      
        toBeRemoved=[]
        for substInd,emis in src.subst_emis.iteritems():
            ac= emis["ACTCODE"][int(options.codeIndex)-1]
            ac=[c for c in ac if c!="-"]
            emis["ACTCODE"][int(options.codeIndex)-1]=ac #Remove '-'from code

            ac=".".join(ac)
            
            if ac!=options.codeValue:
                toBeRemoved.append(substInd)

        for key in toBeRemoved:
            src.removeSubstEmis(key)

        if src.subst_emis!={}:
            hasEmis=True
        
        toBeRemoved=[]
        for subgrpInd,emis in src.subgrp_emis.iteritems():
            ac= emis["ACTCODE"][int(options.codeIndex)-1]
            ac=[c for c in ac if c!="-"]
            emis["ACTCODE"][int(options.codeIndex)-1]=ac #Remove '-'from code
            ac=".".join(ac)

            
            if ac!=options.codeValue:
                toBeRemoved.append(subgrpInd)

        for key in toBeRemoved:
            src.removeSubgrpEmis(key)

        if src.subgrp_emis!={}:
            hasEmis=True


        toBeRemoved=[]
        for actInd,emis in src.activity_emis.iteritems():
            ac= emis["ACTCODE"][int(options.codeIndex)-1]
            ac=[c for c in ac if c!="-"]
            emis["ACTCODE"][int(options.codeIndex)-1]=ac #Remove '-'from code
            ac=".".join(ac)
            
            if ac!=options.codeValue:
                toBeRemoved.append(actInd)

        for key in toBeRemoved:
            src.removeActivityEmis(key)

        if src.activity_emis!={}:
            hasEmis=True

        readSources+=1
        if hasEmis:
            copiedSources+=1
            tgt_sourcedb.sources.append(src)

    if options.outfile is None:
        log.info("Writing sources to target edb")
    else:
        log.info("Writing sources to file")

    tgt_sourcedb.write(filename=options.outfile,force=True)

    log.info("Successfully Copied %i out of %i sources" %(copiedSources,readSources))
Code example #2
0
File: csv2sourcedb.py — Project: SMHI-Apl/AQM-tools
def main():
    #-----------Setting up and unsing option parser-----------------------
    parser=OptionParser(usage= usage, version=version)
    
    parser.add_option("-u",'--user',
                      action="store",dest="user",
                      help="Name of target edb user")

    parser.add_option("-e","--edb",
                      action="store",dest="edb",
                      help="Name of target edb")

    parser.add_option("-i","--infile",
                      action="store",dest="infile",
                      help="Input csv file")
    
 #   parser.add_option("-y","--year",
 #                     action="store",dest="year",
 #                     help="Only store sources for given year")
    
    parser.add_option("-v",dest='loglevel',
                      action="store_const",default=get_loglevel(),
                      help="produce verbose output")

    parser.add_option("-t", "--template",
                      action="store",dest="cf",default=None,
                      help="Generate default controlfile")

    parser.add_option("-o", "--outfile",
                      action="store",dest="outfile",default=None,
                      help="Name of outfiles (without extension)")

    parser.add_option("-d","--delimiter",
                      action="store",dest="delimiter",default="\t",
                      help="Delimiter used in csv-file")

    parser.add_option("-c","--filterCol",
                      action="store",dest="filterCol",
                      help="Header of column to use as filter")

    parser.add_option("-f","--filterVal",
                      action="store",dest="filterVal",
                      help="Value to use in filter")


#    parser.add_option("-g", "--geocodeRasterDir",
#                      action="store",dest="geocodeRasterDir",default=None,
#                      help="Directory with geocode rasters")
    


    (options, args) = parser.parse_args()
    
    #--------------------Init logger-----------------------
#     rootLogger = logger.RootLogger(level=options.loglevel)
    logging.basicConfig(
            format='%(levelname)s:%(name)s: %(message)s',
            level=options.loglevel,
    )
    global log
#     log = rootLogger.getLogger(sys.argv[0])
    log = logging.getLogger(parser.prog)

    #-----------------Validating options-------------------

    if options.cf is not None:
        generateCf(path.abspath(options.cf),controlFileTemplate)
        log.info("Wrote default controlfile")
        return 1

    if options.user is None:
        log.error("Need to specify -u <user>")
        return 1
    if options.edb is None:
        log.error("Need to specify -e <edb>")
        return 1
#    if options.year is None:
#        log.error("Need to specify -y <year>")
#        return 1
#    if len(options.year)!=4:
#        log.error("Year should be given with four digits")
#        return 1


    if len(args)!=1:
        log.error("Controlfile should be given as argument")
        return 1

    dmn=Domain()
    edb=Edb(dmn,options.user,options.edb)
    if not edb.exists():
        log.error("Edb %s does not exist" %options.edb)
        return 1

    log.info("Parsing controlfile")

    cf=ControlFile(args[0])
    cdbPars=re.compile("companydb\.par\.(\w*?):").findall(cf.content)
    fdbPars=re.compile("facilitydb\.par\.(\w*?):").findall(cf.content)
    sdbPars=re.compile("sourcedb\.par\.(\w*?):").findall(cf.content)
    substEmisNr=re.compile("sourcedb\.subst_emis\.([0-9]*)\.emis").findall(cf.content)
    subgrpEmisNr=re.compile("sourcedb\.subgrp_emis\.([0-9]*)\.emis").findall(cf.content)

    cdbCols={}
    cdbDefaults={}
    for par in cdbPars:
        cdbCols[par]=cf.findString("companydb.par.%s:" %par)
        cdbDefaults[par]=cf.findString("companydb.par.%s.default:" %par,
                                       optional=True,default=None)
    fdbCols={}
    fdbDefaults={}
    for par in fdbPars:
        fdbCols[par]=cf.findString("facilitydb.par.%s:" %par)
        fdbDefaults[par]=cf.findString("facilitydb.par.%s.default:" %par,
                                       optional=True,default=None)

    sdbCols={}
    sdbDefaults={}
    for par in sdbPars:
        sdbCols[par]=cf.findString("sourcedb.par.%s:" %par)
        sdbDefaults[par]=cf.findString("sourcedb.par.%s.default:" %par,
                                       optional=True,default=None)

    substEmisCols={}
    substEmisDefaults={}
    if substEmisNr is not None:
        for emisNr in substEmisNr:
            cols={}
            defaults={}
            emisPars=re.compile("sourcedb\.subst_emis\.%s\.(\w*?):" %(emisNr)).findall(cf.content)
            emisDefaultPars=re.compile(
                "sourcedb\.subst_emis\.%s\.(\w*?)\.default:" %(emisNr)).findall(cf.content)
            if emisPars is not None:
                for par in emisPars:            
                    cols[par]=cf.findString("sourcedb.subst_emis.%s.%s:" %(emisNr,par))        
            if emisDefaultPars is not None:        
                for par in emisDefaultPars:    
                    defaults[par]=cf.findString("sourcedb.subst_emis.%s.%s.default:" %(emisNr,par),
                                                optional=True,default=None)
            substEmisCols[emisNr]=cols
            substEmisDefaults[emisNr]=defaults

    subgrpEmisCols={}
    subgrpEmisDefaults={}
    if subgrpEmisNr is not None:
        for emisNr in subgrpEmisNr:
            cols={}
            defaults={}
            emisPars=re.compile("sourcedb\.subgrp_emis\.%s\.(\w*?):" %(emisNr)).findall(cf.content)
            emisDefaultPars=re.compile(
                "sourcedb\.subgrp_emis\.%s\.(\w*?)\.default:" %(emisNr)).findall(cf.content)
            if emisPars is not None:
                for par in emisPars:            
                    cols[par]=cf.findString("sourcedb.subgrp_emis.%s.%s:" %(emisNr,par))        
            if emisDefaultPars is not None:        
                for par in emisDefaultPars:    
                    defaults[par]=cf.findString("sourcedb.subgrp_emis.%s.%s.default:" %(emisNr,par),
                                                optional=True,default=None)
            subgrpEmisCols[emisNr]=cols
            subgrpEmisDefaults[emisNr]=defaults
        

    log.info("Reading subdb...")
    subdb=Subdb(edb)
    subdb.read()

    log.info("Reading companydb...")
    companydb=Companydb(edb)
    companydb.read()

    log.info("Reading sourcedb...")
#     source_stream = SourceStream(edb, 'w')
    source_stream = open(options.outfile, 'w')
    source_writer = ModelWriter(source_stream,encoding="HP Roman8")


    log.info("Reading facilitydb...")
    facilitydb=Facilitydb(edb)
    facilitydb.read()


    log.info("Reading subgrpdb")
    subgrpdb=Subgrpdb(edb)

    subgrpdb.read()

    log.info("Reading edb.rsrc")
    rsrc=Rsrc(edb.rsrcPath())
    
    acCodeTables=[]
    for i in range(rsrc.numberOfCodeTrees("ac")):
        acCodeTables.append(CodeTable(rsrc.path,codeType="ac",codeIndex=i+1))

    gcCodeTables=[]
    for i in range(rsrc.numberOfCodeTrees("gc")):
        gcCodeTables.append(CodeTable(rsrc.path,codeType="gc",codeIndex=i+1))
        

    geocodeRasters=[]
    rast1=Raster()
    rast1.read("/usr/airviro/data/geo/topdown/dynamicRasters/dynamic__GEOCODE__1.txt")
    rast2=Raster()
    rast2.read("/usr/airviro/data/geo/topdown/dynamicRasters/dynamic__GEOCODE__2.txt")
    geocodeRasters.append(rast1)
    geocodeRasters.append(rast2)

    log.info("Reading csv-file")
    table=DataTable()
    table.read(options.infile,delimiter=options.delimiter,encoding="ISO-8859-15")

    if options.filterCol is not None:
        if options.filterCol not in table.colIndex:
            log.error("Filter column header not found in table")
            sys.exit(1)

    invalid=False
    nFiltered=0
    nRows=0

    log.info("Processing rows")
    for rowInd,row in enumerate(table.data):
        nRows+=1
        if options.filterCol is not None:
            filterVal=row[table.colIndex[options.filterCol]]
            if options.filterVal!=str(filterVal):
                nFiltered+=1
                continue

        comp = Company()
        for par in comp.parOrder:
            val=cdbDefaults.get(par,None)
            if par in cdbCols:
                colId=cdbCols[par]
                try:
                    tableVal=row[table.colIndex[colId]]
                except KeyError:
                    log.error(
                        "No column with header %s, columns: %s" %(
                            colId,str(table.listIds())))
                if tableVal is not None:
                    val = tableVal
            if val is not None:
                #Too long names are truncated
                if par=="NAME" and len(val)>45:
                    val=val[:45]
                comp[par]=val

        fac = Facility()
        for par in fac.parOrder:
            val=fdbDefaults.get(par,None)
            if par in fdbCols:
                colId=fdbCols[par]
                tableVal=row[table.colIndex[colId]]
                if tableVal is not None:
                    val = tableVal
            if val is not None:
                #Too long names are truncated
                if par=="NAME" and len(val)>45:
                    val=val[:45]
                fac[par]=val

        src = Source()       
        for par in ["X1", "Y1","X2","Y2",
                    "PX","PY","NAME","INFO","INFO2","DATE","CHANGED",
                    "CHIMNEY HEIGHT","GASTEMPERATURE","GAS FLOW",
                    "SEARCHKEY1","SEARCHKEY2","SEARCHKEY3",
                    "SEARCHKEY4","SEARCHKEY5","CHIMNEY OUT","CHIMNEY IN",
                    "HOUSE WIDTH","HOUSE HEIGHT","NOSEGMENTS","BUILD_WIDTHS",
                    "BUILD_HEIGHTS","BUILD_LENGTHS","BUILD_DISTFARWALL",
                    "BUILD_CENTER","GEOCODE","FORMULAMACRO","ALOB"]:
            val=sdbDefaults.get(par,None)
            if par in sdbCols:
                colId=sdbCols[par]
                tableVal=row[table.colIndex[colId]]
                if tableVal is not None:
                    val = tableVal
            if val is not None:
                #validate code
                if par=="GEOCODE" and val is not None:
                    gcList=val.split()
                    for codeIndex,code in enumerate(gcList):
                        if not gcCodeTables[codeIndex].hasCode(code):
                            log.error("Invalid geo code %s on row %i" %(code,rowInd))
                            invalid=True
                #Too long names are truncated
                if par=="NAME" and len(val)>45:
                    val=val[:45]

                #Store in src object and convert to correct type
                src._fieldvalues[par] = lazy_parse(
                    src, par, val)

        gc1=geocodeRasters[0].getVal(src.get_coord()[0],src.get_coord()[1])
        gc2=geocodeRasters[1].getVal(src.get_coord()[0],src.get_coord()[1])
        src.GEOCODE = [str(int(gc1)) + "." + str(int(gc2))]

        for emisNr,emis in substEmisCols.items():
            substEmis={"unit":None,"ac":None,"substance":None,"emis":None}

            for par in substEmis.keys():
                if par in emis:
                    substEmis[par]=row[table.colIndex[emis[par]]]
                else:
                    try:
                        substEmis[par]=substEmisDefaults[emisNr][par]
                    except KeyError:
                        log.error(
                            "Need to specify column or default value for subgrp emis %i" %emisNr)

            
            substInd=subdb.substIndex(substEmis["substance"])
            if substInd is None:
                log.error("Invalid substance name %s on row %i" %(
                        substEmis["substance"],rowInd))
                sys.exit(1)

            try:
                unit=rsrc.sub[substEmis["unit"]]
            except KeyError:
                log.error("Invalid unit name %s on row %i" %(emis["unit"],rowInd))
                sys.exit(1)

            acList=substEmis["ac"].split('\\')[0].split()
            for codeIndex,code in enumerate(acList):
#                 if code == "2.A.4.2":
#                     import pdb; pdb.set_trace()
                refCode = acCodeTables[codeIndex].checkCode(code)
                if refCode == "-":
                    log.error("Invalid activity code %s on row %i" %(code,rowInd))
                    sys.exit(1)
                if refCode != code:
                    acList[codeIndex] = refCode
            substEmis["ac"] = acList
            
            if substEmis["emis"] is not None and substEmis["emis"]!="0":
                try:
                    emis = src.add_emission()
                    emis.UNIT = substEmis["unit"] 
                    emis.ACTCODE = substEmis["ac"]  # needs re-formatting
                    emis.EMISSION = float(substEmis["emis"])
                    emis.SUBSTANCE = substInd

                    emis.auto_adjust_unit(edb) 


                except:            
#                     print substEmis
#                     log.error("Invalid substance emission on row %i" %rowInd)
                    invalid=True
                    src.EMISSION=src.EMISSION[:-1]


        for emis in subgrpEmisCols.values():
            subgrpEmis={"unit":None,"ac":None,"name":None,"emis":None}
            for par in subgrpEmis.keys():
                if par in emis:
                    subgrpEmis[par]=row[table.colIndex[emis[par]]]
                else:
                    try:
                        subgrpEmis[par]=subgrpEmisDefaults[emisNr][par]
                    except KeyError:
                        log.error(
                            "Need to specify column or default value for subgrp emis %i" %emisNr)

            #validating subgrp name
            try:                        
                subgrp=subgrpdb.getByName(subgrpEmis["name"])
            except KeyError:
                log.error("Invalid subgrp name %s on row %i" %(subgrpEmis["name"],rowInd))
                invalid=True

            #validating subgrp emis unit
            try:                
                unitFactor=rsrc.subGrpEm[subgrpEmis["unit"]]
            except KeyError:
                log.error("Invalid unit %s for subgrp emission on row %i" %(
                        subgrpEmis["unit"],rowInd))
                invalid=True

            #validating subgrp activity code
            acList=subgrpEmis["ac"].split()
            for codeIndex,code in enumerate(acList):
                refCode = acCodeTables[codeIndex].checkCode(code)
                if refCode == "-":
                    log.error("Invalid activity code %s on row %i" %(code,rowInd))
                    invalid=True
                    break
                if refCode != code:
                    acList[codeIndex] = refCode
            substEmis["ac"] = acList

            try:
                src.addSubgrpEmis(subgrp.index,emis=subgrpEmis["emis"],unit=subgrpEmis["unit"],
                                  ac=subgrpEmis["ac"])
            except:            
                log.error("Invalid subgrp emission on row %i" %rowInd)
                invalid=True

        companydb.append(comp,force=True)
        facilitydb.append(fac,force=True)
        source_writer.write(src)
#         sourcedb.append(src)

    if invalid:
        log.info("No output written due to validation errors")
        sys.exit(0)
    if len(companydb.items)>0:
        if options.outfile is None:
            log.info("Writing companydb")
        else:
            log.info("Writing company db to file")
        companydb.write(filename=options.outfile+".companydb")

    if len(facilitydb.items)>0:
        if options.outfile is  None:
            log.info("Writing facilitydb")
        else:
            log.info("Writing facilitydb to file")
        facilitydb.write(filename=options.outfile+".facilitydb")

#     if len(sourcedb.sources)>0:
#         if options.outfile is None:
#             log.info("Writing sourcedb")
#         else:
#             log.info("Writing sourcedb to file")
#     sourcedb.write(filename=options.outfile+".sourcedb")

    if options.filterCol is not None:
        log.info("Filtered out %i out of %i" %(nFiltered,nRows))
Code example #3
0
def main():
    """Export edb emissions per activity code to EMEP 50 km grid files.

    Sums grid and point-source emissions per activity code and substance,
    reprojects the totals from RT90 to the EMEP grid, and writes one
    directory per activity code with per-substance ascii rasters plus a
    CLRTAP summary table of all non-zero cells.
    """
    #-----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)

    parser.add_option("-l", "--loglevel",
                      action="store", dest="loglevel", default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")

    parser.add_option("-u", "--user",
                      action="store", dest="user", default=None,
                      help="Specify user manually")

    parser.add_option("-e", "--edb",
                      action="store", dest="edb", default=None,
                      help="Name of target edb")

    parser.add_option("-t", "--template",
                      action="store", dest="cf", default=None,
                      help="Generate default controlfile")

    parser.add_option("-f", "--force",
                      action="store_true", dest="force", default=False,
                      help="To start the process without confirming the domain")

    (options, args) = parser.parse_args()

    #------------Setting up logging capabilities -----------
    # Bug fix: a local 'logger = logging.getLogger(...)' shadowed the
    # logger module, so logger.RootLogger raised AttributeError here.
    rootLogger = logger.RootLogger(int(options.loglevel))
    log = rootLogger.getLogger(sys.argv[0])

    if options.cf is not None:
        generateCf(path.abspath(options.cf))
        log.info("Wrote default controlfile")
        sys.exit()

    if len(args) != 1:
        parser.error("Incorrect number of arguments")

    domainName = os.environ["AVDBNAME"]
    dmn = domain.Domain(domainName)

    if options.edb is None:
        parser.error("Need to specify edb using flag -e")
    if options.user is None:
        parser.error("Need to specify user using flag -u")

    if not options.force:
        # Interactive safety check before touching the domain database.
        answer = raw_input("Chosen dbase is: " + domainName + ", continue(y/n)?")
        if answer == "y":
            dmn = domain.Domain()
        else:
            sys.exit("Interrupted by user")

    if not dmn.edbExistForUser(options.edb, options.user):
        log.error("Edb " + options.edb + " does not exist for user " +
                  options.user + " in domain " + domainName)
        sys.exit()

    edb = Edb(dmn.name, options.user, options.edb)
    # Bug fix: assigning to 'rsrc' shadowed the rsrc module.
    edbRsrc = rsrc.Rsrc(edb.rsrcPath())

    # Opening controlfile
    cf = controlfile.ControlFile(fileName=path.abspath(args[0]))

    substances = cf.findStringList("substances:")
    outputDir = cf.findExistingPath("outputDir:")
    acIndex = cf.findInt("acIndex:")

    # Get activity code tree
    codes = codetable.CodeTable(edb.rsrcPath(), acIndex=acIndex)

    # Bug fix: assigning to 'datadb' shadowed the datadb module.
    dataDb = datadb.Datadb(dmn, options.user, edb.name)
    dataDb.read()

    substDict = dmn.listSubstanceIndices()

    # List grid names
    gridNames = edb.listGrids()

    for ac in codes.ac:
        if ac == "all":
            continue
        # Bug fix: removed a leftover pdb.set_trace() breakpoint here.
        log.debug("Activity code: " + ac)
        dataMarker = emepgrid.emepRaster()
        rasterDict = {}
        substancesWithData = []
        # Rereads grid list for each category to not fill the memory with
        # emission grids; grid data is only read for grids with current ac.
        gridList = []
        for gridName in gridNames:
            grd = Egrid(dmn.name, options.user, edb.name, gridName)
            grd.readAsc()
            if grd.hasFuel():
                # Bug fix: 'break' abandoned all remaining grids; only the
                # unsupported fuel grid should be skipped.
                log.warning("Only support for non-fuel grids implemented, "
                            "no processing done for grid " + gridName)
                continue
            gridList.append(grd)

        for subst in substances:
            log.debug("Substance: " + subst)
            substInd = substDict[subst]
            # Accumulator raster; extent/resolution match the national grids.
            totEmisRast = raster.Raster(Xll=1190000, Yll=6110000,
                                        Ncols=720, Nrows=1560,
                                        Cellsize=1000, init=0)

            for grd in gridList:
                if grd.par["ACTIVITYCODE"].val[0] == ac:
                    if len(grd.substances) == 0:
                        grd.readData()  # lazy read: only grids with this ac
                    totEmisRast = totEmisRast + grd.substances.get(substInd, 0)

            for src in dataDb.sources:
                if src.par["ACTIVITYCODE"].val[0] == ac:
                    row, col = totEmisRast.getIndex(src.par["X1"].val,
                                                    src.par["Y1"].val)
                    totEmisRast.data[row, col] += src.getEmis(substInd, edbRsrc,
                                                              "ton/year")

            # Bug fix: removed a leftover pdb.set_trace() breakpoint here.
            if not totEmisRast.sum() == 0:
                if subst not in substancesWithData:
                    substancesWithData.append(subst)

                # Swedish RT90 projection definition for the reprojection.
                __RT90__ = "+proj=tmerc +lat_0=0 +lon_0=15d48\\'29.8\\\" +x_0=1500000 +k_0=1 +ellps=bessel +towgs84=414.1,41.3,603.1,-0.855,2.141,-7.023,0"
                emepRast = emepgrid.sortToEmep(totEmisRast, __RT90__, printInfo=True)

                # Mark cells that have data for any substance in this category.
                dataMarker.data = numpy.where(emepRast.data > 0, 1, dataMarker.data)
                rasterDict[subst] = emepRast

                categoryDirPath = path.join(outputDir, ac)
                if not path.isdir(categoryDirPath):
                    os.mkdir(categoryDirPath)
                fileName = path.join(categoryDirPath, "Emep50km_" + subst + ".asc")
                emepRast.write(fileName)
                # Bug fix: missing space before 'written' in the message.
                log.info("Emissions in EMEP-projection for substance: " +
                         subst + " written to outputDir for category: " + ac)

        if len(rasterDict) > 0:
            # creating substance header in the same order as the substances
            # in the template
            header = "i\tj\t"
            for s in substancesWithData:
                header += s + "\t"
            # remove the tab after the last column and add a newline instead
            header = header[:-1] + "\n"

            # Creating file for EMEP-data
            fileName = "CLRTAP_" + ac + ".txt"
            categoryDirPath = path.join(outputDir, ac)
            if not path.isdir(categoryDirPath):
                os.mkdir(categoryDirPath)
            fid = open(path.join(categoryDirPath, fileName), 'w')
            fid.writelines(header)

            # Writing indexes and data for all non-zero elements
            for row in range(dataMarker.nrows):
                for col in range(dataMarker.ncols):
                    if dataMarker.data[row, col] > 0:
                        (i, j) = dataMarker.getCentreCoords(row, col)
                        fid.write(str(i) + "\t" + str(j) + "\t")
                        for substWithData in substancesWithData[:-1]:
                            fid.write(str(rasterDict[substWithData].data[row, col]) + "\t")
                        fid.write(str(rasterDict[substancesWithData[-1]].data[row, col]) + "\n")
            fid.close()
            log.info("wrote emissions to clrtap-file: " + path.join(categoryDirPath, fileName))
    log.info("Finished")
Code example #4
0
def main():
    """Command-line entry point: prepare a road export from an Airviro edb.

    Parses options, confirms the target domain/edb/user, then reads export
    settings from a controlfile: the road shapefile, an optional geographic
    filter polygon layer and an optional default vehicle composition table.

    NOTE(review): this excerpt ends right after the input tables have been
    read; the road processing itself presumably follows in the full source.
    """
    #-----------Setting up and using option parser-----------------------
    parser=OptionParser(usage= usage, version=version)
    
    parser.add_option("-l", "--loglevel",
                      action="store",dest="loglevel",default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    
    parser.add_option("-u", "--user",
                      action="store", dest="user", default=None,
                      help="Specify user manually")    

    parser.add_option("-e", "--edb",
                      action="store", dest="edb", default=None,
                      help="Name of target edb")

    parser.add_option("-t", "--template",
                      action="store",dest="cf",default=None,
                      help="Generate default controlfile")

    parser.add_option("-f", "--force",
                      action="store_true",dest="force",default=False,
                      help="To start the process without confirming the domain")

    parser.add_option("-a", "--attributeFilter",
                      action="store",dest="attributeFilter",default=None,
                      help="Set to filter out roads with the specified attribute value, attribute field name is set in controlfile")

    parser.add_option("-o","--outfile",
                      action="store",dest="outfile",default=None,
                      help="Output road ascii file")

    parser.add_option("-g","--geofilter",
                      action="store",dest="geoFilter",default=None,
                      help="Filter out roads within polygons with field value matching the geoFilter, specify shapefile in controlfile")

    parser.add_option("--scaleADT",
                      action="store",dest="ADTScaleFactor",default=None,
                      help="Scale ADT with a factor")
            
    (options, args) = parser.parse_args()

    #--------------------Init logger-----------------------
    rootLogger = logger.RootLogger(options.loglevel,format="%(message)s")
    global log
    log = rootLogger.getLogger(sys.argv[0])

    #-----------------Validating options-------------------

    # -t only writes a controlfile template and exits; no edb access needed.
    if options.cf!=None:
        #Checks if the file already exists, prompt the user if overwrite is wanted, create file
        controlfile.generateCf(path.abspath(options.cf),controlFileTemplate)
        log.info("Wrote default controlfile")
        sys.exit()

    if len(args)!=1:
        parser.error("Incorrect number of arguments")
    
    if options.edb ==None:
        parser.error("Need to specify edb using flag -e")
    if options.user ==None:
        parser.error("Need to specify user using flag -u")
    if options.attributeFilter is not None:
        attributeFilter=options.attributeFilter
    else:
        attributeFilter=None

    
    # The Airviro domain (database) is taken from the environment.
    domainName=os.environ["AVDBNAME"]
    dmn = domain.Domain(domainName)        

    # NOTE(review): on confirmation dmn is re-created *without* the domain
    # name argument — verify both Domain constructors resolve to the same
    # database before relying on this.
    if not options.force:
        answer=raw_input("Chosen dbase is: "+domainName+",continue(y/n)?")    
        if answer=="y":
            dmn=domain.Domain()            
        else:
            sys.exit(1)
    
    if not dmn.edbExistForUser(options.edb,options.user):
        log.error("Edb "+options.edb+" does not exist for user "+
                     options.user+" in domain "+domainName)    
        sys.exit(1)

    #Creating edb object
    edb=Edb(dmn.name,options.user,options.edb)

    #Create edb rsrc object
    rsrc=Rsrc(edb.rsrcPath())
    
    #Creating a roaddb object
    roaddb=Roaddb(dmn,options.user,edb.name)
    
    #Creating a control-file object (simple parser)
    cf=controlfile.ControlFile(fileName=path.abspath(args[0]),codec="latin6")

    #Retrieving data from control file
    shapeFilePath = cf.findExistingPath("shapeFile:")
    asciiCodeTable = cf.findString("asciiCodeTable:",optional=True,default="latin6")
    vehCompTablePath = cf.findExistingPath("vehicleCompositionTable:",optional=True)
    attributeFilterFieldName=cf.findString("attributeFilterFieldName:",optional=True,default=None)

    #Loads driver to read shape-files using ogr-library
    driver = ogr.GetDriverByName('ESRI Shapefile')

    #If option for geoFilter is used, the polygon defining the boundaries
    #of the area to be filtered is read from geoFilterShapeFile
    if options.geoFilter is not None:
        gfShapeFilePath=cf.findExistingPath("geoFilterShapeFile:")
        gfFieldName=cf.findString("geoFilterFieldName:")
        gfShapeFile =  driver.Open(str(gfShapeFilePath), update=0)
        if gfShapeFile is None:
            log.error("Could not open data source: " +gfShapeFilePath)
            sys.exit(1)
        gfLayer = gfShapeFile.GetLayer()
        log.info("Found %i features in geocode shapefile" %gfLayer.GetFeatureCount())    
        # Collect all polygons whose filter field matches the requested value.
        geoFilterPolys=[]
        gfFeature = gfLayer.GetNextFeature()
        while gfFeature:
            geocode=gfFeature.GetFieldAsString(str(gfFieldName))
            if geocode==options.geoFilter:
                geoFilterPoly=gfFeature.GetGeometryRef()
                geoFilterPolys.append(geoFilterPoly)
            gfFeature = gfLayer.GetNextFeature()
            
        if len(geoFilterPolys)==0:
            log.error("Could not find any polygon with field value matching the specified geoFilter in shapeFile: %s, field: %s" %(gfShapeFilePath,gfFieldName))
            sys.exit(1)


    #Creating table for default vehicle composition
    if vehCompTablePath is not None:
        vehCompTable=datatable.DataTable()
        vehCompTable.read(vehCompTablePath)
        try:
            #Set columns used as unique row identifiers, raise error if they do not exist
            vehCompTable.setKeys(["Vehicle","Tatort"])
            # Fuel share columns must be numeric for later calculations.
            for fuel in ["bensin","ethanol","diesel","CNG","Totalt"]:
                vehCompTable.convertCol(fuel,float)

        except DataTableException,msg:
            log.error("Could not find column header in vehCompTable:"+msg)
            sys.exit(1)

        except ValueError:
            log.error("Could not find fuel %s among column headers in vehCompTable:" %fuel)
            sys.exit(1)
コード例 #5
0
def main():
    """Split a ship-emission edb into one new edb per viewport.

    Reads sources, emission factors and search keys once from the source
    edb, then creates one target edb per selected viewport containing the
    ships that visited that map area during the given year.

    Returns 0 on success, 1 on invalid options.
    """
    # ---- command line interface --------------------------------------
    parser = OptionParser(usage=usage, version=version)

    parser.add_option("-u", "--user",
                      action="store", dest="user",
                      help="Name of target edb user")

    parser.add_option("-e", "--edb",
                      action="store", dest="edb",
                      help="Name of target edb")

    parser.add_option("-v", "--viewports",
                      action="store", dest="viewports",
                      help="Comma-separated list of area id's to be cut out, default is all")

    parser.add_option("-y", "--year",
                      action="store", dest="year",
                      help="Cut out for given year")

    parser.add_option("-l", "--loglevel",
                      action="store", dest="loglevel", default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")

    parser.add_option("-s", "--suffix",
                      action="store", dest="suffix", default="v1",
                      help="Sets suffix to names of generated edb's to support version management, default is 'v1'")

    (options, args) = parser.parse_args()

    # ---- logger ------------------------------------------------------
    rootLogger = logger.RootLogger(level=options.loglevel)
    global log
    log = rootLogger.getLogger(sys.argv[0])

    # ---- option validation (guard clauses) ---------------------------
    if options.user is None:
        log.error("Need to specify -u <user>")
        return 1
    if options.edb is None:
        log.error("Need to specify -e <edb>")
        return 1
    if options.year is None:
        log.error("Need to specify -y <year>")
        return 1
    if len(options.suffix) > 4:
        log.error("Limit your suffix length to 4 characters")
        return 1
    if len(options.year) != 4:
        log.error("Year should be given with four digits")
        return 1

    dmn = Domain()

    # Explicit id list from the command line, otherwise every viewport
    # known to the domain.
    if options.viewports is not None:
        requestedIds = options.viewports.split(",")
    else:
        requestedIds = dmn.listViewports()

    selectedViewports = []
    for viewportId in requestedIds:
        viewport = ViewPort()
        viewport.read(path.join(dmn.wndPath(), "modell.par"), viewportId)
        selectedViewports.append(viewport)

    sourceEdb = Edb(dmn, options.user, options.edb)

    # Read everything once up front; the same in-memory databases are
    # re-targeted at each new edb below.
    log.info("Reading sourcedb...")
    sourcedb = Sourcedb(sourceEdb)
    sourcedb.read()

    log.info("Reading emfacdb...")
    emfacdb = Emfacdb(sourceEdb)
    emfacdb.read()

    log.info("Reading subdb...")
    subdb = Subdb(sourceEdb)
    subdb.read()

    rsrcPath = sourceEdb.rsrcPath()

    for viewportIndex, viewport in enumerate(selectedViewports):
        targetName = viewport.code + "_" + options.year + "_" + options.suffix
        targetEdb = Edb(dmn, options.user, targetName)
        if targetEdb.exists():
            log.info("Edb %s already exists, remove first to update" % targetName)
            continue
        targetEdb.create(edbRsrc=rsrcPath)
        log.info("Created empty edb %s" % targetName)

        # Copy search keys and emission factors into the new edb.
        subdb.setEdb(targetEdb)
        subdb.write()
        log.info("Wrote searchkeys")

        emfacdb.setEdb(targetEdb)
        emfacdb.write()
        log.info("Wrote emfacdb")

        extracted = Sourcedb(targetEdb)

        log.info("Cutting out viewport %s (%i/%i)" % (viewport.code, viewportIndex + 1, len(selectedViewports)))
        for sourceIndex, source in enumerate(sourcedb.sources):
            # Keep only the ships that visited this map area in the year.
            if includeShip(source, viewport.code, source["Y1"], options.year):
                log.debug("Ship %i/%i included in %s" % (sourceIndex + 1, len(sourcedb.sources), targetEdb.name))
                extracted.sources.append(source)
        extracted.write()
        log.info("Wrote exatracted sources to %s" % targetEdb.name)
        targetEdb.setDesc("This edb has been extracted from %s under user %s, " % (sourceEdb.name, sourceEdb.user) +
                          "and includes all ships that have visited the map area %s (%s) during %s\n" % (viewport.code, viewport.name, options.year))
    log.info("Finished!")
    return 0
コード例 #6
0
def main():
    #-----------Setting up and unsing option parser-----------------------
    parser=OptionParser(usage= usage, version=version)
    
    parser.add_option("-l", "--loglevel",
                      action="store",dest="loglevel",default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")
    
    parser.add_option("-u", "--user",
                      action="store", dest="user", default=None,
                      help="Specify user manually")    

    parser.add_option("-e", "--edb",
                      action="store", dest="edb", default=None,
                      help="Name of target edb")

    parser.add_option("-t", "--template",
                      action="store",dest="cf",default=None,
                      help="Generate default controlfile")

    parser.add_option("-f", "--force",
                      action="store_true",dest="force",default=False,
                      help="To start the process without confirming the domain")

        
    (options, args) = parser.parse_args()

    # Setup logging
    logging.configure(terminal_level=logging.DEBUG)
    log = logging.getLogger(__name__)


    if options.cf!=None:
        generateCf(path.abspath(options.cf))
        log.info("Wrote default controlfile")
        sys.exit()

    if len(args)!=1:
        parser.error("Incorrect number of arguments")
    
    if options.edb ==None:
        parser.error("Need to specify edb using flag -e")
    if options.user ==None:
        parser.error("Need to specify user using flag -u")        

    dmn = Domain()

    if not options.force:
        answer=raw_input("Chosen dbase is: "+dmn.name+",continue(y/n)?")    
        if answer!="y":
            sys.exit("Interrupted by user")
    
    if not dmn.edbExistForUser(options.edb,options.user):
        log.error("Edb "+options.edb+" does not exist for user "+
                     options.user+" in domain "+dmn.name)    
        sys.exit()
    #---Creating edb and rsrc objects------------------
    edb=Edb(dmn,options.user,options.edb)
    rsrc=Rsrc(edb.rsrcPath())

    #Opening controlfile
    #---retrieving data from control file----
    cf=ControlFile(fileName=path.abspath(args[0]))    
    substances=cf.findStringList("substances:")
    outputDir=cf.findExistingPath("outputDir:")
    acIndex=cf.findInt("acIndex:")
    macroFileName=path.abspath(cf.findExistingPath("xrepedbMacro:"))

    fromProj=cf.findString("fromProj:")
    toProj=cf.findString("toProj:")

    try:
        fromProj=transcoord.proj4Dict[fromProj]
    except KeyError:
        log.error("Projection %s not found in proj4Dictin transCoord.py" %fromProj)

    try:
        toProj=transcoord.proj4Dict[toProj]
    except KeyError:
        log.error("Projection %s not found in proj4Dictin transCoord.py" %toProj)

    formats = cf.findStringList("formats:")
    units = cf.findString("units:")

    writeGrids=cf.findBoolean("writeGrids:",optional=True,default=True)
    
    edb_xll=cf.findInt("edb_xll:")
    edb_yll=cf.findInt("edb_yll:")
    edb_ncols=cf.findInt("edb_ncols:")
    edb_nrows=cf.findInt("edb_nrows:")
    edb_cellsize=cf.findFloat("edb_cellsize:")

    if fromProj!=toProj:
        out_xll=cf.findFloat("out_xll:")
        out_yll=cf.findFloat("out_yll:")
        out_ncols=cf.findInt("out_ncols:")
        out_nrows=cf.findInt("out_nrows:")
        out_cellsize=cf.findFloat("out_cellsize:")

    #-----------------------------------------

    #Finds index to search units    
    unitIndex=None
    for key,unit in rsrc.search.iteritems():
        if isinstance(key,int):
            if rsrc.search[key]==units:
                unitIndex=key
                break

    if unitIndex is None:
        log.error("Search units: %s not defined in edb.rsrc" %units)
        sys.exit()
    
    macro = ControlFile(fileName=macroFileName,removeComments=False)
    #preparing export macro
    macro.setParam("general.database:",dmn.name)
    xmin=edb_xll
    xmax=edb_xll+edb_ncols*edb_cellsize
    ymin=edb_yll
    ymax=edb_yll+edb_nrows*edb_cellsize
    macro.setParam("edb.mapopt.bounds:", "%i %i %i %i" %(xmin, xmax, ymin, ymax))
    macro.setParam("edb.user:"******"edb.edb:",edb.name)
    macro.setParam("REGION     :","%i %i %i %i" %(xmin, xmax, ymin, ymax))
    macro.setParam("USER          :"******"EDB           :",edb.name)
    macro.setParam("GRID          :",
                   "%i %i %i %i %i %i" %(edb_xll,edb_yll,edb_ncols,edb_nrows,edb_cellsize,edb_cellsize))
    macro.setParam("edb.unit:",unitIndex)
    macro.setParam("UNIT        :",unitIndex)
#     macro.setParam("NOACTCODE  :",acIndex)
    macro.setParam("NOACTCODE  :",len(rsrc.ac))
    #Get activity code tree
    acTree=codeemistree.CodeEmisTree("Activity codes",units=units)
    acTree.readActivityCodes(rsrc.path,acIndex)

    substDict=dmn.listSubstanceIndices()


    edbRast =  Raster(Xll=edb_xll,Yll=edb_yll,Ncols=edb_ncols,
                      Nrows=edb_nrows,Cellsize=edb_cellsize,
                      Nodata=-9999,init=0)
    if fromProj!=toProj:
        outRastTemplate = Raster(Xll=out_xll,Yll=out_yll,Ncols=out_ncols,
                                 Nrows=out_nrows,Cellsize=out_cellsize,
                                 Nodata=-9999)
    else:
        outRastTemplate=Raster()
        outRastTemplate.assign(edbRast)

            
    for node in acTree.root.getiterator():
        if node.tag=="root" or node.tag=="Record":
            continue

        ac=node.tag
        log.debug("Activity code: "+ac)
        #Finds row index for activity codes in macro
        #Add a row with the current ac
        #If there are many ac already given, these are
        #replaced by the current ac
        macroLines=macro.content.split("\n")
        actCodeInd=None
        geoCodeInd=None
        for lineInd,line in enumerate(macroLines):
            if "NOACTCODE" in line:
                actCodeInd=lineInd
            if "NOGEOCODE" in line:
                geoCodeInd=lineInd

        if len(ac.split('.')) >= rsrc.ac[acIndex-1].depth:
            macroLines=macroLines[:actCodeInd+1]+["none"]*(acIndex-1)+[ac]+["none"]*(len(rsrc.ac)-acIndex)+macroLines[geoCodeInd:]
        else:
            macroLines=macroLines[:actCodeInd+1]+["none"]*(acIndex-1)+[ac+'.']+["none"]*(len(rsrc.ac)-acIndex)+macroLines[geoCodeInd:]
        macro.content="\n".join(macroLines)
        macro.write()
        

        #boolean raster marking where there is data for any of the substances
        if 'CLRTAP' in formats:
            dataMarker = Raster(Xll=out_xll,Yll=out_yll,Ncols=out_ncols,
                                Nrows=out_nrows,Cellsize=out_cellsize,
                                Nodata=-9999,init=0)
        rasterDict={}
        substancesWithData=[]
        for substance in substances:
            log.debug("Substance %s" %substance)
            substanceIndex=substDict[substance]
            macro.setParam("ELEMENT    :",substanceIndex)
            macro.write()
            command="xrepedb -i "+macro.name
            (returnCode,errMsg,outMsg)=utilities.execute(command)
            tmp=outMsg.split()[10:-2]
            tmp.sort()
            if tmp[0] == '0.000000E+00' and tmp[-1] == '0.000000E+00':
                print "ERROR: The field for "+substance+" is empty!"
                continue
#             pdb.set_trace()
            emisRast=string2rast(outMsg,edbRast)
            emisSum=emisRast.sum()

            outRast=Raster()
            outRast.assign(outRastTemplate)

            rec=ET.SubElement(node,"Record")
            rec.attrib["substance"]=substance
            rec.attrib["emission"]=str(emisSum)            
            if emisSum>0 and writeGrids:
                if substance not in substancesWithData:
                    substancesWithData.append(substance)

                if fromProj!=toProj:
                    exportRast = transcoord.transformEmisRaster(emisRast,outRast,fromProj,toProj,tmpDir=dmn.tmpDir())
                else:
                    exportRast=emisRast

                if 'CLRTAP' in formats:
                    dataMarker.data = numpy.where(exportRast.data > 0, 1, dataMarker.data)
                    rasterDict[substance]=exportRast

                categoryDirPath = path.join(outputDir, ac)
                if not path.isdir(categoryDirPath):
                    os.mkdir(categoryDirPath)

                if 'ESRI Ascii grid' in formats:
                    fileName = path.join(categoryDirPath, substance+ ".asc")
                    exportRast.write(fileName)
                    log.debug("Grid for " + substance + "written to outputDir for category: " + ac)
          
        summaryTable=acTree.createTable(writeAll=True)
        summaryTable.sortRows()
        tableFile=open(path.join(outputDir,"summaryTable.txt"),'w')
        summaryTable.write(tableFile)

        if len(rasterDict)>0 and 'CLRTAP' in formats:
            #creating substance header in the same order as the substances in the template
            header = "i\tj\t"
            #headerList=["SO2","NOx","NH3","NMVOC","CO","TSP","PM10","PM25","Pb ","Cd","Hg","As","Cr","Cu","Ni","Se","Zn","Aldrin","Chlordane","Chlordecone","Dieldrin","Endrin","Heptachlor","Hexabromobiphenyl","Mirex","Toxaphene","HCH","DDT","PCB","DIOX","PAH","HCB","PCP","SCCP"]
            for s in substancesWithData:
                header += s + "\t"
            #remove the tab after the last column and add a newline instead
            header = header[: - 1]+ "\n"

            #Creating file for EMEP-data                    
            fileName = "CLRTAP_" + ac + ".txt"
            categoryDirPath = path.join(outputDir, ac)
            if not path.isdir(categoryDirPath):
                os.mkdir(categoryDirPath)
            fid = open(path.join(categoryDirPath, fileName), 'w')
            fid.writelines(header)

            sum=0
            #Writing indexes and data for all non-zero elements                
            for row in range(dataMarker.nrows):
                for col in range(dataMarker.ncols):
                    if dataMarker.data[row, col] > 0:
                        (i, j) = dataMarker.getCentreCoords(row, col)
                        fid.write(str(i) + "\t" + str(j) + "\t")
                        for substWithData in substancesWithData[:-1]:
                            fid.write(str(rasterDict[substWithData].data[row, col]) + "\t")
                            sum+=rasterDict[substWithData].data[row, col]
                        fid.write(str(rasterDict[substancesWithData[-1]].data[row, col]) + "\n")
                        sum+=rasterDict[substancesWithData[-1]].data[row, col]
            fid.close()
            log.info("wrote emissions to clrtap-file: " + path.join(categoryDirPath, fileName))
    log.info("Finished")
コード例 #7
0
ファイル: modifyEdb.py プロジェクト: SMHI-Apl/Airviro-tools
def main():

    actionList = [
        'addSubstance',
        'removeSubstance',
        'mapSubstances',
        'addCodeTree',
        'removeCodeTree',
        'moveCodeTree',
        'extractByGeocode',
        'uniteGrids',
        'scale'
    ]

    # setting up parser
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    utils.add_standard_command_options(parser)

    parser.add_argument("-d", "--doc",
                        action="store_true", dest="doc",
                        help="Prints more detailed documentation and exit")

    parser.add_argument("-u", "--user",
                        action="store", dest="user", default=None,
                        help="Specify user manually")

    parser.add_argument("-e", "--edb",
                        action="store", dest="edb", default=None,
                        help="Name of target edb")

    parser.add_argument("-s", "--substance", action="store",
                        dest="substance",
                        help="Name of substance to add or remove")

    parser.add_argument("--substanceFactorFile", action="store",
                        dest="substanceFactorFile",
                        help="File with substance factors")

    parser.add_argument("--substanceMapping", action="store",
                        dest="substanceMapping",
                        help="File with mapping for substance indices")

    parser.add_argument("--codeMapping", action="store",
                        dest="codeMapping",
                        help="File with mapping from old codes to new")

    parser.add_argument("--edbMapping", action="store",
                        dest="edbMapping",
                        help="File with mapping from gc to edb")

    parser.add_argument("-f", "--force",
                        action="store_true", dest="force", default=False,
                        help="Start the process without confirming the domain")

    parser.add_argument("--factor",
                        action="store", type=float, dest="factor",
                        help="Factor used to scale emissions")

    parser.add_argument(
        "-n", "--names", action="store",
        dest="sourceNames", default=['.*'],
        nargs='*',
        help="Space separated list of regular expressions"
    )

    parser.add_argument(
        "--sourceTypes", action="store",
        dest="sourceTypes",
        nargs='*',
        default=['griddb', 'sources'],
        choices=["griddb", "sources", "subgrpdb", "svehdb", "vehdb"],
        help="Source types to process"
    )

    parser.add_argument(
        "--codeType", action="store",
        dest="codeType", default='ac',
        help="Code type to process"
    )

    parser.add_argument(
        "--codeIndex", action="store",
        dest="codeIndex", default=1, type=int,
        help="Index of code tree to use in processing"
    )

    parser.add_argument(
        "--newIndex", action="store",
        dest="newIndex", default=None,
        help="New index when moving an already defined code tree"
    )

    parser.add_argument(
        "--codeLevel", action="store",
        dest="codeLevel", default=1, type=int,
        help="Level of code tree to use in processing")

    parser.add_argument(
        "--actions", action="store",
        dest="actions",
        nargs='*',
        choices=actionList,
        help="Action list"
    )

    args = parser.parse_args()

    if args.doc:
        print doc
        sys.exit()

#     if len(args) > 0:
#         log.error("Incorrect number of arguments")
#         sys.exit(1)

    if args.edb is None:
        log.error("Need to specify edb")
        sys.exit(1)

    if args.user is None:
        log.error("Need to specify user")
        sys.exit(1)

    dmn = Domain()
    edb = Edb(dmn, args.user, args.edb)
    subdb = Subdb(edb)
    subdb.read()
    rsrc = Rsrc(edb.rsrcPath())

    # Check if edb exist
    if not edb.exists():
        log.error("Edb " + args.edb + " does not exist for user " +
                  args.user + " in domain " + dmn.name)
        sys.exit(1)

    if args.codeType.lower() == "ac":
        codeType = "ACTIVITYCODE"
    elif args.codeType.lower() == "gc":
        codeType = "GEOCODE"
    else:
        parser.error("codeType should be either 'gc' or 'ac'")

    if args.codeIndex < 1:
        raise ValueError("Minimum code index is 1")
    if args.codeIndex > 1 and codeType == "gc":
        raise OSError("Multiple geocode trees not implemented")

    for a in args.actions:
        if a not in actionList:
            parser.error("Unknown action %s" % a)

    log.info("User: "******"Edb: " + args.edb)

    loadGridData = False

    # Set values in argsDict
    argDict = {
        "edb": edb,
        "user": args.user,
        "domain": dmn,
        "grids": [],
        "filters": args.sourceNames,
        "codeType": codeType,
        "codeIndex": args.codeIndex,
        "codeLevel": args.codeLevel,
        "sources": None,
        "griddb": None,
        "subgrpdb": None,
        "roaddb": None,
        "vehdb": None,
        "svehdb": None,
        "rsrc": rsrc
    }

    # ----- Reading sources from edb -------------------------------
    if 'griddb' in args.sourceTypes:
        grids = []
        gridNames = edb.listGrids()
        for name in gridNames:
            grd = egrid.Egrid(edb, name)
            grd.readAsc()
            grids.append(grd)
        argDict["grids"] = grids

    if 'sources' in args.sourceTypes:
        argDict["sources"] = list(ModelReader(SourceStream(edb, mode='r')))

    if 'roaddb' in args.sourceTypes:
        log.error("Not implemented with new roaddb-structure")
        sys.exit(1)

    if 'subgrpdb' in args.sourceTypes:
        subgrpdb = Subgrpdb(edb)
        subgrpdb.read()
        argDict["subgrpdb"] = subgrpdb

    # emfacdbdb = Emfacdb(edb)
    # emfacdb.read()
    # argDict["subgrpdb"] = subgrpdb

    if 'svehdb' in args.sourceTypes:
        svehdb = Svehdb(edb)
        svehdb.read()
        argDict["svehdb"] = svehdb

    # TODO:  add option for vehdb

    # parse additional args
    # -----addSubstance--------------------------------------
    if "addSubstance" in args.actions:
        if args.substance is None:
            parser.error("Action addSubstance needs" +
                         "--substance to be specified")
        if args.substanceFactorFile is None:
            parser.error("Option addSubstance also needs" +
                         "--substanceFactorFile to be specified")
        if args.substance not in subdb.substIndices:
            raise ValueError(
                "Substance %s not in substance list" % args.substance)

        substanceNameFactorDict = parseMapping(
            path.abspath(args.substanceFactorFile),
            valueType=float)

        substanceFactorDict = {}
        # Converts mapping between substance name and factor to
        # mapping between substance index and factor
        for name, factor in substanceNameFactorDict.iteritems():
            if name not in subdb.substIndices:
                raise KeyError(
                    "Substance : %s not found in substance list" % name
                )
            ind = subdb.substIndices[name]
            substanceFactorDict[ind] = factor

        argDict["substanceFactorDict"] = substanceFactorDict
        argDict["substance"] = subdb.substIndices[args.substance]

    # ----removeSubstance------------------------------------
    if "removeSubstance" in args.actions:
        if args.substance is None:
            parser.error("Action removeSubstance needs" +
                         "--substance to be specified")

        if args.substance not in subdb.substIndices:
            log.error("Substance %s not in substance list" % args.substance)
            sys.exit(1)

        argDict["substance"] = subdb.substIndices[args.substance]

    # ----mapSubstances--------------------------------------
    if "mapSubstances" in args.actions:
        if args.substanceMapping is None:
            parser.error("Action mapSubstances needs" +
                         "--substanceMapping to be specified")
        substanceMappingFile = path.abspath(args.substanceMapping)
        substanceMapping = parseMapping(
            substanceMappingFile,
            keyType=int,
            valueType=int
        )
        argDict["substanceMapping"] = substanceMapping

    # ----addCodeTree-----------------------------------------
    if "addCodeTree" in args.actions:
        if args.codeMapping is None:
            parser.error("Action addCodeTree needs" +
                         "--codeMapping to be specified")
        if args.newIndex is None:
            parser.error("Action addCodeTree needs" +
                         "--newIndex to be specified")

        codeMappingFile = path.abspath(args.codeMapping)
        codeMapping = parseMapping(codeMappingFile)
        argDict["codeMapping"] = codeMapping

        argDict["newIndex"] = int(args.newIndex)

    # ----removeCodeTree-------------------------------------
    # Only arguments with default values needed, no validation is needed

    # ----moveCodeTree---------------------------------------
    if "moveCodeTree" in args.actions:
        if args.newIndex is None:
            parser.error("Action moveCodeTree needs" +
                         "--newIndex to be specified")
        try:
            argDict["newIndex"] = int(args.newIndex)
        except ValueError:
            log.error("newIndex must be an integer value")
            sys.exit(1)

    # ----extractByGeocode-----------------------------------
    if "extractByGeocode" in args.actions:
        if args.edbMapping is None:
            parser.error("Action extractByGeocode needs" +
                         "--edbMapping to be specified")
        log.info("Slicing edb by geocode")
        edbMappingFile = path.abspath(args.edbMapping)
        # Geocode has to be int
        edbMapping = parseMapping(edbMappingFile, keyType=int)
        argDict["edbMapping"] = edbMapping

    if 'scale' in args.actions:
        argDict['substance'] = subdb.substIndices[args.substance]
        argDict['factor'] = args.factor
        if args.factor is None:
            log.error("Must specify --factor")
            sys.exit(1)

        if args.substance is None:
            log.error("Must specify --substance")
            sys.exit(1)

    # Processing actions
    for action in args.actions:
        log.info("Running action: %s" % action)
        argDict = eval(action)(argDict)

    # Writes each processed grid to edb
    if loadGridData:
        for grd in argDict["grids"]:
            if not match(grd["NAME"], argDict["filters"]) or grd.hasSubgrp():
                continue
            else:
                log.debug(
                    "Wrote grid %s to edb: %s,  user: %s" % (
                        grd.par["NAME"].val, grd.edb.name, grd.user)
                )
                grd.load()
    else:
        for grd in argDict["grids"]:
            if not match(grd["NAME"], argDict["filters"]) or grd.hasSubgrp():
                continue
            else:
                log.debug(
                    "Wrote .asc-file for grid %s to edb: %s,  user: %s" % (
                        grd.par["NAME"].val, grd.edb.name, grd.user)
                )
                grd.writeAsc(grd.getAscPath())

    # Writes each processed database to edb
    if argDict["sources"] is not None:
        with SourceStream(edb, mode="w") as stream:
            model_writer = ModelWriter(stream)
            for src in argDict["sources"]:
                log.debug('Wrote source %s' % src.NAME)
                model_writer.write(src)

    for dbName in ["roaddb", "subgrpdb", "svehdb", "vehdb"]:
        if argDict[dbName] is not None:
            argDict[dbName].write()
            log.info(
                "Wrote %s to edb: %s, user: %s" % (
                    dbName, edb.name, edb.user)
            )
コード例 #8
0
ファイル: topdown.py プロジェクト: SMHI-Apl/AQM-tools
def main():
    """Top-down distribution of national emission totals.

    Reads a control file describing a top-down table, a national totals
    table and spatial distribution keys.  For each activity code, point
    source emissions from the bottom-up edb are subtracted from the
    national total and the rest is distributed over a grid using the
    given key (optionally regionalized with regional statistics).
    Results (grids and point sources) are written to the emission edb
    and a summary table with the distributed share per substance (%) is
    written to file.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument("controlfile", metavar='CONTROLFILE',
                        action="store",
                        help="Controlfile for topdown processing")

    parser.add_argument("-t", "--template", metavar='TEMPLATEFILE',
                        action="store", dest="cf", default=None,
                        help="Generate default controlfile")

    args = parser.parse_args()

    if args.cf is not None:
        generateCf(args.cf)
        log.info("Wrote default controlfile")
        sys.exit(0)

    log.info("Starting topdown processing")
    # Opening controlfile
    cf = ControlFile(args.controlfile)
    dmn = Domain()

    log.info("Reading topdown table")
    tdTableName = cf.findExistingPath("topDownTable:")
    tdTable = DataTable()
    tdTable.keys.append("Code")
    tdTable.read(tdTableName, delimiter=";")

    log.info("Reading national totals table")
    natTotalTableName = cf.findExistingPath("nationalTotalTable:")
    natTable = DataTable(desc=[{"id": "Code", "type": unicode},
                               {"id": "description", "type": unicode}])
    natTable.keys.append("Code")
    natTable.read(natTotalTableName, units=True, defaultType=str)
    # Notation keys used in emission reporting to mark values that are
    # Not Estimated / Not Occurring / Not Applicable / Included Elsewhere
    notationKeys = ["NE", "NO", "NA", "IE"]

    log.debug("Remove notation keys from national totals table")
    for row in natTable.data:
        for i in range(len(row)):
            if row[i] in notationKeys:
                row[i] = None

    log.debug("Convert all emission columns in national totals to float")
    for colId in natTable.listIds():
        if colId not in ["Code", "description"]:
            natTable.convertCol(colId, float)

    log.debug("Store units from national totals for each substance in dict")
    natUnits = {}
    for col in natTable.desc:
        if col.get("units", None) is not None:
            natUnits[col["id"]] = col["units"]

    log.debug("Read remaining data from control file")
    bottomupEdbName = cf.findString("bottomUpEdb:")
    topDownEdbName = cf.findString("topDownEdb:")
    emissionsEdbName = cf.findString("emissionsEdb:")
    # NOTE(review): the two lines below were masked in the scraped source;
    # reconstructed from the later use of 'userName' and 'year' - confirm
    # against the original control file keys.
    userName = cf.findString("user:")
    year = cf.findInt("year:")

    # initialize edb objects
    buEdb = Edb(dmn, userName, bottomupEdbName)
    tdEdb = Edb(dmn, userName, topDownEdbName)
    eEdb = Edb(dmn, userName, emissionsEdbName)
    log.info("Reading/preparing EDB:s")

    log.info("Reading subdb")
    subdb = Subdb(eEdb)
    subdb.read()

    log.info("Reading subgrpdb")
    subgrpdb = SubgrpStream(buEdb)
    subgrpdb.read()

    log.info("Reading facilitydb")
    facilityIn = FacilityStream(buEdb)

    log.info("Reading companydb")
    companyIn = CompanyStream(buEdb)

    facilityOut = FacilityStream(eEdb, mode="w")
    companyOut = CompanyStream(eEdb, mode="w")

    log.info("Writing company db to result edb")
    companyOut.write(companyIn.read())

    log.info("Writing facility db to result edb")
    facilityOut.write(facilityIn.read())

    if not buEdb.exists():
        log.error("Edb " + buEdb.name + " does not exist for user " + userName +
                  " in domain " + dmn.name)
        sys.exit(1)
    if not tdEdb.exists():
        log.error("Edb " + tdEdb.name + " does not exist for user " + userName +
                  " in domain " + dmn.name)
        sys.exit(1)
    if not eEdb.exists():
        log.error("Edb " + eEdb.name + " does not exist for user " + userName +
                  " in domain " + dmn.name)
        sys.exit(1)

    keys = tdEdb.listGrids()
    msg = "%i keys found in edb: %s" % (len(keys), tdEdb.name)
    log.info(msg)

    # sourcedb from bottom-up edb
    with SourceStream(buEdb, mode='rb') as source_instream:
        source_reader = ModelReader(source_instream)
        bu_sources = list(source_reader)

    log.info(
        "%i point sources found in edb: %s" % (
            len(bu_sources),
            buEdb.name)
    )

    # Empty sourcedb of the result edb
    if cf.findBoolean("emptyEmissionSourcedb:"):
        eEdb.empty_sourcedb()
        e_sources = []
        log.info("Removed point sources from edb: %s" % (eEdb.name))
    else:
        # sourcedb from emission edb (result edb)
        with SourceStream(eEdb, mode='rb') as source_instream:
            source_reader = ModelReader(source_instream)
            e_sources = list(source_reader)

        msg = "%i point sources found in edb: %s" % (len(e_sources), eEdb.name)
        log.info(msg)

    if not path.exists(eEdb.rsrcPath()):
        log.error("No edb.rsrc exists for emission edb")
        sys.exit()
    else:
        rsrc = Rsrc(eEdb.rsrcPath())
    acIndex = cf.findInt("acIndex:")
    codeDepth = rsrc.ac[acIndex - 1].depth

    substances = cf.findStringList("substances:")

    for subst in substances:
        if subst not in subdb.substIndices:
            log.error("Substance: " + subst + " not in Airviro substance list")
            sys.exit()

    # Initialize trace for debug and additional logging
    if cf.findBoolean("trace:"):
        log.info("Initializing trace for detailed logging")
        trace = TraceDef(
            active=True,
            substances=cf.findStringList("trace.substances:"),
            logfile=cf.findString("trace.logfile:"),
            regdefgc=cf.findIntList("trace.regdef.gc:",
                                    optional=True,
                                    default=None),
            gcDefRaster=cf.findExistingPath("trace.gcraster:")
        )
    else:
        trace = TraceDef(active=False)

    log.info("Initializing result table")
    resTablePath = cf.findString("resTable:")
    resTable = DataTable(desc=[{"id": "Code", "type": unicode}])
    resTable.keys.append("Code")
    for subst in substances:
        resTable.addCol({"id": subst, "type": float, "unit": "%"})

    # Create emission grid template (with geocodes)
    log.info("Reading emission grid template")
    eGridTemplatePath = cf.findExistingPath("emisGridTemplatePath:")
    eGridTemplate = Egrid(eEdb, "name")
    if eGridTemplatePath[-4:] == ".asc":
        eGridTemplatePath = eGridTemplatePath[:-4]
    eGridTemplate.readData(eGridTemplatePath)
    eGridTemplate.substances = {}
    eGridTemplate.par["SUBSTANCE"].val = []
    # Shared state passed to the regionalize/distribute helpers
    dd = {"key": None,
          "regstat": None,
          "regdef": None,
          "bu_sources": bu_sources,
          "psIndices": [],
          "units": natUnits,
          "rsrc": rsrc,
          "subdb": subdb,
          "trace": trace,
          "subgrpdb": subgrpdb
          }

    # Process all rows in the topdown table
    for row in tdTable.data:
        code = row[tdTable.colIndex["Code"]]
        active = row[tdTable.colIndex["Active"]]
        statType = row[tdTable.colIndex["Stat_type"]]
        if active == "no":
            continue
        log.info("Code: " + code)

        distributed = False

        # Code is used as-is; padding with '-' to full code depth
        # (an earlier workaround for a GUI bug) is disabled.
        airviroCode = code

        # rowIndex also validates that the code exists in both tables
        tdrow = tdTable.data[tdTable.rowIndex([code])]
        nrow = natTable.data[natTable.rowIndex([code])]

        # Create a resTable row to fill with data
        resrow = [None] * resTable.ncols
        resrow[0] = code

        # Check if national totals are non-zero
        nonZero = False
        for val in nrow:
            if val is not None:
                if val > 0:
                    nonZero = True
                    break

        # Filter out indices for pointsources with the current ac
        # Also including sources coded with sub-codes
        # This allows to estimate top-down emissions on a higher code-level
        psIndices = []
        for i, ps in enumerate(bu_sources):
            codeMatch = False

            for emis in ps.EMISSION:
                # It is assumed that the first code is used while processing topdown
                ac = emis.ACTCODE[0]
                if ac[-1] == ".":
                    ac = ac[:-1]
                # exact match for direct emissions (prefix matching disabled)
                if ac == code:
                    codeMatch = True
                    break

            if not codeMatch:
                for emis in ps.SUBGRP:
                    # It is assumed that the first code is used while processing topdown
                    ac = emis.ACTCODE[0]
                    # prefix match: sub-coded substance groups are included
                    if ac[:len(code)] == code:
                        codeMatch = True
                        break

            if codeMatch:
                psIndices.append(i)

        dd["psIndices"] = psIndices

        keyName = row[tdTable.colIndex["Key"]]

        # If no distribution key specified and no ps in bottom-up edb - cont.
        if keyName is None and psIndices == []:
            log.debug("No key and no point sources found for code: %s, skipping..." % code)
            resTable.addRow(resrow)
            continue

        if psIndices != []:
            msg = "--Found %i pointsources" % len(psIndices)
            log.info(msg)

        if keyName is not None:
            if keyName not in keys:
                log.error("No such key: " + keyName)
                sys.exit()

            msg = "--Key: %s" % keyName
            log.info(msg)
            keyGrid = Egrid(tdEdb, keyName)
            keyGrid.readData()
            log.debug("Read key: " + keyName + " from topdownEdb")

            # create emission grid to store distributed emissions
            eGrid = deepcopy(eGridTemplate)
            eGrid.name = code.replace(".", "_")
            eGrid.par["NAME"].val = code
            eGrid.par["INFO2"].val = "Distribution key: " + keyGrid.par["NAME"].val
            eGrid.par["ACTIVITYCODE"].val = [airviroCode.split(".")]

        regstatName = row[tdTable.colIndex["Regstat"]]
        regdefName = row[tdTable.colIndex["Regdef"]]

        if regstatName is not None:
            if regdefName is None:
                log.error("No region definition given for regional statistics: " +
                          regstatName)
                sys.exit(1)
            regstatPath = path.join(dmn.domainPath(), "topdown", "regstat", regstatName)
            regstat = DataTable()
            log.info("regstatPath: " + regstatPath)
            regstat.read(regstatPath, units=True, defaultType=float, delimiter=";")
            if "Geocode" not in regstat.listIds():
                log.error("No Geocode column found in regstat")
                sys.exit(1)
            regstat.convertCol("Geocode", int)
            regstat.keys.append("Geocode")  # Making Geocode the primary key

            # create list of unique geo codes
            geocodes = [row[regstat.colIndex["Geocode"]] for row in regstat.data]
            geocodes = unique(geocodes)

            for colId in regstat.listIds():
                if colId.lower() == "year":
                    rows = []
                    regstat.convertCol(colId, int)
                    # Make it possible to accumulate year
                    regstat.setKeys(regstat.keys + [colId])

                    # Calculates the total emission for each geocode
                    # in case there are multiple rows for different fuels etc
                    colsToSum = regstat.listIds()
                    colsToSum.remove(colId)
                    colsToSum.remove("Geocode")
                    for gc in geocodes:
                        # sums all numeric values in colsToSum for
                        # rows matching row id [gc, year]
                        # returns an accumulated row and appends it to rows
                        rowId = regstat.dict2RowId({"Geocode": gc, colId: year})
                        rows.append(regstat.accumulate(rowId, "sum", colsToSum))
                    regstat.data = rows  # replace original rows with accumulated rows
                    regstat.keys.remove(colId)
                    break

            regdef = Raster()
            regdefPath = path.join(dmn.domainPath(), "topdown", "regdef", regdefName)
            regdef.read(regdefPath)

            dd["regstat"] = regstat
            dd["regdef"] = regdef
        else:
            dd["regstat"] = None
            dd["regdef"] = None

        if dd["regstat"] is not None and len(bu_sources) > 0 and statType == "fixed":
            log.info("--Regionalizing pointsources")
            dd = regionalizePS(dd, code)

        if keyName is not None and nonZero:
            regionalizedDefault = False
            # Spatial distribution of emissions
            for subst in substances:

                sInd = subdb.substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # TODO: should give reference to subgrps to include emis from them
                    pstot += source.get_emis(
                        sInd,
                        toUnit,
                        eEdb,
                        actcodes=[code]
                    )

                if ntot is None or ntot == 0:
                    if pstot > 0:
                        # 9999 is used as marker for no national total
                        resrow[resTable.colIndex[subst]] = 9999.0
                        log.warning(
                            "Nattot is 0 but ps tot is: %f %s" % (pstot, toUnit))
                    continue

                # rest = national total minus what point sources already cover
                nrest = ntot - pstot

                resrow[resTable.colIndex[subst]] = 100.0

                if abs(nrest / ntot) < 0.0001:
                    nrest = 0
                    log.info(
                        "--Rest is < 0.01 % of national total, rounded to zero"
                    )
                    continue
                elif nrest < 0:
                    log.warning(
                        "--National rest is below zero, %4.2f proc for %s" % (
                            -1 * nrest / ntot * 100,
                             subst)
                    )
                    dd["trace"].write()
                log.info(
                    "---Substance: " + subst +
                    ", rest is: " + str(nrest) +
                    toUnit + " = " + str(nrest / ntot * 100.0) + "%"
                )

                # Use the substance-specific key if present, else the
                # generic 'all' key from the key grid
                try:
                    keyRast = keyGrid.substances[sInd]
                except KeyError:
                    keyRast = keyGrid.substances[subdb.substIndices["all"]]

                dd["key"] = keyRast
                if dd["regstat"] is not None:
                    if (subst not in regstat.colIndex and
                        sInd not in keyGrid.substances and not regionalizedDefault):
                        dd = regionalizeKey(dd, subst, code)
                        regionalizedDefault = True
                    else:
                        dd = regionalizeKey(dd, subst, code)

                emisRast = distribute(dd["key"], nrest)
                emisRast = emisRast * unitConvFac(toUnit, "ton/year")
                eGrid.addData(emisRast, dd["subdb"].substIndices[subst])
                distributed = True

        else:
            # resTable is filled
            # In case all national totals are zero but there are ps
            for subst in substances:
                sInd = dd["subdb"].substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # subgrps are not used!
                    pstot += source.get_emis(sInd, toUnit, buEdb,
                                             actcodes=[code])

                if ntot is not None and ntot != 0:
                    resrow[resTable.colIndex[subst]] = pstot / ntot * 100.0
                else:
                    # -999 marks 'no national total to compare against'
                    resrow[resTable.colIndex[subst]] = -999.0

        if len(dd["psIndices"]) > 0:
            tmp_sources = (bu_sources[i] for i in dd["psIndices"])
            with SourceStream(eEdb, mode='wb') as out_source_stream:
                source_writer = ModelWriter(out_source_stream)
                for source in tmp_sources:
                    source_writer.write(source)
            log.debug("Wrote ps to emission edb")

        if distributed:
            eGrid.load()
            log.debug("Wrote emission grid to emission edb")

        dd["trace"].write()
        resTable.addRow(resrow)

    # Use a context manager so the result file is closed even on error
    with open(resTablePath, "w") as resTableFile:
        resTable.write(resTableFile)

    log.info("Finished topdown process")
コード例 #9
0
def main():
    """Run gifmap searches in parallel and collect results.

    Reads a control file defining a set of searches (macro + time period +
    parameters per substance), runs up to --nproc gifmap processes at a
    time, optionally reprojects/resamples the resulting rasters, writes
    them as .asc files and produces a summary table of total emissions.
    """
    # -----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)

    parser.add_option("-l", "--loglevel",
                      action="store", dest="loglevel", default=2,
                      help="Sets the loglevel (0-3 where 3=full logging)")

    parser.add_option("-t", "--template",
                      action="store", dest="cf", default=None,
                      help="Generate default controlfile")

    parser.add_option("-f", "--force",
                      action="store_true", dest="force", default=False,
                      help="To start the process without confirming the domain")

    parser.add_option("-n", "--nproc",
                      action="store", dest="nproc", default=1,
                      help="Number of gifmap processes to run at a time")

    (options, args) = parser.parse_args()

    # ------------Setting up logging capabilities -----------
    rootLogger = logger.RootLogger(int(options.loglevel))
    log = rootLogger.getLogger(sys.argv[0])

    if options.cf is not None:
        controlfile.generateCf(path.abspath(options.cf), controlFileTemplate)
        print("Wrote default controlfile")
        sys.exit()

    if len(args) != 1:
        parser.error("Incorrect number of arguments")

    domainName = os.environ["AVDBNAME"]
    dmn = domain.Domain(domainName)

    if not options.force:
        answer = raw_input("Chosen dbase is: " + domainName + ",continue(y/n)?")
        if answer == "y":
            dmn = domain.Domain()
        else:
            sys.exit("Interrupted by user")

    # Opening controlfile
    # ---retrieving data from control file----
    cf = controlfile.ControlFile(fileName=path.abspath(args[0]))

    # Get search parameters from control file
    substances = cf.findStringList("substances:")
    outputDir = cf.findExistingPath("outputDir:")
    baseSearches = cf.findStringList("searches:")
    fromProj = cf.findString("fromProj:")
    toProj = cf.findString("toProj:")
    resample = cf.findBoolean("resample:")
    dmn = domain.Domain(os.environ.get('AVDBNAME'))
    substDict = dmn.listSubstanceIndices()

    if resample:
        # Get proj4 projection definitions from aliases
        try:
            fromProj = transcoord.proj4Dict[fromProj]
        except KeyError:
            print("Projection %s not found in proj4Dictin transCoord.py" % fromProj)
        try:
            toProj = transcoord.proj4Dict[toProj]
        except KeyError:
            print("Projection %s not found in proj4Dictin transCoord.py" % toProj)

        # Get output grid parameters from controlfile
        out_xll = cf.findFloat("out_xll:")
        out_yll = cf.findFloat("out_yll:")
        out_ncols = cf.findInt("out_ncols:")
        out_nrows = cf.findInt("out_nrows:")
        out_cellsize = cf.findFloat("out_cellsize:")

    # Build list with search definitions
    searches = []
    for search_id in baseSearches:
        prefix = cf.findString("search." + search_id + ".prefix:")
        macroPath = cf.findString("search." + search_id + ".macro:")
        starttime = cf.findString("search." + search_id + ".starttime:")
        endtime = cf.findString("search." + search_id + ".endtime:")
        parVals = cf.findParam("search." + search_id + ".par.", findAll=True)
        # macro parameter keys include the trailing ':' (setParam format)
        parDict = {}
        for p in parVals:
            ind = p.index(":")
            par = p[:ind + 1]
            val = p[ind + 1:].strip()
            parDict[par] = val

        alobVals = cf.findParam("search." + search_id + ".alob.", findAll=True)
        alobDict = {}
        for a in alobVals:
            ind = a.index(":")
            key = a[:ind]
            val = a[ind + 1:].strip()
            # BUGFIX: was alobDict[par]=val, which stored every alob value
            # under the last par key from the loop above
            alobDict[key] = val

        macro = controlfile.ControlFile(fileName=macroPath, removeComments=False)
        unitIndex = macro.findInt("edb.unit:")
        edbName = macro.findString("edb.edb:")
        # NOTE(review): the two lines below were masked in the scraped
        # source; reconstructed from the adjacent 'TO' setParam and the
        # later use of 'userName' - confirm against the original macro keys.
        userName = macro.findString("edb.user:")
        macro.setParam("FROM       :", starttime)
        macro.setParam("TO         :", endtime)
        for key, val in parDict.iteritems():
            macro.setParam(key, val)
        for key, val in alobDict.iteritems():
            macro.setParam(key, "R" + val, addSpace=False)

        edb = Edb(dmn.name, userName, edbName)
        rsrc = Rsrc(edb.rsrcPath())
        unitName = rsrc.search[unitIndex]

        # One search entry per (search definition, substance) pair
        for substance in substances:
            filename = path.join(outputDir, prefix + "_" + substance.replace(" ", "_") + ".asc")
            searches.append(
                {'id': search_id,
                 'macro': macro,
                 'prefix': prefix,
                 'substance': substance,
                 'unit': unitName,
                 'filename': filename
                 }
                )

    # Summary table gets an extra 'proj_sum' column when resampling
    if resample:
        summaryTable = datatable.DataTable(desc=[{"id": 'search_id', "type": unicode},
                                                 {"id": 'substance', "type": unicode},
                                                 {"id": 'macro', "type": unicode},
                                                 {"id": 'filename', "type": unicode},
                                                 {"id": 'sum', "type": float},
                                                 {"id": 'proj_sum', "type": float},
                                                 {"id": 'unit', "type": unicode}],
                                           keys=["search_id", "substance"])
    else:
        summaryTable = datatable.DataTable(desc=[{"id": 'search_id', "type": unicode},
                                                 {"id": 'substance', "type": unicode},
                                                 {"id": 'macro', "type": unicode},
                                                 {"id": 'filename', "type": unicode},
                                                 {"id": 'sum', "type": float},
                                                 {"id": 'unit', "type": unicode}],
                                           keys=["search_id", "substance"])

    # Initialising parallel run
    nproc = int(options.nproc)  # number of processes to run in parallel
    running_proc = 0            # counter for number of running processes
    pids = {}                   # dictionary to store process output and info
    todo = len(searches)        # number of processes still to be started
    searchInd = 0               # index of the next search to start

    def countRunning(pids):
        """Count the number of running processes"""
        for pid, props in pids.iteritems():
            props['ret_val'] = props['proc'].poll()
        return len([pid for pid in pids if pids[pid]['ret_val'] is None])

    while todo + running_proc > 0:
        # loop until all processes have finished
        if running_proc == nproc or todo == 0:
            print("Running gifmap...")

            # When nproc processes are started, the program waits here
            # until one is finished before adding a new one
            while countRunning(pids) == running_proc:
                time.sleep(5)

            # mark first found finished process as done
            # (loop variable 'props' intentionally used after the loop)
            for pid, props in pids.iteritems():
                if not props['ret_val'] is None and not props['done']:
                    props['done'] = True
                    break

            command = props["cmd"]
            if props['ret_val'] != 0:
                errMsg = pids[pid]["proc"].stderr.read()
                print("Error while running command: %s\n%s" % (pids[pid], errMsg))
                sys.exit(1)

            print("Finished search %s" % props['search_id'])
            # Find search in search list
            running_proc -= 1
            for f_search in searches:
                if f_search["id"] == props['search_id'] and f_search['substance'] == props['substance']:
                    break

            f_search['done'] = True
            try:
                f_search['res'] = res2rast(f_search['filename'])
                # Store original result from search
                f_search['sum'] = f_search['res'].sum()

            except Exception:
                # Best effort: if the result raster cannot be read, fall
                # back to the total reported in the gifmap log output
                f_search['res'] = None

                output = pids[pid]["proc"].stdout.read()
                f_search['sum'] = re.compile("#EMIS (\S*).*").search(output).group(1)
                print("Could not extract result raster from %s" % f_search['filename'])
                print("Uses total from gifmap log")

            if resample:
                outRast = raster.Raster(Xll=out_xll, Yll=out_yll, Ncols=out_ncols,
                                        Nrows=out_nrows, Cellsize=out_cellsize,
                                        Nodata=-9999)

                # Run post-processing of result for finished processes
                if f_search['sum'] > 0:
                    print("Projecting result to target CRS")
                    f_search['res'] = transcoord.transformEmisRaster(f_search['res'], outRast,
                                                                     fromProj, toProj,
                                                                     tmpDir=dmn.tmpDir())
                    # Store projected result from search
                    f_search['proj_sum'] = f_search['res'].sum()
                else:
                    f_search['proj_sum'] = 0

            # Write result to file
            if f_search['res'] is not None:
                f_search['res'].write(f_search['filename'])

            # Add total emission to summary table
            if resample:
                summaryTable.addRow(
                    [f_search['id'],
                     f_search['substance'],
                     f_search['macro'].name,
                     f_search['filename'],
                     f_search['sum'],
                     f_search['proj_sum'],
                     f_search['unit']])
            else:
                summaryTable.addRow(
                    [f_search['id'],
                     f_search['substance'],
                     f_search['macro'].name,
                     f_search['filename'],
                     f_search['sum'],
                     f_search['unit']])

        # Add another search process
        elif todo > 0:
            c_search = searches[searchInd]
            substance = c_search["substance"]
            substanceIndex = substDict[substance]
            c_search['macro'].setParam("ELEMENT    :", substanceIndex)
            c_search['macro'].write()
            command = "gifmap -T -i " + c_search['macro'].name + " -o " + c_search['filename']
            p = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)

            # Add info and handles to process dictionary
            pids[p.pid] = {"proc": p,
                           "search_id": c_search['id'],
                           "substance": c_search['substance'],
                           "cmd": command,
                           'ret_val': None,
                           'done': False}
            print(
                "Started search %s, substance %s" % (
                    c_search['id'], c_search['substance']))

            running_proc += 1
            todo -= 1
            searchInd += 1

            # Each process needs some time to read
            # the macro before it is changed
            time.sleep(10)

    # Write summary table; context manager ensures the file is closed
    with open(path.join(outputDir, "summaryTable.txt"), 'w') as tableFile:
        summaryTable.write(tableFile)
    print("Finished")