Example #1
def main():
    # -----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)

    parser.add_option("-d", "--doc",
                      action="store_true", dest="doc",
                      help="Prints more detailed documentation and exit")

    parser.add_option("-v",
                      action="store_const", const=logging.DEBUG,
                      dest="loglevel", default=get_loglevel(),
                      help="Produce verbose output")

    parser.add_option("-o", "--output",
                      action="store", dest="outfileName", default=None,
                      help="Output file")

    parser.add_option("-i", "--i",
                      action="store", dest="infile",
                      help="Raster to be regionalized")

    parser.add_option("-r", "--regdef",
                      action="store", dest="regdef",
                      help="Raster defining the regions found " +
                      "in statistics table")

    parser.add_option("-s", "--stat",
                      action="store", dest="stat",
                      help="Tab-separated statistics table, " +
                      "with 1st column representing geocode and value " +
                      "representing region sum")

    parser.add_option("-c", "--colname",
                      action="store", dest="colname",
                      help="Header of column in stat table to fetch " +
                      "region sums from")

    (options, args) = parser.parse_args()

    # ------------Setting up logging capabilities -----------
    rootLogger = logger.RootLogger(int(options.loglevel))
    global log
    log = rootLogger.getLogger(sys.argv[0])

    # ------------Process and validate options---------------
    if options.doc:
        print __doc__
        sys.exit()

    if len(args) > 0:
        parser.error("Incorrect number of arguments")

    regdef = Raster()
    if options.regdef is not None:
        regdef.read(options.regdef)
    else:
        log.error("No region definition raster specified")
        sys.exit(1)

    key = Raster()
    if options.infile is not None:
        key.read(options.infile)
    else:
        log.info("No initial distribution raster given, using " +
                 "homogeneous distribution")
        key.assign(regdef)
        key.data = np.where(regdef.data != regdef.nodata, 1, regdef.nodata)

    if options.stat is None:
        log.error("No statistics table specified")
        sys.exit(1)

    if (regdef.xll != key.xll or regdef.yll != key.yll):
        log.error("The raster domain size differs between key raster " +
                  "and region raster")

    stat = DataTable()
    stat.read(options.stat, defaultType=float)
    print stat.desc
    stat.convertCol(stat.desc[0]["id"], int)
    print stat.desc
    # Set first column as unique id for rows
    stat.setKeys([stat.desc[0]["id"]])

    # Remove nodata in rasters
    key.nodataToZero()
    regdef.nodataToZero()

    log.info("Consistency and completeness check for codes " +
             "in regdef and statistics")
    # Create list of codes in raster
    regdefCodes = regdef.unique()
    regdefCodes = map(int, regdefCodes)

    if 0.0 in regdefCodes:
        regdefCodes.remove(0.0)

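    # Region codes listed in the statistics table (first column holds the geocode)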
    statRegCodes = [row[0] for row in stat.data]

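    # Determine which column of the statistics table holds the region sums (default: second column)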
    if options.colname is not None:
        try:
            col = stat.colIndex[options.colname]
        except KeyError:
            log.error("No column named " +
                      "%s found in statistics table" % options.colname)
            sys.exit(1)
    else:
        col = 1
    colId = stat.desc[col]["id"]

    errFound = False
    for code in statRegCodes:
        # Assuring that the regional codes are present in regdef
        if code not in regdefCodes:
            log.error("Input Error: code: " + str(int(code)) +
                      " in reg totals is not represented in regional raster")
            errFound = True

    # Assuring that the regional raster IDs are present
    # in the regional statistics
    for code in regdefCodes:
        if code not in statRegCodes:
            log.error("Input Error: ID:" + str(code) +
                     " in region raster is not represented in " +
                      "regional totals!")
            errFound = True

    if errFound:
        sys.exit(1)

    # For all regions, calculate the regional sum,
    # set the key values to key/regSum
    res = np.zeros(key.data.shape)
    log.info("Regionalizing key raster")
    for code in regdefCodes:
        emis = stat.lookup(colId, code)
        log.debug("Statistics for reg %i: %f" % (code, emis))
        mask = np.array(regdef.data == code)
        regKey = mask * key.data
        regSum = np.sum(np.sum(regKey))
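        # Distribute the regional total in proportion to the key within this region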
        if regSum > 0:
            res = res + emis * regKey / regSum
        else:
            log.warning(
                "Distribution key is zero for geocode" +
                " %i, using homogeneous distribution for this region" % code)
            res = res + emis * mask / np.sum(np.sum(mask))

    key.data = res

    # resultRast=(totFractionRast*key)*regionalTotals.regSum(substance)
    key.write(options.outfileName)
    log.info("Result with sum: %f written" % res.sum())
Example #2
def main():
    # -----------Setting up and using option parser-----------------------
    parser = OptionParser(usage=usage, version=version)

    parser.add_option("-d",
                      "--doc",
                      action="store_true",
                      dest="doc",
                      help="Prints more detailed documentation and exit")

    parser.add_option("-v",
                      action="store_const",
                      const=logging.DEBUG,
                      dest="loglevel",
                      default=get_loglevel(),
                      help="Produce verbose output")

    parser.add_option("-o",
                      "--output",
                      action="store",
                      dest="outfileName",
                      default=None,
                      help="Output file")

    parser.add_option("-i",
                      "--i",
                      action="store",
                      dest="infile",
                      help="Raster to be regionalized")

    parser.add_option("-r",
                      "--regdef",
                      action="store",
                      dest="regdef",
                      help="Raster defining the regions found " +
                      "in statistics table")

    parser.add_option("-s",
                      "--stat",
                      action="store",
                      dest="stat",
                      help="Tab-separated statistics table, " +
                      "with 1st column representing geocode and value " +
                      "representing region sum")

    parser.add_option("-c",
                      "--colname",
                      action="store",
                      dest="colname",
                      help="Header of column in stat table to fetch " +
                      "region sums from")

    (options, args) = parser.parse_args()

    # ------------Setting up logging capabilities -----------
    rootLogger = logger.RootLogger(int(options.loglevel))
    global log
    log = rootLogger.getLogger(sys.argv[0])

    # ------------Process and validate options---------------
    if options.doc:
        print __doc__
        sys.exit()

    if len(args) > 0:
        parser.error("Incorrect number of arguments")

    regdef = Raster()
    if options.regdef is not None:
        regdef.read(options.regdef)
    else:
        log.error("No region definition raster specified")
        sys.exit(1)

    key = Raster()
    if options.infile is not None:
        key.read(options.infile)
    else:
        log.info("No initial distribution raster given, using " +
                 "homogeneous distribution")
        key.assign(regdef)
        key.data = np.where(regdef.data != regdef.nodata, 1, regdef.nodata)

    if options.stat is None:
        log.error("No statistics table specified")
        sys.exit(1)

    if (regdef.xll != key.xll or regdef.yll != key.yll):
        log.error("The raster domain size differs between key raster " +
                  "and region raster")

    stat = DataTable()
    stat.read(options.stat, defaultType=float)
    print stat.desc
    stat.convertCol(stat.desc[0]["id"], int)
    print stat.desc
    # Set first column as unique id for rows
    stat.setKeys([stat.desc[0]["id"]])

    # Remove nodata in rasters
    key.nodataToZero()
    regdef.nodataToZero()

    log.info("Consistency and completeness check for codes " +
             "in regdef and statistics")
    # Create list of codes in raster
    regdefCodes = regdef.unique()
    regdefCodes = map(int, regdefCodes)

    if 0.0 in regdefCodes:
        regdefCodes.remove(0.0)

    statRegCodes = [row[0] for row in stat.data]

    if options.colname is not None:
        try:
            col = stat.colIndex[options.colname]
        except KeyError:
            log.error("No column named " +
                      "%s found in statistics table" % options.colname)
            sys.exit(1)
    else:
        col = 1
    colId = stat.desc[col]["id"]

    errFound = False
    for code in statRegCodes:
        # Assuring that the regional codes are present in regdef
        if code not in regdefCodes:
            log.error("Input Error: code: " + str(int(code)) +
                      " in reg totals is not represented in regional raster")
            errFound = True

    # Assuring that the regional raster IDs are present
    # in the regional statistics
    for code in regdefCodes:
        if code not in statRegCodes:
            log.error("Input Error: ID:" + str(code) +
                      " in region raster is not represented in " +
                      "regional totals!")
            errFound = True

    if errFound:
        sys.exit(1)

    # For all regions, calculate the regional sum,
    # set the key values to key/regSum
    res = np.zeros(key.data.shape)
    log.info("Regionalizing key raster")
    for code in regdefCodes:
        emis = stat.lookup(colId, code)
        log.debug("Statistics for reg %i: %f" % (code, emis))
        mask = np.array(regdef.data == code)
        regKey = mask * key.data
        regSum = np.sum(np.sum(regKey))
        if regSum > 0:
            res = res + emis * regKey / regSum
        else:
            log.warning("Distribution key is zero for geocode" +
                        " %i, using homogeneous distribution for this region" %
                        code)
            res = res + emis * mask / np.sum(np.sum(mask))

    key.data = res

    # resultRast=(totFractionRast*key)*regionalTotals.regSum(substance)
    key.write(options.outfileName)
    log.info("Result with sum: %f written" % res.sum())
Example #3
def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument("controlfile", metavar='CONTROLFILE',
                        action="store",
                        help="Controlfile for topdown processing")

    parser.add_argument("-t", "--template", metavar='TEMPLATEFILE',
                        action="store", dest="cf", default=None,
                        help="Generate default controlfile")

    args = parser.parse_args()


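    # If a template file was requested, write a default controlfile and exit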
    if args.cf is not None:
        generateCf(args.cf)
        log.info("Wrote default controlfile")
        sys.exit(0)

    log.info("Starting topdown processing")
    # Opening controlfile
    cf = ControlFile(args.controlfile)
    dmn = Domain()

    log.info("Reading topdown table")
    tdTableName = cf.findExistingPath("topDownTable:")
    tdTable = DataTable()
    tdTable.keys.append("Code")
    tdTable.read(tdTableName,delimiter=";")

    log.info("Reading national totals table")
    natTotalTableName = cf.findExistingPath("nationalTotalTable:")
    natTable = DataTable(desc=[{"id": "Code", "type":unicode},
                               {"id": "description", "type":unicode}])
    natTable.keys.append("Code")
    natTable.read(natTotalTableName, units=True, defaultType=str)
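    # Notation keys (e.g. NE = not estimated, IE = included elsewhere) mark values to treat as missing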
    notationKeys = ["NE", "NO", "NA", "IE"]

    
    
    log.debug("Remove notation keys from national totals table")
    for row in natTable.data:
        for i in range(len(row)):
            if row[i] in notationKeys:
                row[i] = None

    log.debug("Convert all emission columns in national totals to float")
    for colId in natTable.listIds():
        if colId not in ["Code","description"]:
            natTable.convertCol(colId,float)

    log.debug("Store units from national totals for each substance in dict")
    natUnits = {}
    for col in natTable.desc:
        if col.get("units", None) is not None:
            natUnits[col["id"]] = col["units"]
        
    log.debug("Read remaining data from control file")
    bottomupEdbName = cf.findString("bottomUpEdb:")
    topDownEdbName = cf.findString("topDownEdb:")
    emissionsEdbName = cf.findString("emissionsEdb:")
    userName = cf.findString("user:")
    year = cf.findInt("year:")

    # initialize edb objects
    buEdb = Edb(dmn, userName, bottomupEdbName)
    tdEdb = Edb(dmn, userName, topDownEdbName)
    eEdb = Edb(dmn, userName, emissionsEdbName)
    log.info("Reading/preparing EDBs")
    
    log.info("Reading subdb")
    subdb = Subdb(eEdb)
    subdb.read()

    log.info("Reading subgrpdb")
    subgrpdb = SubgrpStream(buEdb)
    subgrpdb.read()

    log.info("Reading facilitydb")
    facilityIn = FacilityStream(buEdb)

    log.info("Reading companydb")
    companyIn = CompanyStream(buEdb)
    
    facilityOut = FacilityStream(eEdb,mode="w")
    companyOut = CompanyStream(eEdb,mode="w")

    log.info("Writing company db to result edb")
    companyOut.write(companyIn.read())

    log.info("Writing facility db to result edb")
    facilityOut.write(facilityIn.read())

    if not buEdb.exists():
        log.error("Edb " + buEdb.name + " does not exist for user " + userName +
                  " in domain " + dmn.name)
        sys.exit(1)
    if not tdEdb.exists():
        log.error("Edb " + tdEdb.name + " does not exist for user " + userName +
                  " in domain " + dmn.name)
        sys.exit(1)
    if not eEdb.exists():
        log.error("Edb " + eEdb.name + " does not exist for user " + userName +
                  " in domain " + dmn.name)
        sys.exit(1)

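    # Distribution keys (grids) available in the top-down edb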
    keys = tdEdb.listGrids()
    msg = "%i keys found in edb: %s" % (len(keys), tdEdb.name)
    log.info(msg)

    # sourcedb from bottom-up edb
    with SourceStream(buEdb, mode='rb') as source_instream:
        source_reader = ModelReader(source_instream)
        bu_sources = list(source_reader)

    log.info(
        "%i point sources found in edb: %s" % (
            len(bu_sources),
            buEdb.name)
    )


    # Empty sourcedb of the result edb
    if cf.findBoolean("emptyEmissionSourcedb:"):
        eEdb.empty_sourcedb()
        e_sources = []
        log.info("Removed point sources from edb: %s" % (eEdb.name))
    else:
        # sourcedb from emission edb (result edb)
        with SourceStream(eEdb, mode='rb') as source_instream:
            source_reader = ModelReader(source_instream)
            e_sources = list(source_reader)

        msg = "%i point sources found in edb: %s" % (len(e_sources), eEdb.name)
        log.info(msg)

    if not path.exists(eEdb.rsrcPath()):
        log.error("No edb.rsrc exists for emission edb")
        sys.exit()
    else:
        rsrc = Rsrc(eEdb.rsrcPath())
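    # Activity code index and the corresponding code depth from the edb resource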
    acIndex = cf.findInt("acIndex:")
    codeDepth = rsrc.ac[acIndex-1].depth

    substances = cf.findStringList("substances:")
        
    for subst in substances:
        if subst not in subdb.substIndices:
            log.error("Substance: " + subst + " not in Airviro substance list")
            sys.exit()
    
    # Initialize trace for debug and additional logging
    if cf.findBoolean("trace:"):
        log.info("Initializing trace for detailed logging")
        trace = TraceDef(
            active=True,
            substances=cf.findStringList("trace.substances:"),
            logfile=cf.findString("trace.logfile:"),
            regdefgc=cf.findIntList("trace.regdef.gc:",
                                    optional=True,
                                    default=None),
            gcDefRaster=cf.findExistingPath("trace.gcraster:")
        )                               
    else:
        trace = TraceDef(active=False)

    log.info("Initializing result table")
    resTablePath = cf.findString("resTable:")
    resTable = DataTable(desc=[{"id": "Code", "type": unicode}])
    resTable.keys.append("Code")
    for subst in substances:
        resTable.addCol({"id": subst, "type": float, "unit": "%"})
        
    # Create emission grid template (with geocodes)
    log.info("Reading emission grid template")
    eGridTemplatePath = cf.findExistingPath("emisGridTemplatePath:")
    eGridTemplate = Egrid(eEdb,"name")
    if eGridTemplatePath[-4:] == ".asc":
        eGridTemplatePath=eGridTemplatePath[:-4]
    eGridTemplate.readData(eGridTemplatePath)
    eGridTemplate.substances = {}
    eGridTemplate.par["SUBSTANCE"].val = []
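    # Shared state passed to the regionalization helper functions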
    dd = {"key": None,
          "regstat": None,
          "regdef": None,
          "bu_sources": bu_sources,
          "psIndices": [],
          "units": natUnits,
          "rsrc": rsrc,
          "subdb": subdb,
          "trace": trace,
          "subgrpdb": subgrpdb
          }    

    # Process all rows in the topdown table
    for row in tdTable.data:
        code = row[tdTable.colIndex["Code"]]
        active = row[tdTable.colIndex["Active"]]
        statType = row[tdTable.colIndex["Stat_type"]]
        if active == "no":
            continue
        log.info("Code: "+code)
        
        distributed=False

        # Add '-' to the code to reach max length (fix for a GUI bug)
        airviroCode = code
#         while len(airviroCode.split(".")) < codeDepth:
#             airviroCode += ".-"
            
        tdrow = tdTable.data[tdTable.rowIndex([code])]
        nrow = natTable.data[natTable.rowIndex([code])]

        # Create a resTable row to fill with data
        resrow = [None] * resTable.ncols
        resrow[0] = code

        # Check if national totals are non-zero
        nonZero = False
        for val in nrow:
            if val is not None and val > 0:
                nonZero = True
                break

        # Filter out indices for pointsources with the current ac
        # Also including sources coded with sub-codes
        # This allows to estimate top-down emissions on a higher code-level
        psIndices = []
        for i, ps in enumerate(bu_sources):
            codeMatch = False
            
            for emis in ps.EMISSION:                
                # It is assumed that the first code is used while processing topdown
                ac = emis.ACTCODE[0]
                if ac[-1] == ".":
                    ac=ac[:-1]
#                 if ac[:len(code)] == code:                    
                if ac == code:                    
                    codeMatch = True
                    break

            if not codeMatch:
                for emis in ps.SUBGRP:                
                    # It is assumed that the first code is used while processing topdown
                    ac = emis.ACTCODE[0]
                    if ac[:len(code)] == code:                    
                        codeMatch = True
                        break

            if codeMatch:
                psIndices.append(i)

        dd["psIndices"] = psIndices

        keyName = row[tdTable.colIndex["Key"]]

        # If no distribution key is specified and no point sources exist
        # in the bottom-up edb, skip this code
        if keyName is None and psIndices == []:
            log.debug("No key and no point sources found for code: %s, skipping..." % code)
            resTable.addRow(resrow)
            continue

        if psIndices != []:
            msg = "--Found %i pointsources" % len(psIndices)
            log.info(msg)

        if keyName is not None:
            if keyName not in keys:
                log.error("No such key: " + keyName)
                sys.exit()

            msg = "--Key: %s" % keyName
            log.info(msg)
            keyGrid = Egrid(tdEdb, keyName)
            keyGrid.readData()
            log.debug("Read key: " + keyName + " from topdownEdb")

            # create emission grid to store distributed emissions
            eGrid = deepcopy(eGridTemplate)
            eGrid.name = code.replace(".", "_")
            eGrid.par["NAME"].val = code
            eGrid.par["INFO2"].val = "Distribution key: " + keyGrid.par["NAME"].val
            eGrid.par["ACTIVITYCODE"].val = [airviroCode.split(".")]

        regstatName = row[tdTable.colIndex["Regstat"]]
        regdefName = row[tdTable.colIndex["Regdef"]]
                
        if regstatName is not None:
            if regdefName is None:
                log.error("No region definition given for regional statistics: " +
                          regstatName)
                sys.exit(1)
            regstatPath = path.join(dmn.domainPath(), "topdown", "regstat", regstatName)
            regstat = DataTable()
            log.info("regstatPath: "+regstatPath)
            regstat.read(regstatPath, units=True, defaultType=float, delimiter=";")
            if "Geocode" not in regstat.listIds():
                log.error("No Geocode column found in regstat")
                sys.exit(1)
            regstat.convertCol("Geocode", int)
            regstat.keys.append("Geocode")  # Making Geocode the primary key

            # create list of unique geo codes
            geocodes = [row[regstat.colIndex["Geocode"]] for row in regstat.data]
            geocodes = unique(geocodes)


            for colId in regstat.listIds():
                if colId.lower() == "year":
                    rows = []
                    regstat.convertCol(colId, int)
                    # Make it possible to accumulate year
                    regstat.setKeys(regstat.keys + [colId])
            
                    # Calculates the total emission for each geocode
                    # in case there are multiple rows for different fuels etc
                    colsToSum = regstat.listIds()
                    colsToSum.remove(colId)
                    colsToSum.remove("Geocode")
                    for gc in geocodes:
                        # sums all numeric values in colsToSum for
                        # rows matching row id [gc,year]
                        #returns an accumulated row and appends it to rows
                        rowId = regstat.dict2RowId({"Geocode": gc, colId: year})
                        rows.append(regstat.accumulate(rowId, "sum", colsToSum))
                    regstat.data = rows  # replace original rows with accumulated rows
                    regstat.keys.remove(colId)
                    break
                
#             dd["regstat"] = regstat
            regdef = Raster()
            regdefPath = path.join(dmn.domainPath(), "topdown", "regdef", regdefName)
            regdef.read(regdefPath)

            dd["regstat"] = regstat
            dd["regdef"] = regdef
        else:
            dd["regstat"] = None
            dd["regdef"] = None

        if dd["regstat"] is not None and len(bu_sources) > 0 and statType == "fixed":
            log.info("--Regionalizing pointsources")
            dd = regionalizePS(dd, code)

        if keyName is not None and nonZero:
            regionalizedDefault = False
            # Spatial distribution of emissions
            for subst in substances:
                
                sInd = subdb.substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]                
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # TODO: should give reference to subgrps to include emis from them
                    pstot += source.get_emis(
                        sInd,
                        toUnit,
                        eEdb,
                        actcodes=[code]
                    )

                if ntot is None or ntot == 0:
                    if pstot > 0:
                        # 9999 is used as marker for no national total 
                        resrow[resTable.colIndex[subst]] = 9999.0
                        log.warning(
                            "Nattot is 0 but ps tot is: %f %s" % (pstot, toUnit))
                    continue
                
                nrest = ntot - pstot

                resrow[resTable.colIndex[subst]] = 100.0
            
                if abs(nrest / ntot) < 0.0001:
                    nrest = 0
                    log.info(
                        "--Rest is < 0.01 % of national total, rounded to zero"
                    )
                    continue
                elif nrest < 0:
                    log.warning(
                        "--National rest is below zero, %4.2f percent for %s" % (
                            -1 * nrest / ntot * 100,
                             subst)
                    )
                    dd["trace"].write()
#                    continue
                log.info(
                    "---Substance: " + subst +
                    ", rest is: " + str(nrest) + " " +
                    toUnit + " = " + str(nrest / ntot * 100.0) + "%"
                )
                
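                # Use a substance-specific key if present, otherwise fall back to the key for 'all'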
                try: 
                    keyRast = keyGrid.substances[sInd]
                except KeyError:
                    keyRast = keyGrid.substances[subdb.substIndices["all"]]
                    
                dd["key"] = keyRast
                if dd["regstat"] is not None:
                    if (subst not in regstat.colIndex and 
                        sInd not in keyGrid.substances and not regionalizedDefault):
                        dd = regionalizeKey(dd, subst, code)
                        regionalizedDefault = True                                    
                    else:
                        dd = regionalizeKey(dd, subst, code)
                    
                emisRast = distribute(dd["key"], nrest)
                emisRast = emisRast * unitConvFac(toUnit, "ton/year")
                eGrid.addData(emisRast, dd["subdb"].substIndices[subst])
                distributed = True

        else:
            # resTable is filled
            # In case all national totals are zero but there are ps
            for subst in substances:
                sInd = dd["subdb"].substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]               
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # subgrps are not used!
                    pstot += source.get_emis(sInd, toUnit, buEdb,
                                             actcodes=[code])

                if ntot is not None and ntot != 0:
                    resrow[resTable.colIndex[subst]] = pstot / ntot * 100.0
                else:
                    resrow[resTable.colIndex[subst]] = -999.0

        if len(dd["psIndices"]) > 0:
            tmp_sources = (bu_sources[i] for i in dd["psIndices"])
            with SourceStream(eEdb, mode='wb') as out_source_stream:
                source_writer = ModelWriter(out_source_stream)
                for source in tmp_sources:
                    source_writer.write(source)
            log.debug("Wrote ps to emission edb")

        if distributed:
            eGrid.load()
            log.debug("Wrote emission grid to emission edb")    

        dd["trace"].write()
        resTable.addRow(resrow)

    # Write the per-code result table to file
    with open(resTablePath, "w") as resTableFile:
        resTable.write(resTableFile)

    log.info("Finished topdown process")