Example no. 1
def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument(
        "--source_srs",
        action="store", dest="source_srs", required=True,
        help="Input SRS"
    )

    parser.add_argument(
        "--target_srs",
        action="store", dest="target_srs", required=True,
        help="Output SRS"
    )

    parser.add_argument(
        "-o",
        action="store",
        dest="outdir",
        help='Output directory',
        required=True
    )

    parser.add_argument(
        "-i",
        action='store',
        dest='infile',
        help='Input result data file, default is stdin'
    )

    parser.add_argument(
        "-t",
        action='store',
        dest='template',
        help='Output grid definition (header)'
    )

    parser.add_argument(
        "-f",
        action='store',
        dest='factor',
        type=int,
        help='Refinement factor used before resampling'
    )

    args = parser.parse_args()

    instream = None
    if args.infile is not None:
        instream = gzip.open(args.infile, 'r')

    instream = codecs.getreader(ENCODING)(instream or sys.stdin)

    reader = ResultReader(instream, res_type=EsriGridTimeSeriesResult())
    fields = iter(reader)

    source_srs = osr.SpatialReference()
    target_srs = osr.SpatialReference()
    source_srs.ImportFromProj4(args.source_srs)
    target_srs.ImportFromProj4(args.target_srs)
    coord_trans = osr.CoordinateTransformation(source_srs, target_srs)

    outfield_def = parse_esri_ascii_grid_header(iter(open(args.template, 'r')))
    outfield_def['proj'] = target_srs.ExportToWkt()

    infield_def = {}
    infield_def['x1'] = reader.res.x1
    infield_def['x2'] = reader.res.x2
    infield_def['y1'] = reader.res.y1
    infield_def['y2'] = reader.res.y2

    field_defined = False
    for timestamp, field in fields:
        log.info("processing %s" % timestamp.strftime('%y%m%d %H'))
        if not field_defined:
            infield_def['nx'] = field.shape[1]
            infield_def['ny'] = field.shape[0]
            infield_def['dx'] = (
                infield_def['x2'] - infield_def['x1']) / infield_def['nx']
            infield_def['dy'] = (
                infield_def['y2'] - infield_def['y1']) / infield_def['ny']
            field_defined = True

        # refine grid by factor and sort into new field with output projection
        outfield = inverse_nearest_neighbour(
            field,
            infield_def,
            coord_trans,
            outfield_def,
            factor=args.factor
        )
        outfile = gzip.open(
            path.join(
                args.outdir,
                "q_BC_%s.gz" % timestamp.strftime('%y%m%d%H%M')),
            'w'
        )
        outfile.write('%(x1)i %(y1)i\n' % outfield_def)
        outfile.write('%(nx)i %(ny)i\n' % outfield_def)
        outfile.write('%(dx)i %(dy)i\n' % outfield_def)
        np.savetxt(outfile, outfield, fmt=b'%.6g')
        outfile.close()
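
Note: parse_esri_ascii_grid_header is an external helper that is not shown. A minimal sketch of what it might look like, assuming the template uses the standard ESRI ASCII grid header keywords and the x1/y1/x2/y2/nx/ny/dx/dy keys that the script reads and writes:

def parse_esri_ascii_grid_header(lines):
    # Read the six standard ESRI ASCII grid header records
    # (ncols, nrows, xllcorner, yllcorner, cellsize, NODATA_value)
    # and map them onto the keys used above.
    header = {}
    for _ in range(6):
        key, value = next(lines).split()
        header[key.lower()] = float(value)
    nx = int(header['ncols'])
    ny = int(header['nrows'])
    dx = dy = header['cellsize']
    x1 = header['xllcorner']
    y1 = header['yllcorner']
    return {
        'nx': nx, 'ny': ny, 'dx': dx, 'dy': dy,
        'x1': x1, 'y1': y1,
        'x2': x1 + nx * dx, 'y2': y1 + ny * dy,
    }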
Example no. 2
def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument(
        '-i', required=True,
        action='store', dest='macro',
        help='Macro specifying result',
    )

    parser.add_argument(
        '-t', '--timestamp',
        action="store", dest='timestamp',
        type=lambda s: datetime.datetime.strptime(s, '%y%m%d%H'),
        help="Hour to export as 'YYMMDDhh, " +
        "default is first timestamp of result'"

    )

    parser.add_argument(
        "--srs",
        action="store", dest="srs",
        help="Output SRS alias (from projections.rf)"
    )

    parser.add_argument(
        '--alpha',
        action='store',
        dest='alpha',
        type=int,
        help='Output alpha value (0-255)',
        default=0
    )

    parser.add_argument(
        '--factor',
        action='store',
        dest='factor',
        help="Refinement factor for resampling",
        type=int,
        default=2
    )

    parser.add_argument(
        '--no-cache',
        action='store_false',
        dest='cache',
        help='Do not use cached images.'
    )

    parser.add_argument(
        '--norm-area',
        action='store_true',
        dest='norm_area',
        help='Normalize field by area (e.g. g/(s*km2))'
    )

    args = parser.parse_args()

    field_def = get_field_definition_from_macro(args.macro)
    levels, rgba = get_colormap_from_disp_macro(args.macro, args.alpha)
    colormap = colormap_from_levels(levels, rgba)
    field = get_field(field_def, args.timestamp)
    ny, nx = field.shape
    dx = (field_def.x2 - field_def.x1) / nx
    dy = (field_def.y2 - field_def.y1) / ny
    viewport = ViewPort(field_def.area)
    viewport.read()
    inproj = get_proj4(viewport.proj)
    # src_srs = osr.SpatialReference()
    # src_srs.ImportFromProj4(inproj)
    # infield_def = {
    #     'x1': field_def.x1,
    #     'x2': field_def.x2,
    #     'y1': field_def.y1,
    #     'y2': field_def.y2,
    #     'nx': nx,
    #     'ny': ny,
    #     'dx': dx,
    #     'dy': dy,
    #     'proj': src_srs
    # }

    # tgt_srs = osr.SpatialReference()
    # if args.srs is not None:
    #     tgt_srs.ImportFromProj4(args.srs)
    # else:
    #     tgt_srs.SetWellKnownGeogCS('WGS84'.encode("ascii"))
    # coord_trans = osr.CoordinateTransformation(src_srs, tgt_srs)

    # # tranform srs for corners
    # xll, yll, _ = coord_trans.TransformPoint(
    #     infield_def['x1'], infield_def['y1']
    # )
    # xul, yul, _ = coord_trans.TransformPoint(
    #     infield_def['x1'], infield_def['y2']
    # )
    # xur, yur, _ = coord_trans.TransformPoint(
    #     infield_def['x2'], infield_def['y2']
    # )
    # xlr, ylr, _ = coord_trans.TransformPoint(
    #     infield_def['x2'], infield_def['y1']
    # )

    # # outfield_def = {}
    # outfield_def = {
    #     'x1': min(xll, xul),
    #     'x2': max(xlr, xur),
    #     'y1': min(yll, ylr),
    #     'y2': max(yul, yur),
    #     'nx':
    # outfield_def['ny'] =

    # outfield_def.proj = tgt_srs.ExportToWkt()
    # refine grid by factor and sort into new field with output projection
    # outfield = inverse_nearest_neighbour(
    #     field,
    #     infield_def,
    #     coord_trans,
    #     outfield_def,
    #     factor=args.factor)
    cache_gtiff = create_cache_path(field_def, args.timestamp, '.tif')
    png = cache_gtiff[:-4] + '.png'
    if args.cache and valid_cache(field_def, args.macro, png):
        log.info('Using cached png: %s' % png)
        # a cache hit is a normal, successful exit
        sys.exit(0)

    field2GDAL(
        field_def, field,
        cache_gtiff,
        proj=inproj
    )
    warp_vrt = cache_gtiff[:-4] + '_warped.vrt'
    warp(cache_gtiff, warp_vrt, inproj)
    rgba_vrt = cache_gtiff[:-4] + '_rgba.vrt'
    color_relief(warp_vrt, rgba_vrt, colormap)
    translate(rgba_vrt, png, 'PNG')
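
Note: warp, color_relief and translate are thin wrappers that are not shown. A minimal sketch of equivalent behaviour, assuming the standard GDAL command line utilities are on PATH, that the SRS passed to warp is the source projection (the commented-out code above targets WGS84), and that colormap ends up as a gdaldem colour text file:

import subprocess

def warp(src, dst, s_srs):
    # Reproject from the domain projection to WGS84; -of VRT keeps
    # the intermediate step cheap.
    subprocess.check_call(
        ['gdalwarp', '-of', 'VRT', '-s_srs', s_srs, '-t_srs', 'EPSG:4326',
         src, dst])

def color_relief(src, dst, colormap):
    # colormap holds "value R G B A" rows as used by gdaldem.
    subprocess.check_call(
        ['gdaldem', 'color-relief', '-alpha', '-of', 'VRT',
         src, colormap, dst])

def translate(src, dst, driver):
    # Convert to the final raster format, e.g. 'PNG'.
    subprocess.check_call(['gdal_translate', '-of', driver, src, dst])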
Example no. 3
def main():
    # Create the domain
    domain = Domain()

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)
    utils.add_edb_command_options(parser)

    parser.add_argument(
        '-F',
        action="store_true", dest='force',
        help="Force - overwrite target edb"
    )

    parser.add_argument(
        '--delimiter', default='\t',
        action="store", dest='delimiter',
        help='Delimiter used in enviman csv-tables'
    )

    parser.add_argument(
        '-i',
        action="store", dest='indir',
        help=("Directory containing enviman tables in .csv format," +
              " named the same as Enviman MS Excel sheets, " +
              "but with space replaced by '_'")
    )

    parser.add_argument(
        '--substancemap',
        action=MappingAction, dest='substancemap',
        help=("File mapping substance indices from enviman to Airviro")
    )

    parser.add_argument(
        '--source_srs', metavar="EPSG", type=int,
        action='store', dest='source_srs',
        help='Coordinate system of enviman data'
    )

    parser.add_argument(
        '--target_srs', metavar="EPSG", type=int,
        action='store', dest='target_srs',
        help='Coordinate system of Airviro data'
    )

    args = parser.parse_args()
    edb = Edb(domain, args.user, args.edb)

    PAG_Source_path = path.join(args.indir, 'PAG_Source.csv')
    AnEm_Table_path = path.join(args.indir, 'AnEm_Table.csv')
    Mon_Table_path = path.join(args.indir, 'Mon_Table.csv')
    Hour_Table_path = path.join(args.indir, 'Hour_Table.csv')
    Comb_Table_path = path.join(args.indir, 'Comb_Table.csv')
    Comb_Def_path = path.join(args.indir, 'Comb_Def.csv')

    subgrp_ind = get_max_subgrp_index(edb) + 1
    timevar_ind = get_max_timevar_index(edb) + 1

    if path.exists(PAG_Source_path):
        sources = read_sources(
            PAG_Source_path,
            args.delimiter
        )
    else:
        sources = None
        log.warning('PAG_Source.csv not found')

    if args.source_srs is not None and args.target_srs is not None:
        log.debug("Preparing spatial reference transformation")
        target_srs = osr.SpatialReference()
        source_srs = osr.SpatialReference()
        target_srs.ImportFromEPSG(args.target_srs)
        source_srs.ImportFromEPSG(args.source_srs)
        srs_transform = osr.CoordinateTransformation(source_srs, target_srs)
        for sourceid, source in (sources or {}).iteritems():
            transform(source, srs_transform)

    else:
        log.debug("No spatial reference transformation will be performed")

    if path.exists(AnEm_Table_path) and sources is not None:
        read_emissions(
            sources,
            AnEm_Table_path,
            args.delimiter,
            args.substancemap
        )
    else:
        log.warning('AnEm_Table.csv not found')

    if path.exists(Comb_Table_path):
        subgrps = read_subgrps(
            Comb_Table_path,
            Comb_Def_path,
            args.delimiter,
            args.substancemap
        )

    else:
        subgrps = None
        log.warning('Comb_Table.csv not found')

    if path.exists(Hour_Table_path) and path.exists(Mon_Table_path):
        timevars = read_timevars(
            sources,
            Hour_Table_path,
            Mon_Table_path,
            args.delimiter
        )
    else:
        timevars = None

    if subgrps is not None:
        for key, subgrp in subgrps.iteritems():
            subgrp.INDEX = subgrp_ind
            subgrp_ind += 1

    if timevars is not None:
        for key, tvar in timevars.iteritems():
            tvar.INDEX = timevar_ind
            timevar_ind += 1

    unwritten_subgroups = False
    for src in (sources or {}).values():
        if timevars is not None:
            timevarid = '_'.join([src.ALOB['MonthlyID'], src.ALOB['HourlyID']])
            timevarind = timevars[timevarid].INDEX
        else:
            timevarind = 1

        if src.ALOB['Consumption'] != '0':
            if subgrps is not None:
                subgrp = subgrps[int(src.ALOB['ProcessID'])]
                src.add_subgrp(
                    SUBGRP=subgrp.INDEX,
                    ACTIVITY=float(src.ALOB['Consumption']),
                    TIMEVAR=timevarind,
                    UNIT='ton/year'
                )
            elif not unwritten_subgroups:
                log.warning(
                    "Could not add substance group emissions " +
                    "due to missing Comb Table"
                )
                unwritten_subgroups = True

        for emis in src.EMISSION:
            emis.TIMEVAR = timevarind

    if timevars is not None:
        with TimevarStream(edb, mode='w', sourcetype='point') as outstream:
            log.info('Writing timevars...')
            writer = ModelWriter(outstream)
            for tvar in timevars.values():
                log.debug('Writing timevar %i' % tvar.INDEX)
                writer.write(tvar)

    if subgrps is not None:
        with codecs.open(
            '/usr/airviro/data/SCAC/prod/industry/enviman_subgrps.txt',
            mode='w',
            encoding='HP Roman8') as outstream:
#        with SubgrpStream(edb, mode='w') as outstream:
            log.info('Writing subgrps...')
            writer = ModelWriter(outstream)
            for subgrp in subgrps.values():
                log.debug('Writing subgrps %i' % subgrp.INDEX)
                writer.write(subgrp)
    with codecs.open(
        '/usr/airviro/data/SCAC/prod/industry/enviman_sources.txt',
        mode='w',
        encoding='HP Roman8') as outstream:

#    with SourceStream(edb, mode='w') as outstream:
        log.info('Writing sources...')
        writer = ModelWriter(outstream)
        for src in (sources or {}).values():
            log.debug('Writing source %s' % src.NAME)
            writer.write(src)
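
Note: the transform helper that reprojects each source is not included above. A minimal sketch, assuming point sources whose coordinates are stored in X1/Y1 attributes (hypothetical names):

def transform(source, srs_transform):
    # Reproject the source coordinates in place using the
    # osr.CoordinateTransformation built from the EPSG codes.
    x, y, _ = srs_transform.TransformPoint(float(source.X1), float(source.Y1))
    source.X1 = x
    source.Y1 = y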
Example no. 4
def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)

    utils.add_standard_command_options(parser)

    parser.add_argument(
        "-f", "--format", action="store", dest="format", default="ESRI Shapefile", help="Format of road network"
    )

    parser.add_argument("--inRoads", required=True, action="store", dest="inRoads", help="Input road network")

    parser.add_argument("--outRoads", required=True, action="store", dest="outRoads", help="Output road network")

    parser.add_argument(
        "--buildings", required=True, action="store", dest="buildings", help="Input building roof contours (3D)"
    )

    parser.add_argument("--topo", required=True, action="store", dest="topo", help="Input raster DEM")

    parser.add_argument(
        "--split",
        type=int,
        action="store",
        dest="split",
        help="Threshold in changed road direction (degrees)" + " for when to split road",
    )

    args = parser.parse_args()

    if not path.exists(args.topo):
        log.error("Input raster does not exist")
        sys.exit(1)

    if not path.exists(args.buildings):
        log.error("Input building contours do not exist")
        sys.exit(1)

    log.info("Reading DEM")
    topo = readGDAL(args.topo, bandIndex=1)[0]

    # Opening driver for road networks
    driver = ogr.GetDriverByName(args.format)
    if driver is None:
        log.error("Invalid format for road networks, check ogr documentation")
        sys.exit(1)

    if args.split is None:
        log.info("Do not split roads")
        splitLimit = None
    else:
        splitLimit = float(args.split)
        log.info("Split roads that change direction" + " more than %f" % splitLimit)

    # extract extent from topo raster
    xmin = topo.xll
    ymin = topo.yll
    xmax = topo.xur()
    ymax = topo.yur()

    # Calculate dimensions of spatial index
    ncols = int((xmax - xmin) / CELLSIZE)
    nrows = int((ymax - ymin) / CELLSIZE)

    # Init spatial index of building contours
    spatInd = SpatialIndex(xmin, ymin, nrows, ncols, CELLSIZE)

    log.info("Reading and indexing building contours")
    # Read buildings and store using spatial indices
    spatInd.indexBuildingContours(args.buildings)

    # open road network shape-file
    log.info("Reading road network")
    inRoadFile = driver.Open(args.inRoads, update=0)

    if path.exists(args.outRoads):
        driver.DeleteDataSource(args.outRoads)

    outRoadFile = driver.CreateDataSource(args.outRoads)
    if inRoadFile is None:
        log.error("Could not open file with input road network")
        sys.exit(1)

    if outRoadFile is None:
        log.error("Could not open file with output road network")
        sys.exit(1)

    # Get layer definition and first feature of input road network
    inRoadLayer = inRoadFile.GetLayer()
    inRoadLayerDefn = inRoadLayer.GetLayerDefn()

    outRoadLayer = outRoadFile.CreateLayer("first_layer", geom_type=inRoadLayer.GetGeomType())

    # create fields on output road file
    for fieldInd in range(inRoadLayerDefn.GetFieldCount()):
        fieldDefn = inRoadLayerDefn.GetFieldDefn(fieldInd)
        outRoadLayer.CreateField(fieldDefn)

    outRoadLayerDefn = outRoadLayer.GetLayerDefn()
    fieldNames = [outRoadLayerDefn.GetFieldDefn(i).GetName() for i in range(outRoadLayerDefn.GetFieldCount())]

    log.info("Adding attributes to road feature (if missing)")
    # Add attributes for street canyon geometry
    if "BHGT1" not in fieldNames:
        outRoadLayer.CreateField(ogr.FieldDefn("BHGT1", ogr.OFTInteger))
    if "BHGT2" not in fieldNames:
        outRoadLayer.CreateField(ogr.FieldDefn("BHGT2", ogr.OFTInteger))
    if "BHGT1W" not in fieldNames:
        outRoadLayer.CreateField(ogr.FieldDefn("BHGT1W", ogr.OFTInteger))
    if "BHGT2W" not in fieldNames:
        outRoadLayer.CreateField(ogr.FieldDefn("BHGT2W", ogr.OFTInteger))
    if "BANG1" not in fieldNames:
        outRoadLayer.CreateField(ogr.FieldDefn("BANG1", ogr.OFTInteger))
    if "BANG2" not in fieldNames:
        outRoadLayer.CreateField(ogr.FieldDefn("BANG2", ogr.OFTInteger))
    if "BDIST" not in fieldNames:
        outRoadLayer.CreateField(ogr.FieldDefn("BDIST", ogr.OFTInteger))
    if "BSECT" not in fieldNames:
        fieldDefn = ogr.FieldDefn("BSECT", ogr.OFTString)
        fieldDefn.SetWidth(40)
        outRoadLayer.CreateField(fieldDefn)
    if "BSECTW" not in fieldNames:
        fieldDefn = ogr.FieldDefn("BSECTW", ogr.OFTString)
        fieldDefn.SetWidth(40)
        outRoadLayer.CreateField(fieldDefn)

    fig1 = plt.figure(1)
    ax1 = plt.subplot(111)
    ax1.axis("equal")
    if PLOTIND > 0:
        spatInd.plot(ax1)

    roadInd = 0
    noGeom = 0
    nsplit = 0
    # get first road feature
    inRoadFeature = inRoadLayer.GetNextFeature()
    # Loop over all roads
    log.info("Finding nearest facades, setting heights...")
    pg = ProgressBar(inRoadLayer.GetFeatureCount(), sys.stdout)
    while inRoadFeature:
        pg.update(roadInd)
        outRoadFeatures = splitRoad(inRoadFeature, splitLimit, outRoadLayer.GetLayerDefn())
        if len(outRoadFeatures) > 1:
            log.debug("Raod split into %s parts" % len(outRoadFeatures))
        nsplit += len(outRoadFeatures) - 1
        for outRoadFeature in outRoadFeatures:
            intersections = []
            outRoadGeom = outRoadFeature.GetGeometryRef()
            road = Road(outRoadGeom)
            if outRoadGeom is None or outRoadGeom.GetPointCount() == 0 or not spatInd.inside(road):
                noGeom += 1
                maxHeight1 = None
                maxHeight2 = None
                avgDist = None
                bAngle1 = None
                bAngle2 = None
                avgHeight1 = None
                avgHeight2 = None
            else:
                sumHeight1 = 0
                sumHeight2 = 0
                maxHeight1 = 0
                maxHeight2 = 0
                sumDist = 0
                # Define crossections along the road,
                # Defined by start and endpoints at both side of the road
                cs1List, cs2List = road.defineCrossSections()
                nCS = len(cs1List)
                log.debug("Defined %i cross sections" % nCS)
                # Check intersections with building contours for all cross-sections
                for csInd in range(nCS):
                    cs1 = cs1List[csInd]
                    cs2 = cs2List[csInd]
                    cs1MidPoint = cs1.P0 + 0.5 * (cs1.P1 - cs1.P0)
                    buildingSegments = spatInd.getBuildingSegments(cs1MidPoint[0], cs1MidPoint[1])

                    log.debug("Calculating intersection")
                    if PLOTIND == roadInd and CSIND == csInd:
                        dist1, Pint1 = getIntersectingFacade(ax1, cs1, buildingSegments, True)
                    else:
                        dist1, Pint1 = getIntersectingFacade(ax1, cs1, buildingSegments, False)
                    if Pint1 is None:
                        log.debug("No intersection on side 1")
                        height1 = 0
                        dist1 = MAXDIST
                    else:
                        log.debug("Intersection1 in (%f, %f, %f)" % (Pint1[0], Pint1[1], Pint1[2]))
                        height1 = spatInd.getBuildingHeight(Pint1[0], Pint1[1], Pint1[2], topo) + HEIGHTCORR
                        intersections.append(Pint1[:2])

                    if PLOTIND == roadInd and csInd == CSIND:
                        plotSegments(ax1, buildingSegments, color="red", width=2.0)
                        row, col = spatInd.getInd(cs1MidPoint[0], cs1MidPoint[1])
                        spatInd.plotCell(ax1, row, col, color="purple", width=2.0)
                        plotSegments(ax1, [cs1List[csInd]], color="pink", style="-", width=1.0)
                        plt.draw()

                    cs2MidPoint = cs2.P0 + 0.5 * (cs2.P1 - cs2.P0)
                    buildingSegments = spatInd.getBuildingSegments(cs2MidPoint[0], cs2MidPoint[1])

                    if PLOTIND == roadInd and csInd == CSIND:
                        plotSegments(ax1, buildingSegments, color="red", width=2.0)
                        row, col = spatInd.getInd(cs2MidPoint[0], cs2MidPoint[1])
                        spatInd.plotCell(ax1, row, col, color="brown", width=2.0)
                        plotSegments(ax1, [cs2List[csInd]], color="red", style="-", width=1.0)
                        plt.draw()

                    log.debug("Calculating intersection")
                    if PLOTIND == roadInd and CSIND == csInd:
                        dist2, Pint2 = getIntersectingFacade(ax1, cs2, buildingSegments, True)
                    else:
                        dist2, Pint2 = getIntersectingFacade(ax1, cs2, buildingSegments, False)

                    if Pint2 is None:
                        log.debug("No intersection on side 2")
                        height2 = 0
                        dist2 = MAXDIST
                    else:
                        log.debug("Intersection2 in (%f, %f, %f)" % (Pint2[0], Pint2[1], Pint2[2]))
                        height2 = spatInd.getBuildingHeight(Pint2[0], Pint2[1], Pint2[2], topo) + HEIGHTCORR
                        intersections.append(Pint2[:2])

                    sumHeight1 += height1
                    sumHeight2 += height2
                    sumDist += dist1 + dist2
                    maxHeight1 = int(max(height1, maxHeight1))
                    maxHeight2 = int(max(height2, maxHeight2))
                    if PLOTIND == roadInd and CSIND == csInd:
                        if Pint1 is not None:
                            ax1.text(Pint1[0], Pint1[1], "Distance=%f" % dist1)
                        if Pint2 is not None:
                            ax1.text(Pint2[0], Pint2[1], "Distance=%f" % dist2)

                avgHeight1 = int(sumHeight1 / float(nCS))
                avgHeight2 = int(sumHeight2 / float(nCS))
                # averaging over both sides of street
                # distance refers to between facades on opposite sides
                avgDist = int(round(sumDist / float(nCS)))
                bAngle1, bAngle2 = road.normalAngles()
                if PLOTIND > 0:
                    plotSegments(ax1, road.getSegments(), color="grey", width=0.3)
                if PLOTIND == roadInd:
                    plotSegments(ax1, road.getSegments(), color="black", width=2.0)
                    plotSegments(ax1, cs1List, color="green", style="--", width=0.5)
                    plotSegments(ax1, cs2List, color="green", style="--", width=0.5)

                    X = [intersect[0] for intersect in intersections]
                    Y = [intersect[1] for intersect in intersections]
                    if len(X) > 0:
                        ax1.plot(X, Y, "*")
                    plt.title("Road %i, cross-section %i" % (PLOTIND, CSIND))
                    plt.draw()

            # building height as list of sectors
            bsect = bheight2sect(avgHeight1, avgHeight2, bAngle1)
            bsectw = bheight2sect(maxHeight1, maxHeight2, bAngle1)

            outRoadFeature.SetField("BSECT", bsect)
            outRoadFeature.SetField("BSECTW", bsectw)
            outRoadFeature.SetField("BHGT1", avgHeight1)
            outRoadFeature.SetField("BHGT2", avgHeight2)
            outRoadFeature.SetField("BHGT1W", maxHeight1)
            outRoadFeature.SetField("BHGT2W", maxHeight2)
            outRoadFeature.SetField("BANG1", bAngle1)
            outRoadFeature.SetField("BANG2", bAngle2)
            outRoadFeature.SetField("BDIST", avgDist)

            outRoadLayer.CreateFeature(outRoadFeature)
            outRoadFeature.Destroy()
        inRoadFeature.Destroy()
        inRoadFeature = inRoadLayer.GetNextFeature()
        roadInd += 1

    inRoads = inRoadLayer.GetFeatureCount()
    outRoads = outRoadLayer.GetFeatureCount()
    # close road network datasources
    inRoadFile.Destroy()
    outRoadFile.Destroy()
    pg.finished()
    if PLOTIND > 0:
        plt.show()

    log.info("Read %i roads, wrote %i roads (created %i by splitting)" % (inRoads, outRoads, nsplit))

    if noGeom > 0:
        log.warning("Found %i roads without geometry" % noGeom)

    log.info("Finished")
Example no. 5
def main():
    #-----------Setting up and using option parser------------------------
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)
    utils.add_edb_command_options(parser)

    parser.add_argument("-o","--outfile",
                      action="store",dest="filename",default=None,
                      help="Name of output file with sources that fail the verification")

    args = parser.parse_args()

    if args.filename is None:
        parser.error("Need to specify output file for verification errors")

    outfile = codecs.open(path.abspath(args.filename), "w", "HP Roman8")

    # Get current domain
    domainName = os.environ["AVDBNAME"]
    dmn = domain.Domain(domainName)

    if args.source_edb is None:
        parser.error("Need to specify edb")
    if args.source_user is None:
        parser.error("Need to specify user")

    # Check if edb exists
    if not dmn.edbExistForUser(args.source_edb, args.source_user):
        log.error("Edb " + args.source_edb + " does not exist for user " +
                  args.source_user + " in domain " + domainName)
        sys.exit(1)

    edb = Edb(dmn, args.source_user, args.source_edb)
    rsrc = edb.rsrc
    source_stream = SourceStream(edb, mode='r')

    source_reader = ModelReader(source_stream)

    nerrors = 0
    for src in source_reader:
        log.debug("Looking at source %s" % src.NAME)
        err = False
        gc = src.GEOCODE

        if len(gc) != len(rsrc.gc):
            log.info("Wrong number of geocodes, source: %s" % src.NAME)
            err = True
            nerrors += 1
        else:
            # Check if each code exists in its corresponding code tree
            for i, code in enumerate(gc):
                if not rsrc.gc[i].hasCode(code):
                    log.info("Code: %s in source %s not found in edb.rsrc" % (
                        code, src.NAME))
                    nerrors += 1
                    err = True

        for emis in chain(src.EMISSION, src.SUBGRP, src.ACTIVITY):
            ac = emis.ACTCODE
            log.debug("AC: " + str(ac))

            if len(ac) != len(rsrc.ac):
                log.info("Wrong number of activity codes, source: %s" % src.NAME)
                err = True
                nerrors += 1
            else:
                # Check if each code exists in its corresponding code tree
                for i, code in enumerate(ac):
                    if code[-1] == '.':
                        code = code[:-1]
                    if not rsrc.ac[i].hasCode(code):
                        log.info("Code: %s in source %s not found in rsrc" % (
                            code, src.NAME))
                        nerrors += 1
                        err = True

        if err:
            outfile.write(src.NAME + "\n")

    log.info("Number of errors found: %i" % nerrors)
    outfile.close()
Example no. 6
def main():

    domain = Domain()

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument(
        '--site',
        action='store', dest='site',
        help='APUB-site',
    )

    # parser.add_argument(
    #     '-i', required=True,
    #     action='store', dest='res',
    #     help='.RES file of the input result',
    # )

    # parser.add_argument(
    #     '--ext', required=True,
    #     action='store', dest='ext',
    #     help='ext for the result field to be exported',
    # )

    # parser.add_argument(
    #     '-d', '--date',
    #     action="store", dest='date',
    #     type=lambda s: datetime.datetime.strptime(s, '%y%m%d%H'),
    #     help="Time stamp to show 'YYMMDDhh, " +
    #     "default is first timestamp of result'"
    # )

    # parser.add_argument(
    #     '--substance',
    #     action='store', dest='substance',
    #     help='Substance/ext for the result field to be imported',
    # )

    parser.add_argument(
        '--areaid',
        action='store', dest='areaid',
        help='Area id'
    )

    args = parser.parse_args()

    site = os.environ.get("SITE", None) or args.site
    if site is None:
        log.error("No apub site specified")
        sys.exit(1)

    with open(path.join('/var/www/html',
                        site,
                        'gmapgridoverlay.htm')) as html_template:
        template = Template(html_template.read())

    rf = ControlFile(
        path.join(os.environ["RSRCDIR"],
                  "apub." + site + ".gmapgridoverlay.rf"),
	"HP Roman8"
    )

    form = cgi.FieldStorage()
    viewports = domain.listViewports()
    areaid = form.getfirst('areaid', None)
    if areaid is not None:
        areaid = cgi.escape(areaid)
    areaid = areaid or args.areaid or viewports[0]

    viewport = ViewPort()
    viewport.read(code=areaid)
    proj = get_proj4(viewport.proj)
    data = get_latlon_bounds(
            viewport.xmin(),
            viewport.ymin(),
            viewport.xmax(),
            viewport.ymax(),
            proj)

    print(template.substitute(data))
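
Note: get_latlon_bounds is an external helper. A minimal sketch of the idea using osr, assuming the template expects WGS84 corner coordinates under hypothetical north/south/east/west keys:

from osgeo import osr

def get_latlon_bounds(xmin, ymin, xmax, ymax, proj4):
    # Transform the viewport corners from the domain projection
    # to WGS84 lat/lon for the Google Maps overlay template.
    src = osr.SpatialReference()
    src.ImportFromProj4(proj4)
    tgt = osr.SpatialReference()
    tgt.SetWellKnownGeogCS('WGS84')
    # (with GDAL 3+ an explicit traditional x,y axis order may be needed)
    ct = osr.CoordinateTransformation(src, tgt)
    west, south, _ = ct.TransformPoint(xmin, ymin)
    east, north, _ = ct.TransformPoint(xmax, ymax)
    return {'north': north, 'south': south, 'east': east, 'west': west}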
Example no. 7
def main():
    domain = Domain()

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument(
        "-t", "--templatedir",
        action="store", dest="templatedir",
        help="Generate grid .asc template in specified dir"
    )

    parser.add_argument(
        "-i", "--inputdir",
        action="store", dest="inputdir",
        help="Directory containing input files"
    )

    parser.add_argument(
        "-u", "--user",
        action="store", dest="user",
        help="User name (to fetch substance group info)"
    )

    parser.add_argument(
        "-e", "--edb",
        action="store", dest="edb",
        help="EDB name (to fetch substance group info)"
    )

    parser.add_argument(
        "-d", "--dynamicdir",
        action="store", dest="dynamicdir",
        help="Directory with grids for dynamic parameters"
    )

    parser.add_argument(
        "--intensity",
        action="store_true", dest="intensity",
        help="If input rasters are given as ton/(year*km2)"
    )

    args = parser.parse_args()

    if args.templatedir is not None:
        generateTemplate(
            path.join(args.templatedir,
                      "grid_template.asc")
        )

        log.info("Wrote default grid template")
        sys.exit()

    substances = domain.listSubstanceIndices()
    log.debug(
        "Using substance list of current domain: " +
        domain.name
    )

    if args.user is not None and args.edb is not None:
        edb = Edb(domain, args.user, args.edb)

        with SubgrpStream(edb, mode='r') as subgrpstream:
            subgrp_reader = ModelReader(subgrpstream)
            subgrps = list(subgrp_reader)
    else:
        subgrps = None

    if args.inputdir is None:
        log.error("Missing required argument -i/--inputdir")
        sys.exit(1)
    dirs = glob.glob(args.inputdir)
    msg = "directories:" + str([path.basename(d) for d in dirs])
    log.info(msg)

    dyndir = args.dynamicdir
    if dyndir is not None:
        dyndir = path.abspath(dyndir)
        # rasters shared between all grids, picked up per directory below
        shared_dynamic_rasters = glob.glob(path.join(dyndir, "*.txt"))

    for d in dirs:
        log.debug("Processing: " + d)

        raster_paths = glob.glob(path.join(d, "*.txt"))
        if len(raster_paths) == 0:
            log.error("No rasters in directory: " + d)
            sys.exit(1)
        log.debug(
            "Rasters in directory: " +
            str([path.basename(r) for r in raster_paths])
        )

        substance_rasters = []
        subgrp_rasters = []
        dynamic_rasters = []

        for rp in raster_paths:
            gridname = path.basename(rp)
            nameparts = gridname.split("__")

            log.debug("Raster prefix: " + nameparts[0])

            if len(nameparts) < 3:
                # invalid raster name
                msg = ("Not able to extract prefix (substance, " +
                       "substance group or dynamic) from raster " +
                       "filename %s" % path.basename(rp))
                log.error(msg)
                sys.exit(1)

            if nameparts[0] == "subgrp":
                subgrp_rasters.append(rp)

            elif nameparts[0] == "substance":
                substance_name = nameparts[1]
                try:
                    substance_index = substances[substance_name]
                except KeyError:
                    try:
                        substance_index = int(substance_name)
                    except ValueError:
                        log.error(
                            "Substance: " + substance_name +
                            " not found in subdb of current domain: " +
                            domain.name
                        )
                        sys.exit(1)
                substance_rasters.append(rp)

            elif nameparts[0] == "dynamic":
                try:
                    dyn_name = gridname.split("__")[1]
                    dyn_level = int(path.basename(rp).split("__")[2])
                except (IndexError, ValueError):
                    log.error(
                        "Could not extract name of dynamic " +
                        "parameter and level for raster: " + gridname
                    )
                    sys.exit(1)
                dynamic_rasters.append(rp)

            else:
                log.error(
                    "Prefix of raster: " + nameparts[0] +
                    " is unvalid"
                )
                sys.exit(1)

        if args.dynamicdir is not None:
            # pick up shared rasters from the dynamic directory
            # (iterating over the shared list, not the per-grid list
            # that is appended to below)
            for rp in shared_dynamic_rasters:
                gridname = path.splitext(path.basename(rp))[0]
                nameparts = gridname.split("__")
                if nameparts[0] == "dynamic":
                    try:
                        dyn_name = gridname.split("__")[1]
                        dyn_level = int(gridname.split("__")[2])
                    except (IndexError, ValueError):
                        log.error(
                            "Could not extract name of dynamic " +
                            "parameter and level for raster: " +
                            gridname
                        )
                        sys.exit(1)
                    dynamic_rasters.append(rp)

        if len(subgrp_rasters) > 1:
            log.error("There can be only one subgrp raster per grid source")
            sys.exit(1)
        if len(subgrp_rasters) > 0 and len(substance_rasters) > 0:
            log.error(
                "Both subgrp rasters and substance rasters " +
                "in the same grid/directory is not allowed"
            )
            sys.exit(1)

        asc_path = path.join(d, "grid_template.asc")
        if not path.exists(asc_path):
            msg = (
                "Could not find "
                "%s, using default template .asc file" % asc_path
            )
            log.warning(msg)
            generateTemplate(asc_path)

        grid = EmissionGrid()
        grid.read_meta_from_file(asc_path)
        rast = raster.Raster()

        if len(substance_rasters) > 0:
            log.debug("Reading substance raster: " + substance_rasters[0])
            try:
                rast.read(substance_rasters[0])
            except IOError as e:
                log.error(e)
                sys.exit(1)
            rast.nodataToZero()

        elif len(subgrp_rasters) > 0:
            log.debug("Reading subgrp raster: " + subgrp_rasters[0])
            try:
                rast.read(subgrp_rasters[0])
            except IOError as e:
                log.error(e)
                sys.exit(1)
            rast.nodataToZero()
        else:
            log.error(
                "Not possible to create grid without any substance " +
                "rasters or subgrp raster"
            )
            sys.exit(1)

        grid.X = int(rast.xll)
        grid.Y = int(rast.yll)
        grid.DX = int(rast.cellsize)
        grid.DY = int(rast.cellsize)
        grid.NX = rast.ncols
        grid.NY = rast.nrows

        if len(subgrp_rasters) > 0:
            subgrp_name = path.basename(subgrp_rasters[0]).split("__")[1]
            if subgrps is None:
                subgrp_index = 1
            else:
                subgrp_index = next(
                    (s.INDEX for s in subgrps if s.NAME == subgrp_name),
                    None
                )

            if subgrp_index is None:
                log.warning(
                    "Could not find subgrp named " +
                    "%s in edb, using default index 1" % subgrp_name
                )
                subgrp_index = 1

            grid.FUEL = subgrp_index
            log.debug("Adding subgrp raster to grid")
            rast.read(subgrp_rasters[0])
            rast.nodataToZero()
            if args.intensity:
                # Convert from ton/(year*km2) to ton/year
                rast.data *= rast.cellsize * rast.cellsize / 1.0e6
            grid.add_field_from_raster(rast, subgrp_index, subgrp=True)
        else:
            for rp in substance_rasters:
                rast.read(rp)
                rast.nodataToZero()
                if args.intensity:
                    # Convert from ton/(year*km2) to ton/year
                    rast.data *= rast.cellsize * rast.cellsize / 1.0e6
                subst = path.basename(rp).split("__")[1]
                try:
                    substance_index = substances[subst]
                except KeyError:
                    substance_index = int(subst)

                log.debug(
                    "Adding substance " + subst + " to grid")
                grid.add_field_from_raster(rast, substance_index)

        dynamic_raster_dict = {"GEOCODE": [], "ACTIVITYCODE": []}
        for dr in dynamic_rasters:
            nameparts = path.basename(dr).split("__")
            if nameparts[1] == "GEOCODE":
                dynamic_raster_dict["GEOCODE"].append(dr)
            elif nameparts[1] == "ACTIVITYCODE":
                dynamic_raster_dict["ACTIVITYCODE"].append(dr)
            else:
                dynamic_raster_dict[nameparts[1]] = [dr]

        # Sort the code rasters by code level
        # (the key function extracts the code level from the raster file name)
        def code_level(raster_path):
            return int(path.basename(raster_path)[:-4].split("__")[2])

        dynamic_raster_dict["GEOCODE"].sort(key=code_level)
        dynamic_raster_dict["ACTIVITYCODE"].sort(key=code_level)

        for dp in dynamic_raster_dict.keys():
            if len(dynamic_raster_dict[dp]) > 0:
                log.debug(
                    "Adding dynamic raster for " + dp +
                    ": " + str(dynamic_raster_dict[dp])
                )
                grid.addDynamicRasters(dp, dynamic_raster_dict[dp])
        grid_dir = path.abspath(d)
        grid_name = path.basename(grid_dir)
        grid_path = path.join(grid_dir, grid_name)
        grid.write_data_to_file(grid_path)
        msg = "Wrote grid for %s" % path.basename(d)
        log.info(msg)
    log.info("Finished successfully")
Example no. 8
def main():

    actionList = [
        'addSubstance',
        'removeSubstance',
        'mapSubstances',
        'addCodeTree',
        'removeCodeTree',
        'moveCodeTree',
        'extractByGeocode',
        'uniteGrids',
        'scale'
    ]

    # setting up parser
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    utils.add_standard_command_options(parser)

    parser.add_argument("-d", "--doc",
                        action="store_true", dest="doc",
                        help="Prints more detailed documentation and exit")

    parser.add_argument("-u", "--user",
                        action="store", dest="user", default=None,
                        help="Specify user manually")

    parser.add_argument("-e", "--edb",
                        action="store", dest="edb", default=None,
                        help="Name of target edb")

    parser.add_argument("-s", "--substance", action="store",
                        dest="substance",
                        help="Name of substance to add or remove")

    parser.add_argument("--substanceFactorFile", action="store",
                        dest="substanceFactorFile",
                        help="File with substance factors")

    parser.add_argument("--substanceMapping", action="store",
                        dest="substanceMapping",
                        help="File with mapping for substance indices")

    parser.add_argument("--codeMapping", action="store",
                        dest="codeMapping",
                        help="File with mapping from old codes to new")

    parser.add_argument("--edbMapping", action="store",
                        dest="edbMapping",
                        help="File with mapping from gc to edb")

    parser.add_argument("-f", "--force",
                        action="store_true", dest="force", default=False,
                        help="Start the process without confirming the domain")

    parser.add_argument("--factor",
                        action="store", type=float, dest="factor",
                        help="Factor used to scale emissions")

    parser.add_argument(
        "-n", "--names", action="store",
        dest="sourceNames", default=['.*'],
        nargs='*',
        help="Space separated list of regular expressions"
    )

    parser.add_argument(
        "--sourceTypes", action="store",
        dest="sourceTypes",
        nargs='*',
        default=['griddb', 'sources'],
        choices=["griddb", "sources", "subgrpdb", "svehdb", "vehdb"],
        help="Source types to process"
    )

    parser.add_argument(
        "--codeType", action="store",
        dest="codeType", default='ac',
        help="Code type to process"
    )

    parser.add_argument(
        "--codeIndex", action="store",
        dest="codeIndex", default=1, type=int,
        help="Index of code tree to use in processing"
    )

    parser.add_argument(
        "--newIndex", action="store",
        dest="newIndex", default=None,
        help="New index when moving an already defined code tree"
    )

    parser.add_argument(
        "--codeLevel", action="store",
        dest="codeLevel", default=1, type=int,
        help="Level of code tree to use in processing")

    parser.add_argument(
        "--actions", action="store",
        dest="actions",
        nargs='*',
        choices=actionList,
        help="Action list"
    )

    args = parser.parse_args()

    if args.doc:
        print __doc__
        sys.exit()

#     if len(args) > 0:
#         log.error("Incorrect number of arguments")
#         sys.exit(1)

    if args.edb is None:
        log.error("Need to specify edb")
        sys.exit(1)

    if args.user is None:
        log.error("Need to specify user")
        sys.exit(1)

    dmn = Domain()
    edb = Edb(dmn, args.user, args.edb)
    subdb = Subdb(edb)
    subdb.read()
    rsrc = Rsrc(edb.rsrcPath())

    # Check if edb exist
    if not edb.exists():
        log.error("Edb " + args.edb + " does not exist for user " +
                  args.user + " in domain " + dmn.name)
        sys.exit(1)

    if args.codeType.lower() == "ac":
        codeType = "ACTIVITYCODE"
    elif args.codeType.lower() == "gc":
        codeType = "GEOCODE"
    else:
        parser.error("codeType should be either 'gc' or 'ac'")

    if args.codeIndex < 1:
        raise ValueError("Minimum code index is 1")
    if args.codeIndex > 1 and codeType == "gc":
        raise OSError("Multiple geocode trees not implemented")

    for a in args.actions:
        if a not in actionList:
            parser.error("Unknown action %s" % a)

    log.info("User: "******"Edb: " + args.edb)

    loadGridData = False

    # Set values in argsDict
    argDict = {
        "edb": edb,
        "user": args.user,
        "domain": dmn,
        "grids": [],
        "filters": args.sourceNames,
        "codeType": codeType,
        "codeIndex": args.codeIndex,
        "codeLevel": args.codeLevel,
        "sources": None,
        "griddb": None,
        "subgrpdb": None,
        "roaddb": None,
        "vehdb": None,
        "svehdb": None,
        "rsrc": rsrc
    }

    # ----- Reading sources from edb -------------------------------
    if 'griddb' in args.sourceTypes:
        grids = []
        gridNames = edb.listGrids()
        for name in gridNames:
            grd = egrid.Egrid(edb, name)
            grd.readAsc()
            grids.append(grd)
        argDict["grids"] = grids

    if 'sources' in args.sourceTypes:
        argDict["sources"] = list(ModelReader(SourceStream(edb, mode='r')))

    if 'roaddb' in args.sourceTypes:
        log.error("Not implemented with new roaddb-structure")
        sys.exit(1)

    if 'subgrpdb' in args.sourceTypes:
        subgrpdb = Subgrpdb(edb)
        subgrpdb.read()
        argDict["subgrpdb"] = subgrpdb

    # emfacdbdb = Emfacdb(edb)
    # emfacdb.read()
    # argDict["subgrpdb"] = subgrpdb

    if 'svehdb' in args.sourceTypes:
        svehdb = Svehdb(edb)
        svehdb.read()
        argDict["svehdb"] = svehdb

    # TODO:  add option for vehdb

    # parse additional args
    # -----addSubstance--------------------------------------
    if "addSubstance" in args.actions:
        if args.substance is None:
            parser.error("Action addSubstance needs" +
                         "--substance to be specified")
        if args.substanceFactorFile is None:
            parser.error("Option addSubstance also needs" +
                         "--substanceFactorFile to be specified")
        if args.substance not in subdb.substIndices:
            raise ValueError(
                "Substance %s not in substance list" % args.substance)

        substanceNameFactorDict = parseMapping(
            path.abspath(args.substanceFactorFile),
            valueType=float)

        substanceFactorDict = {}
        # Converts mapping between substance name and factor to
        # mapping between substance index and factor
        for name, factor in substanceNameFactorDict.iteritems():
            if name not in subdb.substIndices:
                raise KeyError(
                    "Substance : %s not found in substance list" % name
                )
            ind = subdb.substIndices[name]
            substanceFactorDict[ind] = factor

        argDict["substanceFactorDict"] = substanceFactorDict
        argDict["substance"] = subdb.substIndices[args.substance]

    # ----removeSubstance------------------------------------
    if "removeSubstance" in args.actions:
        if args.substance is None:
            parser.error("Action removeSubstance needs" +
                         "--substance to be specified")

        if args.substance not in subdb.substIndices:
            log.error("Substance %s not in substance list" % args.substance)
            sys.exit(1)

        argDict["substance"] = subdb.substIndices[args.substance]

    # ----mapSubstances--------------------------------------
    if "mapSubstances" in args.actions:
        if args.substanceMapping is None:
            parser.error("Action mapSubstances needs" +
                         "--substanceMapping to be specified")
        substanceMappingFile = path.abspath(args.substanceMapping)
        substanceMapping = parseMapping(
            substanceMappingFile,
            keyType=int,
            valueType=int
        )
        argDict["substanceMapping"] = substanceMapping

    # ----addCodeTree-----------------------------------------
    if "addCodeTree" in args.actions:
        if args.codeMapping is None:
            parser.error("Action addCodeTree needs "
                         "--codeMapping to be specified")
        if args.newIndex is None:
            parser.error("Action addCodeTree needs "
                         "--newIndex to be specified")

        codeMappingFile = path.abspath(args.codeMapping)
        codeMapping = parseMapping(codeMappingFile)
        argDict["codeMapping"] = codeMapping

        argDict["newIndex"] = int(args.newIndex)

    # ----removeCodeTree-------------------------------------
    # Only arguments with default values needed, no validation is needed

    # ----moveCodeTree---------------------------------------
    if "moveCodeTree" in args.actions:
        if args.newIndex is None:
            parser.error("Action moveCodeTree needs" +
                         "--newIndex to be specified")
        try:
            argDict["newIndex"] = int(args.newIndex)
        except ValueError:
            log.error("newIndex must be an integer value")
            sys.exit(1)

    # ----extractByGeocode-----------------------------------
    if "extractByGeocode" in args.actions:
        if args.edbMapping is None:
            parser.error("Action extractByGeocode needs" +
                         "--edbMapping to be specified")
        log.info("Slicing edb by geocode")
        edbMappingFile = path.abspath(args.edbMapping)
        # Geocode has to be int
        edbMapping = parseMapping(edbMappingFile, keyType=int)
        argDict["edbMapping"] = edbMapping

    if 'scale' in args.actions:
        if args.factor is None:
            log.error("Must specify --factor")
            sys.exit(1)

        if args.substance is None:
            log.error("Must specify --substance")
            sys.exit(1)

        argDict['substance'] = subdb.substIndices[args.substance]
        argDict['factor'] = args.factor

    # Processing actions
    for action in args.actions:
        log.info("Running action: %s" % action)
        # each action name corresponds to a processing function of the same name
        argDict = eval(action)(argDict)

    # Writes each processed grid to edb
    if loadGridData:
        for grd in argDict["grids"]:
            if not match(grd["NAME"], argDict["filters"]) or grd.hasSubgrp():
                continue
            else:
                log.debug(
                    "Wrote grid %s to edb: %s,  user: %s" % (
                        grd.par["NAME"].val, grd.edb.name, grd.user)
                )
                grd.load()
    else:
        for grd in argDict["grids"]:
            if not match(grd["NAME"], argDict["filters"]) or grd.hasSubgrp():
                continue
            else:
                log.debug(
                    "Wrote .asc-file for grid %s to edb: %s,  user: %s" % (
                        grd.par["NAME"].val, grd.edb.name, grd.user)
                )
                grd.writeAsc(grd.getAscPath())

    # Writes each processed database to edb
    if argDict["sources"] is not None:
        with SourceStream(edb, mode="w") as stream:
            model_writer = ModelWriter(stream)
            for src in argDict["sources"]:
                log.debug('Wrote source %s' % src.NAME)
                model_writer.write(src)

    for dbName in ["roaddb", "subgrpdb", "svehdb", "vehdb"]:
        if argDict[dbName] is not None:
            argDict[dbName].write()
            log.info(
                "Wrote %s to edb: %s, user: %s" % (
                    dbName, edb.name, edb.user)
            )
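
Note: parseMapping is used for all the mapping files above but not shown. A minimal sketch, assuming whitespace-separated key/value rows and matching the keyType/valueType keywords used above:

def parseMapping(filename, keyType=str, valueType=str):
    # Read "key value" rows into a dict, skipping blank lines and
    # '#' comments, converting keys and values with the given types.
    mapping = {}
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, value = line.split(None, 1)
            mapping[keyType(key)] = valueType(value.strip())
    return mapping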
Example no. 9
def main():
    # Set up parser
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument(
        "-e", "--edbs",
        action="store", dest="edbList",
        help="List of 'user/edb' pairs separated by :"
    )

    parser.add_argument(
        "-L", "--labels",
        action="store", dest="labels",
        help="List of edb labels separated by :"
    )

    parser.add_argument(
        "-s", "--substances",
        action="store", dest="substances",
        help="List of substance names separated by :"
    )

    parser.add_argument(
        "-t", "--title",
        action="store", dest="title",
        help="Report title"
    )

    parser.add_argument(
        "-g", "--gc-filter",
        action="store", dest="gcfilter",
        help="Filter on geo codes, separated by :"
    )

    parser.add_argument(
        "-o", "--outfile",
        action="store", dest="outfile",
        help="Output filename"
    )

    parser.add_argument(
        "-f", "--format",
        action="store", dest="format",
        help="Output in 'excel', 'csv' or 'raw' " +
        "(Excel format requires the xlwt python module)"
    )

    parser.add_argument(
        "--substMapping",
        action="store", dest="substMapping",
        help="File with tab separated mappings of substance names"
    )

    parser.add_argument(
        "--markerTable",
        action="store", dest="markerTable",
        help="Table of codes to be formatted and commented"
    )

    parser.add_argument(
        "macro", metavar="MACRO",
        help="A macro to use"
    )

    args = parser.parse_args()

    if args.markerTable is not None:
        keys = ["Year", "GC", "AC", "note_1", "note_2"]
        markerTable = DataTable(
            keys=keys,
            desc=[
                {"id": "Year", "type": str},
                {"id": "GC", "type": str},
                {"id": "AC", "type": str},
                {"id": "note_1", "type": str},
                {"id": "note_2", "type": str}
            ]
        )
        markerTable.read(args.markerTable)
    else:
        markerTable = None
        
    substMapping = {}
    if args.substMapping is not None:
        with codecs.open(args.substMapping, encoding="HP Roman8", mode="r") as f:
            for line in f:
                oldName, newName = line.split(":")
                substMapping[oldName.strip()] = newName.strip()

    dmn = Domain()
    if args.gcfilter is not None:
        args.gcfilter = args.gcfilter.split(":")

    # Read original macro
    with codecs.open(args.macro, encoding="HP Roman8", mode="r") as f:
        originalContent = f.read()
    
    # Create a tmp copy of the macro, write content from the original macro
    macroTempFile = tempfile.NamedTemporaryFile(
        suffix=".sedb",
        dir=dmn.tmpDir()
    )
    tmpMacro = codecs.open(
        macroTempFile.name,
        encoding="HP Roman8", mode="w"
    )
    tmpMacro.write(originalContent)
    tmpMacro.flush()

    # Create a ControlFile obj to simplify reading and modifying macro
    macro = ControlFile(macroTempFile.name, removeComments=False)
    edb = macro.findString("edb.edb:")
    user = macro.findString("edb.user:")
    gcIndex = int(macro.findString("edb.reportgeocode:")[-1])
    acIndex = int(macro.findString("edb.reportactcode:")[-1])

    if args.edbList is None:
        edbs = [[user, edb]]
    else:
        edbs = args.edbList.split(":")
        edbs = [e.split("/") for e in edbs]

    nedbs = len(edbs)

    if args.labels is None:
        labels = ["No label"] * len(edbs)
    else:
        labels = args.labels.split(":")
        if len(labels) != nedbs:
            log.error("Number of labels specified should match number of edb:s")
            sys.exit(1)

    if args.substances is None:
        log.error("Need to specify substances")
        sys.exit(1)
    else:
        substances = args.substances.split(":")

    if args.format not in ('excel', 'csv', 'raw'):
        log.error(
            "Invalid format specifier: %s, should be one of "
            "'excel', 'csv' or 'raw'" % args.format
        )
        sys.exit(1)
    elif args.format == "excel":
        try:
            import xlwt
        except:
            log.error(
                "trendReport.py requires python module xlwt to write excel-files")
            sys.exit(1)

    # first edb
    edb = Edb(dmn, edbs[0][0], edbs[0][1])
    # assume same code definitions in all edbs to be processed, read from first
    rsrc = edb.rsrc

    nrsubstances = len(substances)
    unitIndex = int(macro.findString("UNIT        :"))
    units = rsrc.search[unitIndex]

    subdb = Subdb(edb)
    subdb.read()

    # decode input title using stdin encoding
    title = args.title.decode(sys.stdin.encoding)

    rawOutput = ""
    rawMeta = u"name: %s\nnrmacros: %i\nnrsub: %i\nunit: %s\n" %(
        title, nedbs, nrsubstances, units)
        
    emissions = []
    for ind, edbUser in enumerate(edbs):
        label = labels[ind]
        userName = edbUser[0]
        edbName = edbUser[1]

        macro.setParam("edb.user:"******"edb.edb:", edbName)
        macro.setParam("USER          :"******"EDB           :", edbName)

        rawMeta += "macro.%i.edbuser: %s\n" %(ind, userName)
        rawMeta += "macro.%i.edbname: %s\n" %(ind, edbName)
        rawMeta += "macro.%i.desc: %s\n" %(ind, label)

        for subst in substances:
            log.info(
                "User: %s, edb: %s, substance %s" %(
                    userName, edbName, subst)
            )
            substanceIndex = subdb.substIndex(subst)
            macro.setParam("ELEMENT    :", substanceIndex)
            macro.write()
            command = "xrepedb -i " + macro.name
            log.info("Running xrepedb for substance %s" % subst)
            (returnCode, errMsg, outMsg) = utilities.execute(command)          
 
            if returnCode != 0:
                log.error("Could not run %s\nstdout: %s\nstderr:%s" %(
                        command,outMsg,errMsg))
                sys.exit(1)
            
            if len(outMsg) < 10:
                log.error("Invalid output from xrepedb: %s" % outMsg)
                sys.exit(1)

            rawOutput += "#MACRO %i \"%s\" \"%s\"\n" % (ind, subst, labels[ind])
            rawOutput += outMsg

            lines = outMsg.split("\n")[:-1]
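            # each xrepedb output line is whitespace separated; field 1 holds
            # the activity code, field 3 the geo code and field 4 the
            # emission value (as parsed below)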
            for lineInd, line in enumerate(lines):
                vals = line.split()
                ac = vals[1].split(".")
                gc = vals[3].split(".")
                
                if len(ac) == 1:
                    if ac[0] == "<all>":
                        acLev1 = "alla"
                    else:
                        acLev1 = ac[0]
                    acLev2 = "alla"
                else:
                    acLev1 = ac[0]
                    acLev2 = ac[1]
                    
                if len(gc) == 1:
                    if gc[0] == "<all>":
                        gcLev1 = "alla"
                    else:
                        gcLev1 = gc[0]
                    gcLev2 = "alla"
                else:
                    gcLev1 = gc[0]
                    gcLev2 = gc[1]

                emis = float(vals[4])


                if acLev1 == "alla":
                    acLev1Name = "alla"
                    acLev2Name = "alla"
                else:
                    node = rsrc.ac[acIndex - 1].root.find(acLev1)
                    acLev1Name = node.attrib["name"]
                    if acLev2 == "alla":
                        acLev2Name = "alla"
                    else:
                        node = rsrc.ac[acIndex-1].root.find(
                            acLev1 + "/" + acLev2
                        )
                        acLev2Name = node.attrib["name"]                


                if gcLev1 == "alla":
                    gcLev1Name = "alla"
                    gcLev2Name = "alla"
                else:
                    node = rsrc.gc[gcIndex-1].root.find(gcLev1)
                    gcLev1Name = node.attrib["name"]
                    if gcLev2 == "alla":
                        gcLev2Name = "alla"
                    else:
                        node = rsrc.gc[gcIndex - 1].root.find(
                            gcLev1 + "/" + gcLev2
                        )
                        gcLev2Name = node.attrib["name"]                


                if args.gcfilter is not None:
                    if gc[0] not in args.gcfilter:
#                     if args.gcfilter != gcLev1:
                        continue
                emissions.append({"label": label,
                                  "substance": subst,
                                  "ac": '.'.join(ac),
                                  "gc": '.'.join(gc),
                                  "gcLev1": gcLev1Name,
                                  "gcLev2": gcLev2Name,
                                  "acLev1": acLev1Name,
                                  "acLev2": acLev2Name,
                                  "acLev1Code": acLev1,
                                  "acLev2Code": acLev2,
                                  "val": emis,
                                  "edbIndex": ind})


    
    #Close tempfile to automatically remove it
    tmpMacro.close()

    if args.format == "raw":
        outfile = codecs.open(args.outfile,"w","HP Roman8")
        outfile.write(rawMeta)
        outfile.write(rawOutput)
        outfile.close()
    elif args.format == "csv":
        outfile = open(args.outfile,"w")
        desc = [
            {'id': 'gc', 'type': unicode},
            {'id': 'ac', 'type': unicode},
            {'id': 'label', 'type': unicode},
            {'id': 'user', 'type': unicode},
            {'id': 'edb', 'type': unicode}
            ]
        for subst in substances:
            desc.append({'id': subst, 'type': float})

        keys = ['gc', 'ac', 'label']
    
        table = DataTable(desc=desc, keys=keys)

        log.info("Adding emissions to csv-table")
        for emis in emissions:
            row = [None] * len(desc)
            user = edbs[emis['edbIndex']][0]
            edb = edbs[emis['edbIndex']][1]
            row[table.colIndex['gc']] = emis['gc']
            row[table.colIndex['ac']] = emis['ac']
            row[table.colIndex['label']] = emis['label']
            row[table.colIndex['user']] = user
            row[table.colIndex['edb']] = edb
            row[table.colIndex[emis['substance']]] = emis['val']

            # data is appended to the correct row, or a new row is added if the
            # table keys do not match any existing row
            log.debug(
                "Adding row for substance %s, gc %s, ac %s" %(
                    emis['substance'],
                    emis['gc'],
                    emis['ac'])
            )
            table.addRow(row, append=True)

        table.write(outfile)
        outfile.close()

    else:
        # Create style objects for excel output        
        header1Style = xlwt.easyxf(
            'font: name Times New Roman,color-index black, bold on',
            num_format_str='0.000E+00'
        )

        markerStyle1 = xlwt.easyxf(
            'font: name Times New Roman,color-index red, bold off, italic on',
            num_format_str='0.000E+00')

        markerStyle2 = xlwt.easyxf(
            'font: name Times New Roman,color-index orange, bold off, italic on',
            num_format_str='0.000E+00')

        normalStyle = xlwt.easyxf(
            'font: name Times New Roman,color-index black, bold off',
            num_format_str='0.000E+00'
        )                       

        excelBook = xlwt.Workbook()

        # Creating info sheet
        infoWs = excelBook.add_sheet("Info")
        infoWs.col(0).width = 256*20
        infoWs.col(1).width = 256*25
        infoWs.col(2).width = 256*20
        infoWs.col(3).width = 256*200

        infoWs.write(0,0,u"Rapportnamn:",header1Style)
        infoWs.write(0,1,title,header1Style)
        infoWs.write(1,0,u"Beskrivning av dataunderlaget",header1Style)
        infoWs.write(3,0,u"Makron (specificerar utsökningar ur databasen)",header1Style)
        infoWs.write(4,0,u"Etikett",header1Style)
        infoWs.write(4,1,u"Ägare till EDB",header1Style)
        infoWs.write(4,2,u"EDB (emissiondatabas)",header1Style)
        infoWs.write(4,3,u"Beskrivning",header1Style)

        for i,edbUser in enumerate(edbs):
            userName=edbUser[0]
            edbName=edbUser[1]
            label=labels[i]
            infoWs.write(5+i,0,label)
            infoWs.write(5+i,1,userName)
            infoWs.write(5+i,2,edbName)
            #reading edb description file (if it exists)
            edb=Edb(dmn,userName,edbName)
            infoWs.write(5+i,3,edb.desc().replace("\n"," "))

        #split substances in green house gases and air quality related
        ghgList=[s for s in substances if s in ghgs]        
        aqList=[s for s in substances if s not in ghgs]

        #Write air quality headers
        firstRow=4
        #Add two rows for marker comments
        if markerTable is not None:
            firstRow+=2
        if len(aqList)>0:
            aqWs = excelBook.add_sheet(u"Luftföroreningar")
            aqWs.col(0).width = 256*25
            aqWs.col(1).width = 256*30
            aqWs.col(2).width = 256*20
            aqWs.col(3).width = 256*15
            for col in range(nrsubstances*nedbs):
                aqWs.col(col+4).width=256*15

            aqWs.write(0,0,u"Rapportnamn:",header1Style)
            aqWs.write(0,1,title,header1Style)
            aqWs.write(1,0,u"Emissioner av luftföroreningar",header1Style)
            aqWs.write(1,1,u"Enhet: "+units,header1Style)
            if markerTable is not None:
                aqWs.write(2,0,u"OBS! Röd kursiv text anger osäkra värden p.g.a. att en stor del av emissionen är fördelad med schabloner inom kommungruppen. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.",markerStyle1)
                aqWs.write(3,0,u"OBS! Orange kursiv text anger osäkra värden p.g.a. att trenden varierar kraftigt och eventuellt felaktigt, ytterligare verifiering krävs. Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.",markerStyle2)

            aqWs.write(firstRow,0,"Huvudsektor",header1Style)
            aqWs.write(firstRow,1,"Undersektor",header1Style)
            aqWs.write(firstRow,2,u"Län",header1Style)
            aqWs.write(firstRow,3,"Kommun",header1Style)

        #Write ghg headers
        if len(ghgList)>0:
            ghgWs = excelBook.add_sheet(u"Växthusgaser")
            ghgWs.col(0).width = 256*25
            ghgWs.col(1).width = 256*30
            ghgWs.col(2).width = 256*20
            ghgWs.col(3).width = 256*15
            for col in range(nrsubstances*nedbs):
                ghgWs.col(col+4).width=256*15

            ghgWs.write(0,0,u"Rapportnamn:",header1Style)
            ghgWs.write(0,1,title,header1Style)
            ghgWs.write(1,0,u"Emissioner av Växthusgaser",header1Style)
            ghgWs.write(2,0,u"CO2-ekv. efter ämnesnamn innebär att emissionen är uttryckt i CO2-ekvivalenter",header1Style)
            if markerTable is not None:
                ghgWs.write(3,0,u"OBS! Röd kursiv text anger osäkra värden p.g.a. att en stor del av emissionen är fördelad med schabloner inom kommungruppen.  Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.",markerStyle1)
                ghgWs.write(4,0,u"OBS! Orange kursiv text anger osäkra värden p.g.a. att trenden varierar kraftigt och eventuellt felaktigt, ytterligare verifiering krävs.  Granska underkategorin \"Energiförsörjning via el-värmeverk samt inom industrin\" för att se eventuella misstänkta värden.",markerStyle2)

            ghgWs.write(1,1,u"Enhet: "+units,header1Style)
            ghgWs.write(firstRow,0,"Huvudsektor",header1Style)
            ghgWs.write(firstRow,1,"Undersektor",header1Style)
            ghgWs.write(firstRow,2,u"Län",header1Style)
            ghgWs.write(firstRow,3,"Kommun",header1Style)

        def getColInd(nmacros, substances,macroInd,subst):
            #gets the column index in excel file
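            # layout illustration (assuming no CO2-equivalent columns):
            # with substances [s1, s2] and 2 macros, s1 occupies cols 4-5
            # and s2 cols 6-7, i.e. grouped by substance, one col per macro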
            sInd=substances.index(subst)

            #Including extra columns to write CO2-equivalents
            nSubstWithCO2equivalents=0
            for s in substances[:sInd+1]:
                if s in doubleColumns:
                    nSubstWithCO2equivalents+=1                       

            return 4 + macroInd+sInd*nmacros+nSubstWithCO2equivalents*(macroInd+1)

        #write macro labels and substance headers for air quality sheet
        for sInd,subst in enumerate(aqList):
            for i,edbUser in enumerate(edbs):
                col=getColInd(nedbs,aqList,i,subst)
                aqWs.write(firstRow-1,col,labels[i],header1Style)
                #If a substance name is given in mapping this is used, otherwise
                #the substance name from the airviro substance list is used
                aqWs.write(firstRow,col,substMapping.get(subst,subst),header1Style)

        #write macro labels and substance headers for ghg sheet
        for sInd,subst in enumerate(ghgList):
            for i,edbUser in enumerate(edbs):
                col=getColInd(nedbs,ghgList,i,subst)

                #If CO2-equivalents are calculated, an extra column is needed
                if subst in doubleColumns:
                    ghgWs.write(firstRow-1,col-1,labels[i],header1Style)
                ghgWs.write(firstRow-1,col,labels[i],header1Style)

                #If CO2-equivalents are calculated, an extra column is needed
                if subst in doubleColumns:
                    ghgWs.write(firstRow,col-1,substMapping.get(subst,subst),header1Style)
                    ghgWs.write(firstRow,col,substMapping.get(subst,subst)+"CO2-ekv.",header1Style)
                elif subst in storedAsCO2equivalents:
                    ghgWs.write(firstRow,col,substMapping.get(subst,subst)+"CO2-ekv.",header1Style)
                else:
                    ghgWs.write(firstRow,col,substMapping.get(subst,subst),header1Style)


        #looping over all emissions, writing them to the correct column and row
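        # ghgRow/aqRow hold the next free row index per data column;
        # index 0 tracks where the next row of header labels
        # (sector/region names in cols 0-3) goes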
        ghgRow=[]
        aqRow=[]
        for m in range(nedbs*nrsubstances+4+3*nedbs):
            ghgRow.append(firstRow+1)
        for m in range(nedbs*nrsubstances+4):
            aqRow.append(firstRow+1)

        for emis in emissions:
            subst = emis["substance"]
            emisVal=emis["val"]
            edbInd=emis["edbIndex"]

            # Check if the edb label (year), gc and ac match a marker table
            # row: "ja" in note_1 selects markerStyle1 (uncertain spatial
            # distribution), "ja" in note_2 selects markerStyle2 (uncertain trend)
            if markerTable is not None:
                TableRowInd=markerTable.rowIndices([labels[edbInd],
                                                    emis["gc"],
                                                    emis["ac"],
                                                    "ja","*"])
                if len(TableRowInd) >0:
                    valueStyle=markerStyle1
                else:
                    TableRowInd=markerTable.rowIndices([labels[edbInd],
                                                        emis["gc"],
                                                        emis["ac"],
                                                        "*","ja"])
                    if len(TableRowInd)>0:
                        valueStyle=markerStyle2
                    else:
                        valueStyle=normalStyle
            else:
                valueStyle=normalStyle



            if subst in ghgList:
                col=getColInd(nedbs,ghgList,edbInd,subst)
                row=ghgRow[col]
                if ghgRow[0]<=+row:
                    ghgWs.write(row,0,emis["acLev1"],normalStyle)
                    ghgWs.write(row,1,emis["acLev2"],normalStyle)
                    ghgWs.write(row,2,emis["gcLev1"],normalStyle)
                    ghgWs.write(row,3,emis["gcLev2"],normalStyle)
                    ghgRow[0]+=1
                # convert the emission to CO2-equivalents for the extra column
                if subst in doubleColumns:
                    ghgWs.write(row,col-1,float(emisVal),valueStyle)
                    ghgWs.write(row,col,float(emisVal)*float(ekvFactors[subst]),valueStyle)
                else:
                    ghgWs.write(row,col,float(emisVal),valueStyle)

                ghgRow[col]+=1
            else:
                col=getColInd(nedbs,aqList,edbInd,subst)
                row=aqRow[col]
                if aqRow[0]<=+row:
                    aqWs.write(row,0,emis["acLev1"],normalStyle)
                    aqWs.write(row,1,emis["acLev2"],normalStyle)
                    aqWs.write(row,2,emis["gcLev1"],normalStyle)
                    aqWs.write(row,3,emis["gcLev2"],normalStyle)
                    aqRow[0]+=1
                aqWs.write(row,col,float(emisVal),valueStyle)
                aqRow[col]+=1

        excelBook.save(args.outfile)
    log.info("Finished!")
Exemplo n.º 10
0
def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)


    parser.add_argument("controlfile", metavar='CONTROLFILE',
                       action="store",
                      help="Controlfile for topdown processing")
    
    parser.add_argument("-t", "--template", metavar='TEMPLATEFILE',
                        action="store",dest="cf",default=None,
                        help="Generate default controlfile")

    args = parser.parse_args()


    if args.cf is not None:
        generateCf(args.cf)
        log.info("Wrote default controlfile")
        sys.exit(0)

    log.info("Starting topdown processing")
    # Opening controlfile
    cf = ControlFile(args.controlfile)
    dmn = Domain()

    log.info("Reading topdown table")
    tdTableName = cf.findExistingPath("topDownTable:")
    tdTable = DataTable()
    tdTable.keys.append("Code")
    tdTable.read(tdTableName,delimiter=";")

    log.info("Reading national totals table")
    natTotalTableName = cf.findExistingPath("nationalTotalTable:")
    natTable = DataTable(desc=[{"id": "Code", "type":unicode},
                               {"id": "description", "type":unicode}])
    natTable.keys.append("Code")
    natTable.read(natTotalTableName, units=True, defaultType=str)
    notationKeys = ["NE", "NO", "NA", "IE"]

    
    
    log.debug("Remove notation keys from national totals table")
    for row in natTable.data:
        for i in range(len(row)):
            if row[i] in notationKeys:
                row[i] = None

    log.debug("Convert all emission columns in national totals to float")
    for colId in natTable.listIds():
        if colId not in ["Code","description"]:
            natTable.convertCol(colId,float)

    log.debug("Store units from national totals for each substance in dict")
    natUnits={}
    for col in natTable.desc:
        if col.get("units",None)!=None:
            natUnits[col["id"]]=col["units"]
        
    log.debug("Read remaining data from control file")
    bottomupEdbName = cf.findString("bottomUpEdb:")
    topDownEdbName = cf.findString("topDownEdb:")
    emissionsEdbName = cf.findString("emissionsEdb:")
    userName = cf.findString("user:"******"year:")

    #initialize edb objects
    buEdb = Edb(dmn,userName,bottomupEdbName)
    tdEdb = Edb(dmn,userName,topDownEdbName)
    eEdb = Edb(dmn,userName,emissionsEdbName)
    log.info("Reading/preparing EDB:s")
    
    log.info("Reading subdb")
    subdb = Subdb(eEdb)
    subdb.read()

    log.info("Reading subgrpdb")
    subgrpdb = SubgrpStream(buEdb)
    subgrpdb.read()

    log.info("Reading facilitydb")
    facilityIn = FacilityStream(buEdb)

    log.info("Reading companydb")
    companyIn = CompanyStream(buEdb)
    
    facilityOut = FacilityStream(eEdb,mode="w")
    companyOut = CompanyStream(eEdb,mode="w")

    log.info("Writing company db to result edb")
    companyOut.write(companyIn.read())

    log.info("Writing facility db to result edb")
    facilityOut.write(facilityIn.read())

    if not buEdb.exists():
        log.error("Edb " + buEdb.name + " does not exist for user " + userName +
                  " in domain " + dmn.name)
        sys.exit(1)
    if not tdEdb.exists():
        log.error("Edb " + tdEdb.name + " does not exist for user " + userName +
                  " in domain " + dmn.name)
        sys.exit(1)
    if not eEdb.exists():
        log.error("Edb " + eEdb.name + " does not exist for user " + userName +
                  " in domain " + dmn.name)
        sys.exit(1)

    keys = tdEdb.listGrids()
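    # grids stored in the top-down edb serve as spatial distribution keys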
    msg = "%i keys found in edb: %s" % (len(keys), tdEdb.name)
    log.info(msg)

    # sourcedb from bottom-up edb
    with SourceStream(buEdb, mode='rb') as source_instream:
        source_reader = ModelReader(source_instream)
        bu_sources = list(source_reader)

    log.info(
        "%i point sources found in edb: %s" % (
            len(bu_sources),
            buEdb.name)
    )


    # Empty sourcedb of the result edb
    if cf.findBoolean("emptyEmissionSourcedb:"):
        eEdb.empty_sourcedb()
        e_sources = []
        log.info("Removed point sources from edb: %s" % (eEdb.name))
    else:
        # sourcedb from emission edb (result edb)
        with SourceStream(eEdb, mode='rb') as source_instream:
            source_reader = ModelReader(source_instream)
            e_sources = list(source_reader)

        msg = "%i point sources found in edb: %s" % (len(e_sources), eEdb.name)
        log.info(msg)

    if not path.exists(eEdb.rsrcPath()):
        log.error("No edb.rsrc exists for emission edb")
        sys.exit()
    else:
        rsrc = Rsrc(eEdb.rsrcPath())
    acIndex = cf.findInt("acIndex:")
    codeDepth = rsrc.ac[acIndex-1].depth

    substances = cf.findStringList("substances:")
        
    for subst in substances:
        if subst not in subdb.substIndices:
            log.error("Substance: " + subst + " not in Airviro substance list")
            sys.exit()
    
    # Initialize trace for debug and additional logging
    if cf.findBoolean("trace:") == True:
        log.info("Initializing trace for detailed logging")
        trace = TraceDef(
            active=True,
            substances=cf.findStringList("trace.substances:"),
            logfile=cf.findString("trace.logfile:"),
            regdefgc=cf.findIntList("trace.regdef.gc:",
                                    optional=True,
                                    default=None),
            gcDefRaster=cf.findExistingPath("trace.gcraster:")
        )                               
    else:
        trace = TraceDef(active=False)

    log.info("Initializing result table")
    resTablePath = cf.findString("resTable:")
    resTable = DataTable(desc=[{"id": "Code", "type": unicode}])
    resTable.keys.append("Code")
    for subst in substances:
        resTable.addCol({"id": subst, "type": float, "unit": "%"})
        
    # Create emission grid template (with geocodes)
    log.info("Reading emission grid template")
    eGridTemplatePath = cf.findExistingPath("emisGridTemplatePath:")
    eGridTemplate = Egrid(eEdb,"name")
    if eGridTemplatePath[-4:] == ".asc":
        eGridTemplatePath=eGridTemplatePath[:-4]
    eGridTemplate.readData(eGridTemplatePath)
    eGridTemplate.substances = {}
    eGridTemplate.par["SUBSTANCE"].val = []
    dd = {"key": None,
          "regstat": None,
          "regdef": None,
          "bu_sources": bu_sources,
          "psIndices": [],
          "units": natUnits,
          "rsrc": rsrc,
          "subdb": subdb,
          "trace": trace,
          "subgrpdb": subgrpdb
          }    
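    # dd collects the shared state (distribution key, regional statistics
    # and definition, point source indices etc.) that is passed to
    # regionalizePS/regionalizeKey for each code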

    # Process all rows in the topdown table
    for row in tdTable.data:
        code = row[tdTable.colIndex["Code"]]
        active = row[tdTable.colIndex["Active"]]
        statType = row[tdTable.colIndex["Stat_type"]]
        if active == "no":
            continue
        log.info("Code: "+code)
        
        distributed=False

        # Add '-' to the code to reach max length (fix for a GUI bug);
        # currently disabled, see the commented lines below
        airviroCode = code
#         while len(airviroCode.split(".")) < codeDepth:
#             airviroCode += ".-"
            
        tdrow = tdTable.data[tdTable.rowIndex([code])]
        nrow = natTable.data[natTable.rowIndex([code])]

        # Create a resTable row to fill with data
        resrow = [None] * resTable.ncols
        resrow[0] = code

        # Check if national totals are non-zero
        nonZero = False
        for val in nrow:
            if val is not None and val > 0:
                nonZero = True
                break

        # Filter out indices for pointsources with the current ac
        # Also including sources coded with sub-codes
        # This allows to estimate top-down emissions on a higher code-level
        psIndices = []
        for i, ps in enumerate(bu_sources):
            codeMatch = False
            
            for emis in ps.EMISSION:                
                # It is assumed that the first code is used while processing topdown
                ac = emis.ACTCODE[0]
                if ac[-1] == ".":
                    ac=ac[:-1]
#                 if ac[:len(code)] == code:                    
                if ac == code:                    
                    codeMatch = True
                    break

            if not codeMatch:
                for emis in ps.SUBGRP:                
                    # It is assumed that the first code is used while processing topdown
                    ac = emis.ACTCODE[0]
                    if ac[:len(code)] == code:                    
                        codeMatch = True
                        break

            if codeMatch:
                psIndices.append(i)

        dd["psIndices"] = psIndices

        keyName = row[tdTable.colIndex["Key"]]

        #If no distribution key specified and no ps in bottom-up edb - cont.
        if keyName is None and psIndices == []:
            log.debug("No key and no point sources found for code: %s, skipping..." % code)
            resTable.addRow(resrow)
            continue

        if psIndices!=[]:
            msg = "--Found %i pointsources" % len(psIndices)
            log.info(msg)

        if keyName is not None:
            if keyName not in keys:
                log.error("No such key: " + keyName)
                sys.exit()

            msg = "--Key: %s" % keyName
            log.info(msg)
            keyGrid = Egrid(tdEdb, keyName)
            keyGrid.readData()
            log.debug("Read key: " + keyName + " from topdownEdb")

            # create emission grid to store distributed emissions
            eGrid = deepcopy(eGridTemplate)
            eGrid.name = code.replace(".", "_")
            eGrid.par["NAME"].val = code
            eGrid.par["INFO2"].val = "Distribution key: " + keyGrid.par["NAME"].val
            eGrid.par["ACTIVITYCODE"].val = [airviroCode.split(".")]

        regstatName = row[tdTable.colIndex["Regstat"]]
        regdefName = row[tdTable.colIndex["Regdef"]]
                
        if regstatName is not None:
            if regdefName is None:
                log.error("No region definition given for regional statistics: " +
                          regstatName)
                sys.exit(1)
            regstatPath = path.join(dmn.domainPath(), "topdown", "regstat", regstatName)
            regstat = DataTable()
            log.info("regstatPath: "+regstatPath)
            regstat.read(regstatPath, units=True, defaultType=float, delimiter=";")
            if not "Geocode" in regstat.listIds():
                log.error("No Geocode column found in regstat")
                sys.exit(1)
            regstat.convertCol("Geocode", int)
            regstat.keys.append("Geocode")  # Making Geocode the primary key

            # create list of unique geo codes
            geocodes = [row[regstat.colIndex["Geocode"]] for row in regstat.data]
            geocodes = unique(geocodes)


            for colId in regstat.listIds():
                if colId.lower() == "year":
                    rows = []
                    regstat.convertCol(colId, int)
                    # Make it possible to accumulate year
                    regstat.setKeys(regstat.keys + [colId])
            
                    # Calculates the total emission for each geocode
                    # in case there are multiple rows for different fuels etc
                    colsToSum = regstat.listIds()
                    colsToSum.remove(colId)
                    colsToSum.remove("Geocode")
                    for gc in geocodes:
                        # sums all numeric values in colsToSum for
                        # rows matching row id [gc,year]
                        #returns an accumulated row and appends it to rows
                        rowId = regstat.dict2RowId({"Geocode": gc, colId: year})
                        rows.append(regstat.accumulate(rowId, "sum", colsToSum))
                    regstat.data = rows  # replace original rows with accumulated rows
                    regstat.keys.remove(colId)
                    break
                
#             dd["regstat"] = regstat
            regdef = Raster()
            regdefPath = path.join(dmn.domainPath(), "topdown", "regdef", regdefName)
            regdef.read(regdefPath)

            dd["regstat"] = regstat
            dd["regdef"] = regdef
        else:
            dd["regstat"] = None
            dd["regdef"] = None

        if dd["regstat"] is not None and len(bu_sources) > 0 and statType == "fixed":
            log.info("--Regionalizing pointsources")
            dd = regionalizePS(dd, code)

        if keyName is not None and nonZero:
            regionalizedDefault = False
            # Spatial distribution of emissions
            for subst in substances:
                
                sInd = subdb.substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]                
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # TODO: should give reference to subgrps to include emis from them
                    pstot += source.get_emis(
                        sInd,
                        toUnit,
                        eEdb,
                        actcodes=[code]
                    )

                if ntot is None or ntot == 0:
                    if pstot > 0:
                        # 9999 is used as marker for no national total 
                        resrow[resTable.colIndex[subst]] = 9999.0
                        log.warning(
                            "Nattot is 0 but ps tot is: %f %s" % (pstot, toUnit))
                    continue
                
                nrest = ntot - pstot

                resrow[resTable.colIndex[subst]] = 100.0
            
                if abs(nrest / ntot) < 0.0001:
                    nrest = 0
                    log.info(
                        "--Rest is < 0.01 % of national total, rounded to zero"
                    )
                    continue
                elif nrest < 0:
                    log.warning(
                        "--National rest is below zero, %4.2f proc for %s" % (
                            -1 * nrest / ntot * 100,
                             subst)
                    )
                    dd["trace"].write()
#                    continue
                log.info(
                    "---Substance: " + subst +
                    ", rest is: " + str(nrest) + " " +
                    toUnit + " = " + str(nrest / ntot * 100.0) + "%"
                )
                
                try: 
                    keyRast = keyGrid.substances[sInd]
                except KeyError:
                    keyRast = keyGrid.substances[subdb.substIndices["all"]]
                    
                dd["key"] = keyRast
                if dd["regstat"] is not None:
                    if (subst not in regstat.colIndex and 
                        sInd not in keyGrid.substances and not regionalizedDefault):
                        dd = regionalizeKey(dd, subst, code)
                        regionalizedDefault = True                                    
                    else:
                        dd = regionalizeKey(dd, subst, code)
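                # distribute the remaining national total over the key raster
                # and convert from the national unit to ton/year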
                    
                emisRast = distribute(dd["key"], nrest)
                emisRast = emisRast * unitConvFac(toUnit, "ton/year")
                eGrid.addData(emisRast, dd["subdb"].substIndices[subst])
                distributed = True

        else:
            # resTable is filled
            # In case all national totals are zero but there are ps
            for subst in substances:
                sInd = dd["subdb"].substIndices[subst]
                toUnit = dd["units"][subst] + "/year"
                ntot = nrow[natTable.colIndex[subst]]               
                pstot = 0
                for i in dd["psIndices"]:
                    source = dd["bu_sources"][i]
                    # subgrps are not used!
                    pstot += source.get_emis(sInd, toUnit, buEdb,
                                             actcodes=[code])

                if ntot is not None and ntot != 0:
                    resrow[resTable.colIndex[subst]] = pstot / ntot * 100.0
                else:
                    resrow[resTable.colIndex[subst]] = -999.0

        if len(dd["psIndices"]) > 0:
            tmp_sources = (bu_sources[i] for i in dd["psIndices"])
            with SourceStream(eEdb, mode='wb') as out_source_stream:
                source_writer = ModelWriter(out_source_stream)
                for source in tmp_sources:
                    source_writer.write(source)
            log.debug("Wrote ps to emission edb")

        if distributed:
            eGrid.load()
            log.debug("Wrote emission grid to emission edb")    

        dd["trace"].write()
        resTable.addRow(resrow)

    resTableFile = open(resTablePath,"w")
    resTable.write(resTableFile)

    log.info("Finished topdown process")
Exemplo n.º 11
0
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument(
        '--info',
        dest='info',
        action='store_true',
        help='Print version of sqlite, spatialite, proj and geos')

    parser.add_argument(
        '-e',
        metavar='edb',
        dest='edb',
        help='name of edb to load',
    )

    parser.add_argument('-d',
                        action='store',
                        dest='db',
                        metavar='FILENAME',
                        help='Sqlite database file')

    parser.add_argument(
        '--buffer',
        action='store',
        type=float,
        dest='buffer',
        default=DEFAULTS['buffer'],
        help='Domain buffer width (meters), default=%(default)s')

    parser.add_argument('--flat-buffer',
                        action='store',
                        # dest was 'buffer', silently clobbering --buffer;
                        # renamed to match the DEFAULTS key (assumed intent)
                        dest='buffer_flat_fraction',
                        default=DEFAULTS['buffer_flat_fraction'],
                        help='Fraction of buffer that should be completely' +
                        ' flat, default=%(default)s')

    parser.add_argument(
        '--max-segment-len',
        action='store',
        dest='max_segment_len',
        default=5,
        help='Maximum length of triangulation edges, default=%(default)s')

    parser.add_argument('--height',
                        action='store',
                        dest='height',
                        default=DEFAULTS['domain_height'],
                        help='Domain height above ground, default=%(default)s')

    parser.add_argument('-s',
                        action='store',
                        type=int,
                        metavar='SUBST',
                        dest='substances',
                        nargs='+',
                        help='Substance indices to create emission data for')

    parser.add_argument('--scenario',
                        action='store',
                        type=int,
                        dest='scenario',
                        default=1,
                        help='Scenario index to create emission views for')

    parser.add_argument('-t',
                        '--terrain',
                        action='store',
                        dest='terrain',
                        help='Terrain raster file')

    parser.add_argument('-c',
                        '--case',
                        action='store',
                        dest='case',
                        help='Output case directory')

    parser.add_argument(
        '--no-roads',
        action='store_false',
        dest='roads',
        help='Do not generate road STLs or adapt ground stl to roads')

    parser.add_argument('--load-roads',
                        action='store_true',
                        dest='load_roads',
                        help='Load roads from edb and generate road polygons')

    parser.add_argument('--init-roads',
                        action='store_true',
                        dest='init_roads',
                        help='Create empty roads table in case db' +
                        ' before loading roads from edb')

    parser.add_argument('--init-domain',
                        action='store_true',
                        dest='init_domain',
                        help='Create empty domain table in case db')

    parser.add_argument('--init-structures',
                        action='store_true',
                        dest='init_structures',
                        help='Create empty structures table in case db')

    parser.add_argument('--init',
                        dest='initdb',
                        action='store_true',
                        help='Create new or overwrite existing case db')

    parser.add_argument('--epsg',
                        dest='epsg',
                        type=int,
                        default=3006,
                        action='store',
                        help='EPSG to use when creating new case db')

    parser.add_argument('--emis-ts',
                        dest='emis_ts',
                        action='store_true',
                        help='Write emission timeseries to case')

    parser.add_argument('--begin',
                        dest='begin',
                        metavar='YYMMDDHH',
                        action='store',
                        type=arg2datetime,
                        help='First hour of emission timeseries')

    parser.add_argument('--end',
                        dest='end',
                        metavar='YYMMDDHH',
                        action='store',
                        type=arg2datetime,
                        help='Last hour of emission timeseries')

    parser.add_argument(
        '--translate',
        action='store',
        nargs=3,
        type=float,
        dest='translate',
        help='Translate STL (default is origin at domain centroid)')

    # parser.add_argument(
    #     '-s', '--sources',
    #     action='store_true', dest='sources',
    #     help='Generate emissions for sources'
    # )

    args = parser.parse_args()

    if args.info:
        con, cur = connect(":memory:")
        # testing library versions
        row = cur.execute(
            'SELECT sqlite_version(), spatialite_version()').next()
        msg = "> SQLite v%s Spatialite v%s" % (row[0], row[1])
        log.info(msg)
        sys.exit(0)

    if args.db is None:
        log.info('Connecting to in-memory db')
    else:
        log.info('Connecting to %s' % args.db)
    try:
        con, cur = connect(args.db or ':memory:')
    except sqlite3.OperationalError as e:
        log.error(str(e))
        sys.exit(1)

    if args.initdb or (args.db is not None and not path.exists(args.db)):
        log.info('Initializing case database %s...' % args.db)
        initdb(con, cur, args.epsg)
        sys.exit(0)

    epsg = get_epsg(con)

    if args.init_roads:
        drop_tables(con, cur, table_names=['roads'])
        create_roads_table(con, cur, epsg)

    if args.init_domain:
        drop_tables(con, cur, table_names=['domain'])
        create_domain_table(con, cur, epsg)

    if args.init_structures:
        drop_tables(con, cur, table_names=['structures'])
        create_structures_table(con, cur, epsg)

    if args.edb is not None:
        edb_con, edb_cur = connect(args.edb)
        edb_epsg = get_epsg(edb_con)

        if args.load_roads:
            load_roads_from_edb(con, cur, edb_con, edb_epsg)
            log.info('Updated roads from edb')
            sys.exit(0)

    case = Case(buffer=args.buffer)

    if args.case is not None:
        case.create_case(args.case)
Exemplo n.º 12
0
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument(
        '-i', '--infile',
        dest='infile', action='store',
        help='Input STL'
    )

    parser.add_argument(
        '-o', '--outfile',
        dest='outfile', action='store',
        help='Output STL'
    )

    parser.add_argument(
        '--from-epsg', dest='from_epsg', type=int,
        action='store',
        help='Current EPSG for STL'
    )

    parser.add_argument(
        '--to-epsg', dest='to_epsg', type=int,
        action='store',
        help='New EPSG for STL'
    )

    parser.add_argument(
        '--move', dest='move', metavar='C',
        type=float, nargs=3,
        action='store',
        help='Move STL by offset (x y z)'
    )

    parser.add_argument(
        '--single', dest='single', action='store_true',
        help='Write each solid in a separate file'
    )

    parser.add_argument(
        '--decimals', dest='decimals', type=int,
        action='store',
        help='Number of decimals to use in STL'
    )

    parser.add_argument(
        '--format', dest='format', action='store', default='stl',
        help='Write to format (stl(default), shp)'
    )

    args = parser.parse_args()

    stl = MultiStl()
    stl.read(args.infile)

    if args.to_epsg is not None and args.from_epsg != args.to_epsg:
        if args.from_epsg is None:
            log.error('Must provide both --from-epsg and --to-epsg')
            sys.exit(1)
        stl.transform(args.from_epsg, args.to_epsg)
    if args.move is not None:
        stl.move(*args.move)

    if args.decimals is None:
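        # heuristic: derive decimals from the model extent, resolving the
        # smallest bounding box dimension to roughly 1 part in 1e6
        # (note that np.log is the natural logarithm, not log10)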
        bbox = stl.bounding_box()
        dx = bbox[1][0] - bbox[0][0]
        dy = bbox[1][1] - bbox[0][1]
        dz = bbox[1][2] - bbox[0][2]
        decimals = abs(int(np.log(abs(min(dx, dy, dz) / 1.0e6))))
    else:
        decimals = args.decimals

    if args.format == 'stl':
        stl.write(args.outfile, decimals=decimals, single=args.single)
    elif args.format == 'shp':
        to_shp(stl, args.outfile)
    else:
        log.error('Unknown format specified')
Exemplo n.º 13
0
def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)

    utils.add_standard_command_options(parser)

    parser.add_argument("-f",
                        "--format",
                        action="store",
                        dest="format",
                        default="ESRI Shapefile",
                        help="Format of road network")

    parser.add_argument("--inRoads",
                        required=True,
                        action="store",
                        dest="inRoads",
                        help="Input road network")

    parser.add_argument("--outRoads",
                        required=True,
                        action="store",
                        dest="outRoads",
                        help="Output road network")

    parser.add_argument("--buildings",
                        required=True,
                        action="store",
                        dest="buildings",
                        help="Input building roof contours (3D)")

    parser.add_argument("--topo",
                        required=True,
                        action="store",
                        dest="topo",
                        help="Input raster DEM")

    parser.add_argument("--split",
                        type=int,
                        action="store",
                        dest="split",
                        help="Threshold in changed road direction (degrees)" +
                        " for when to split road")

    args = parser.parse_args()

    if not path.exists(args.topo):
        log.error("Input raster does not exist")
        sys.exit(1)

    if not path.exists(args.buildings):
        log.error("Input building contours does not exist")
        sys.exit(1)

    log.info("Reading DEM")
    topo = readGDAL(args.topo, bandIndex=1)[0]

    # Open driver for road networks; GetDriverByName returns None
    # (rather than raising) for unknown formats
    driver = ogr.GetDriverByName(args.format)
    if driver is None:
        log.error("Invalid format for road networks, check ogr documentation")
        sys.exit(1)

    if args.split is None:
        log.info("Do not split roads")
        splitLimit = None
    else:
        splitLimit = float(args.split)
        log.info("Split roads that change direction" +
                 " more than %f" % splitLimit)

    # extract extent from topo raster
    xmin = topo.xll
    ymin = topo.yll
    xmax = topo.xur()
    ymax = topo.yur()

    # Calculate dimensions of spatial index
    ncols = int((xmax - xmin) / CELLSIZE)
    nrows = int((ymax - ymin) / CELLSIZE)

    # Init spatial index of building contours
    spatInd = SpatialIndex(xmin, ymin, nrows, ncols, CELLSIZE)

    log.info("Reading and indexing building contours")
    # Read buildings and store using spatial indices
    spatInd.indexBuildingContours(args.buildings)

    # open road network shape-file
    log.info("Reading road network")
    inRoadFile = driver.Open(args.inRoads, update=0)

    if path.exists(args.outRoads):
        driver.DeleteDataSource(args.outRoads)

    outRoadFile = driver.CreateDataSource(args.outRoads)
    if inRoadFile is None:
        log.error("Could not open file with input road network")
        sys.exit(1)

    if outRoadFile is None:
        log.error("Could not open file with output road network")
        sys.exit(1)

    # Get layer definition and first feature of input road network
    inRoadLayer = inRoadFile.GetLayer()
    inRoadLayerDefn = inRoadLayer.GetLayerDefn()

    outRoadLayer = outRoadFile.CreateLayer("first_layer",
                                           geom_type=inRoadLayer.GetGeomType())

    # create fields on output road file
    for fieldInd in range(inRoadLayerDefn.GetFieldCount()):
        fieldDefn = inRoadLayerDefn.GetFieldDefn(fieldInd)
        outRoadLayer.CreateField(fieldDefn)

    outRoadLayerDefn = outRoadLayer.GetLayerDefn()
    fieldNames = [
        outRoadLayerDefn.GetFieldDefn(i).GetName()
        for i in range(outRoadLayerDefn.GetFieldCount())
    ]

    log.info("Adding attributes to road feature (if missing)")
    # Add attributes for street canyon geometry
    # integer fields: building heights (avg/max), facade angles and distance
    for fieldName in ("BHGT1", "BHGT2", "BHGT1W", "BHGT2W",
                      "BANG1", "BANG2", "BDIST"):
        if fieldName not in fieldNames:
            outRoadLayer.CreateField(ogr.FieldDefn(fieldName, ogr.OFTInteger))
    # string fields: building heights encoded per direction sector
    for fieldName in ("BSECT", "BSECTW"):
        if fieldName not in fieldNames:
            fieldDefn = ogr.FieldDefn(fieldName, ogr.OFTString)
            fieldDefn.SetWidth(40)
            outRoadLayer.CreateField(fieldDefn)

    fig1 = plt.figure(1)
    ax1 = plt.subplot(111)
    ax1.axis('equal')
    if PLOTIND > 0:
        spatInd.plot(ax1)

    roadInd = 0
    noGeom = 0
    nsplit = 0
    # get first road feature
    inRoadFeature = inRoadLayer.GetNextFeature()
    # Loop over all roads
    log.info("Finding nearest facades, setting heights...")
    pg = ProgressBar(inRoadLayer.GetFeatureCount(), sys.stdout)
    while inRoadFeature:
        pg.update(roadInd)
        outRoadFeatures = splitRoad(inRoadFeature, splitLimit,
                                    outRoadLayer.GetLayerDefn())
        if len(outRoadFeatures) > 1:
            log.debug("Raod split into %s parts" % len(outRoadFeatures))
        nsplit += len(outRoadFeatures) - 1
        for outRoadFeature in outRoadFeatures:
            intersections = []
            outRoadGeom = outRoadFeature.GetGeometryRef()
            road = Road(outRoadGeom)
            if outRoadGeom is None or \
               outRoadGeom.GetPointCount() == 0 or not spatInd.inside(road):
                noGeom += 1
                maxHeight1 = None
                maxHeight2 = None
                avgDist = None
                bAngle1 = None
                bAngle2 = None
                avgHeight1 = None
                avgHeight2 = None
            else:
                sumHeight1 = 0
                sumHeight2 = 0
                maxHeight1 = 0
                maxHeight2 = 0
                sumDist = 0
                # Define crossections along the road,
                # Defined by start and endpoints at both side of the road
                cs1List, cs2List = road.defineCrossSections()
                nCS = len(cs1List)
                log.debug("Defined %i cross sections" % nCS)
                # Check intersections with building contours for all cross-sections
                for csInd in range(nCS):
                    cs1 = cs1List[csInd]
                    cs2 = cs2List[csInd]
                    cs1MidPoint = cs1.P0 + 0.5 * (cs1.P1 - cs1.P0)
                    buildingSegments = spatInd.getBuildingSegments(
                        cs1MidPoint[0], cs1MidPoint[1])

                    log.debug("Calculating intersection")
                    if PLOTIND == roadInd and CSIND == csInd:
                        dist1, Pint1 = getIntersectingFacade(
                            ax1, cs1, buildingSegments, True)
                    else:
                        dist1, Pint1 = getIntersectingFacade(
                            ax1, cs1, buildingSegments, False)
                    if Pint1 is None:
                        log.debug("No intersection on side 1")
                        height1 = 0
                        dist1 = MAXDIST
                    else:
                        log.debug("Intersection1 in (%f, %f, %f)" %
                                  (Pint1[0], Pint1[1], Pint1[2]))
                        height1 = spatInd.getBuildingHeight(
                            Pint1[0], Pint1[1], Pint1[2], topo) + HEIGHTCORR
                        intersections.append(Pint1[:2])

                    if PLOTIND == roadInd and csInd == CSIND:
                        plotSegments(ax1,
                                     buildingSegments,
                                     color='red',
                                     width=2.0)
                        row, col = spatInd.getInd(cs1MidPoint[0],
                                                  cs1MidPoint[1])
                        spatInd.plotCell(ax1,
                                         row,
                                         col,
                                         color="purple",
                                         width=2.0)
                        plotSegments(ax1, [cs1List[csInd]],
                                     color="pink",
                                     style="-",
                                     width=1.0)
                        plt.draw()

                    cs2MidPoint = cs2.P0 + 0.5 * (cs2.P1 - cs2.P0)
                    buildingSegments = spatInd.getBuildingSegments(
                        cs2MidPoint[0], cs2MidPoint[1])

                    if PLOTIND == roadInd and csInd == CSIND:
                        plotSegments(ax1,
                                     buildingSegments,
                                     color='red',
                                     width=2.0)
                        row, col = spatInd.getInd(cs2MidPoint[0],
                                                  cs2MidPoint[1])
                        spatInd.plotCell(ax1,
                                         row,
                                         col,
                                         color="brown",
                                         width=2.0)
                        plotSegments(ax1, [cs2List[csInd]],
                                     color="red",
                                     style="-",
                                     width=1.0)
                        plt.draw()

                    log.debug("Calculating intersection")
                    if PLOTIND == roadInd and CSIND == csInd:
                        dist2, Pint2 = getIntersectingFacade(
                            ax1, cs2, buildingSegments, True)
                    else:
                        dist2, Pint2 = getIntersectingFacade(
                            ax1, cs2, buildingSegments, False)

                    if Pint2 is None:
                        log.debug("No intersection on side 2")
                        height2 = 0
                    else:
                        log.debug("Intersection2 in (%f, %f, %f)" %
                                  (Pint2[0], Pint2[1], Pint2[2]))
                        height2 = spatInd.getBuildingHeight(
                            Pint2[0], Pint2[1], Pint2[2], topo) + HEIGHTCORR
                        intersections.append(Pint2[:2])

                    sumHeight1 += height1
                    sumHeight2 += height2
                    sumDist += dist1 + dist2
                    maxHeight1 = int(max(height1, maxHeight1))
                    maxHeight2 = int(max(height2, maxHeight2))
                    if PLOTIND == roadInd and CSIND == csInd:
                        if Pint1 is not None:
                            ax1.text(Pint1[0], Pint1[1], "Distance=%f" % dist1)
                        if Pint2 is not None:
                            ax1.text(Pint2[0], Pint2[1], "Distance=%f" % dist2)

                avgHeight1 = int(sumHeight1 / float(nCS))
                avgHeight2 = int(sumHeight2 / float(nCS))
                # averaging over both sides of street
                # distance refers to between facades on opposite sides
                avgDist = int(round(sumDist / float(nCS)))
                bAngle1, bAngle2 = road.normalAngles()
                if PLOTIND > 0:
                    plotSegments(ax1,
                                 road.getSegments(),
                                 color='grey',
                                 width=0.3)
                if PLOTIND == roadInd:
                    plotSegments(ax1,
                                 road.getSegments(),
                                 color='black',
                                 width=2.0)
                    plotSegments(ax1,
                                 cs1List,
                                 color="green",
                                 style="--",
                                 width=0.5)
                    plotSegments(ax1,
                                 cs2List,
                                 color="green",
                                 style="--",
                                 width=0.5)

                    X = [intersect[0] for intersect in intersections]
                    Y = [intersect[1] for intersect in intersections]
                    if len(X) > 0:
                        ax1.plot(X, Y, "*")
                    plt.title("Road %i, cross-section %i" % (PLOTIND, CSIND))
                    plt.draw()

            # building height as list of sectors
            bsect = bheight2sect(avgHeight1, avgHeight2, bAngle1)
            bsectw = bheight2sect(maxHeight1, maxHeight2, bAngle1)

            outRoadFeature.SetField("BSECT", bsect)
            outRoadFeature.SetField("BSECTW", bsectw)
            outRoadFeature.SetField("BHGT1", avgHeight1)
            outRoadFeature.SetField("BHGT2", avgHeight2)
            outRoadFeature.SetField("BHGT1W", maxHeight1)
            outRoadFeature.SetField("BHGT2W", maxHeight2)
            outRoadFeature.SetField("BANG1", bAngle1)
            outRoadFeature.SetField("BANG2", bAngle2)
            outRoadFeature.SetField("BDIST", avgDist)

            outRoadLayer.CreateFeature(outRoadFeature)
            outRoadFeature.Destroy()
        inRoadFeature.Destroy()
        inRoadFeature = inRoadLayer.GetNextFeature()
        roadInd += 1

    inRoads = inRoadLayer.GetFeatureCount()
    outRoads = outRoadLayer.GetFeatureCount()
    # close road datasources
    inRoadFile.Destroy()
    outRoadFile.Destroy()
    pg.finished()
    if PLOTIND > 0:
        plt.show()

    log.info("Read %i roads, wrote %i roads (created %i by splitting)" %
             (inRoads, outRoads, nsplit))

    if noGeom > 0:
        log.warning("Found %i roads without geometry" % noGeom)

    log.info("Finished")
Example No. 14
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    utils.add_standard_command_options(parser)

    parser.add_argument(
        '--info',
        dest='info', action='store_true',
        help='Print version of sqlite, spatialite, proj and geos'
    )

    parser.add_argument(
        '-e', metavar='edb',
        dest='edb',
        help='name of edb to load',
    )

    parser.add_argument(
        '-d',
        action='store', dest='db', metavar='FILENAME',
        help='Sqlite database file'
    )

    parser.add_argument(
        '--buffer',
        action='store', type=float, dest='buffer', default=DEFAULTS['buffer'],
        help='Domain buffer width (meters), default=%(default)s'
    )

    parser.add_argument(
        '--flat-buffer',
        action='store', type=float, dest='flat_buffer',
        default=DEFAULTS['buffer_flat_fraction'],
        help='Fraction of buffer that should be completely' +
        ' flat, default=%(default)s'
    )

    parser.add_argument(
        '--max-segment-len',
        action='store', dest='max_segment_len',
        default=5,
        help='Maximum length of triangulation edges, default=%(default)s'
    )

    parser.add_argument(
        '--height',
        action='store', dest='height',
        default=DEFAULTS['domain_height'],
        help='Domain height above ground, default=%(default)s'
    )

    parser.add_argument(
        '-s',
        action='store', type=int, metavar='SUBST',
        dest='substances', nargs='+',
        help='Substance indices to create emission data for'
    )

    parser.add_argument(
        '--scenario', action='store', type=int,
        dest='scenario', default=1,
        help='Scenario index to create emission views for'
    )

    parser.add_argument(
        '-t', '--terrain',
        action='store', dest='terrain',
        help='Terrain raster file'
    )

    parser.add_argument(
        '-c', '--case',
        action='store', dest='case',
        help='Output case directory'
    )

    parser.add_argument(
        '--no-roads',
        action='store_false', dest='roads',
        help='Do not generate road STLs or adapt ground stl to roads'
    )
        
    parser.add_argument(
        '--load-roads',
        action='store_true', dest='load_roads',
        help='Load roads from edb and generate road polygons'
    )

    parser.add_argument(
        '--init-roads',
        action='store_true', dest='init_roads',
        help='Create empty roads table in case db' +
        ' before loading roads from edb'
    )

    parser.add_argument(
        '--init-domain',
        action='store_true', dest='init_domain',
        help='Create empty domain table in case db'
    )

    parser.add_argument(
        '--init-structures',
        action='store_true', dest='init_structures',
        help='Create empty structures table in case db'
    )

    parser.add_argument(
        '--init', dest='initdb',
        action='store_true',
        help='Create new or overwrite existing case db'
    )

    parser.add_argument(
        '--epsg', dest='epsg', type=int, default=3006,
        action='store',
        help='EPSG to use when creating new case db'
    )

    parser.add_argument(
        '--emis-ts', dest='emis_ts',
        action='store_true',
        help='Write emission timeseries to case'
    )

    parser.add_argument(
        '--begin', dest='begin', metavar='YYMMDDHH',
        action='store', type=arg2datetime,
        help='First hour of emission timeseries'
    )

    parser.add_argument(
        '--end', dest='end', metavar='YYMMDDHH',
        action='store', type=arg2datetime,
        help='Last hour of emission timeseries'
    )
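    # Note: arg2datetime is defined elsewhere in this module; given the
    # YYMMDDHH metavar it presumably parses timestamps the same way as
    # datetime.datetime.strptime(s, '%y%m%d%H')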

    parser.add_argument(
        '--translate', action='store', nargs=3, type=float,
        dest='translate',
        help='Translate STL (default is origin at the centroid of the domain)'
    )

    # parser.add_argument(
    #     '-s', '--sources',
    #     action='store_true', dest='sources',
    #     help='Generate emissions for sources'
    # )

    args = parser.parse_args()

    if args.info:
        con, cur = connect(":memory:")
        # testing library versions
        row = cur.execute(
            'SELECT sqlite_version(), spatialite_version()'
        ).next()
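        # .next() above is the Python 2 iterator call; under Python 3 this
        # would be written next(cur.execute(...))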
        msg = "> SQLite v%s Spatialite v%s" % (row[0], row[1])
        log.info(msg)
        sys.exit(0)

    if args.db is None:
        log.info('Connecting to in-memory db')
    else:
        log.info('Connecting to %s' % args.db)
    try:
        con, cur = connect(args.db or ':memory:')
    except sqlite3.OperationalError as e:
        log.error(str(e))
        sys.exit(1)

    if args.initdb or (args.db is not None and not path.exists(args.db)):
        log.info('Initializing case database %s...' % args.db)
        initdb(con, cur, args.epsg)
        sys.exit(0)

    epsg = get_epsg(con)

    if args.init_roads:
        drop_tables(con, cur, table_names=['roads'])
        create_roads_table(con, cur, epsg)

    if args.init_domain:
        drop_tables(con, cur, table_names=['domain'])
        create_domain_table(con, cur, epsg)

    if args.init_structures:
        drop_tables(con, cur, table_names=['structures'])
        create_structures_table(con, cur, epsg)
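    # each of the --init-* flags above rebuilds its table from scratch:
    # drop the existing table, then create an empty one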

    if args.edb is not None:
        edb_con, edb_cur = connect(args.edb)
        edb_epsg = get_epsg(edb_con)

        if args.load_roads:
            load_roads_from_edb(con, cur, edb_con, edb_epsg)
            log.info('Updated roads from edb')
            sys.exit(0)
    
    case = Case(buffer=args.buffer)

    if args.case is not None:
        case.create_case(args.case)

    log.info('Reading geometry')
    case.read(con)

    if args.terrain is not None:
        log.info('Reading terrain')
        case.read_terrain(args.terrain)

    if args.edb is not None:
        roadids = []
        if args.emis_ts:
            log.info('Validating roads')
            validation_errors = validate_roads_in_db(con)
            if len(validation_errors) > 0:
                log.error(
                    '\nRoadid Validation error\n' +
                    '\n'.join(
                        ('%-6i %-s' % (err['id'], err['reason'])
                         for err in validation_errors)
                    )
                )
                sys.exit(1)
            else:
                log.info('Roads have valid geometries')

            if args.begin is None or args.end is None:
                log.error(
                    'Must specify time interval of emission time-series'
                )
                sys.exit(1)

            log.info(
                'Calculating fraction of road sources within road polygons'
            )
            len_frac_in_poly = get_road_fractions_in_polys(
                cur, edb_cur
            )
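            # len_frac_in_poly maps road id -> fraction of each road source's
            # length that falls inside its road polygon; roads without any
            # overlap are only warned about and excluded below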

            for roadid, len_frac in len_frac_in_poly.iteritems():
                if len_frac == 0:
                    log.warning(
                        'Road source %i does not intersect the'
                        ' corresponding road polygon' % roadid
                    )
                else:
                    roadids.append(roadid)
            
            log.info('Calculating road vehicle emission timeseries')
            emis_ts = calculate_road_vehicle_ts(
                edb_con, edb_cur, args.begin, args.end,
                args.substances, roadids=roadids
            )

            log.info(
                'Scaling emission timeseries by fraction within road polygons'
            )
            correct_by_fraction_in_poly(emis_ts, len_frac_in_poly)

            log.info('Group road emissions')
            emission_group_ts, emis_group_fractions = \
                calculate_emission_group_fractions(
                    emis_ts
                )
            # convert to mg/s (the grouped series is presumably in g/s)
            emission_group_ts *= 1000.0
            log.info('Writing constant/emissionTimeSeries_mg_per_s.csv')
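            # sep is passed as a byte string, which is a plain str under
            # Python 2 as expected by pandas of that era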
            emission_group_ts.to_csv(
                path_or_buf=path.join(
                    args.case, 'constant', 'emissionTimeSeries_mg_per_s.csv'
                ),
                sep=b'\t',
                index_label='Time',
                date_format='%y%m%d%H',
                header=['LDV', 'HDV']
            )

    if args.case is not None:
        if args.emis_ts:
            log.info('Writing constant/trafficDict')
            case.write_traffic_dict(
                edb_con,
                emis_group_fractions,
                path.join(args.case, 'constant', 'trafficDict'),
                translate=args.translate
            )

        case.write_landuse_dict(
            path.join(args.case, 'constant', 'landuseDict')
        )

        log.info('Creating stl-files')
        if args.terrain is None:
            log.error('Must specify terrain file to generate STL')
            sys.exit(1)
        case.to_stl(
            path.join(args.case, 'constant', 'triSurface'),
            translate=args.translate or case.distance_to_origo()
        )
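
A note on connect(): the helper is not shown in this example. Judging from its use above (it returns a connection and a cursor, and spatialite_version() is callable afterwards), a minimal sketch might look like the following; the SpatiaLite extension name passed to load_extension is an assumption.

import sqlite3

def connect(db_path):
    """Open an SQLite database with the SpatiaLite extension loaded and
    return the connection together with a cursor."""
    con = sqlite3.connect(db_path)
    con.enable_load_extension(True)
    con.load_extension('mod_spatialite')  # extension name is an assumption
    return con, con.cursor()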