Code example #1
def add_report(self, parent_node, rs_lyr, replace=False):
    log = Logger('add_html_report')
    file_path = os.path.join(os.path.dirname(self.xml_path),
                             rs_lyr.rel_path)
    self.add_dataset(parent_node, file_path, rs_lyr, 'HTMLFile', replace)
    log.info('Report node created: {}'.format(file_path))
    return file_path
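
Every example below constructs Logger(name), optionally calls setup(logPath=...), and then logs through info/warning/error. A minimal sketch of that assumed interface, wrapping the standard logging module (the real class in the source projects may differ):

import logging

class Logger(object):
    def __init__(self, name):
        self._log = logging.getLogger(name)
        self._log.setLevel(logging.INFO)
        if not self._log.handlers:
            # Avoid stacking duplicate console handlers on repeated construction
            self._log.addHandler(logging.StreamHandler())

    def setup(self, logPath=None):
        # Mirror all subsequent messages to a log file
        if logPath:
            self._log.addHandler(logging.FileHandler(logPath))

    def info(self, msg):
        self._log.info(msg)

    def warning(self, msg):
        self._log.warning(msg)

    def error(self, msg):
        self._log.error(msg)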
Code example #2
def download_file(file_dict, folder):

    log = Logger('Download')

    if not file_dict['name']:
        log.warning('Missing file name in folder {}'.format(folder))
        return

    if not file_dict['downloadUrl'] or file_dict['downloadUrl'].lower() == '?download':
        log.warning('Missing download URL in folder {}'.format(folder))
        return

    file_path = os.path.join(folder, file_dict['name'])

    if not os.path.isdir(folder):
        os.makedirs(folder)

    # Write file info as JSON
    with open(os.path.splitext(file_path)[0] + '.json', 'w') as json_file:
        json.dump(file_dict, json_file)

    # Skip files that exist unless they are zero bytes in which case remove them
    if os.path.isfile(file_path):
        if os.stat(file_path).st_size == 0:
            log.warning('Removing zero byte file {}'.format(file_path))
            os.remove(file_path)
        else:
            return

    # Download missing file
    with open(file_path, 'w+b') as f:
        response = APIGet(file_dict['downloadUrl'], absolute=True)
        f.write(response.content)

    log.info('Downloaded missing file {}'.format(file_path))
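
download_file leans on an APIGet helper that is not shown here. A hypothetical requests-based stand-in, assuming API routes serve JSON and download URLs serve raw files (API_BASE is a placeholder, and the real helper lives in the source project's API module):

import requests

API_BASE = 'https://api.example.org/'  # placeholder, not the real endpoint

def APIGet(route, absolute=False):
    # absolute=True means the route is already a fully qualified URL
    url = route if absolute else API_BASE + route
    response = requests.get(url)
    response.raise_for_status()
    # JSON API routes come back parsed; file downloads come back as the raw
    # response so callers such as download_file can read .content
    if 'application/json' in response.headers.get('Content-Type', ''):
        return response.json()
    return response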
Code example #3
def safe_makedirs(dir_create_path):
    """safely, recursively make a directory

    Arguments:
        dir_create_path {[type]} -- [description]
    """
    log = Logger("MakeDir")

    # Safety check on path lengths
    if len(dir_create_path) < 5 or len(dir_create_path.split('/')) <= 2:
        raise Exception('Invalid path: {}'.format(dir_create_path))

    if os.path.exists(dir_create_path) and os.path.isfile(dir_create_path):
        raise Exception(
            "Can't create directory if there is a file of the same name: {}".format(dir_create_path))

    if not os.path.exists(dir_create_path):
        try:
            log.info('Folder not found. Creating: {}'.format(dir_create_path))
            os.makedirs(dir_create_path)
        except Exception as e:
            # Possible that something else made the folder while we were trying
            if not os.path.exists(dir_create_path):
                log.error(
                    'Could not create folder: {}'.format(dir_create_path))
                raise e
Code example #4
def visitTopoAuxMetrics(visitID, metricXMLPath):

    log = Logger('Metrics')
    log.info("Topo aux metrics for visit {0}".format(visitID))

    # Make all the API calls and return a dictionary of API call name keyed to data
    apiData = downloadAPIData(visitID)

    # Dictionary to hold the metric values
    visitMetrics = {}

    metric_uc = UndercutMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'Undercut', metric_uc.metrics)

    metrics_su = SubstrateMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'Substrate', metrics_su.metrics)

    metrics_si = SidechannelMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'SideChannel', metrics_si.metrics)

    metrics_fi = FishcoverMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'FishCover', metrics_fi.metrics)

    metrics_wo = LargeWoodMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'LargeWood', metrics_wo.metrics)

    # Metric calculation complete. Write the topometrics to the XML file
    writeMetricsToXML(visitMetrics, visitID, '', metricXMLPath, 'TopoAuxMetrics', __version__)

    log.info("Metric calculation complete for visit {0}".format(visitID))
    return visitMetrics
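
integrateMetricDictionaryWithTopLevelType is not shown; its call pattern suggests it simply files each metric dictionary under its top-level type key. A hypothetical version:

def integrateMetricDictionaryWithTopLevelType(visitMetrics, topLevelType, metrics):
    # Hypothetical: nest this metric dictionary under its type key
    visitMetrics[topLevelType] = metrics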
Code example #5
def validate(topoPath, xmlfile, visitID):
    """
    Validate champ topo data in flat folder structure
    :param topoPath: Full Path to topo data (i.e. GISLayers)
    :return: 0 for success
             1 for all code failures or unhandled problems
             2 for data issues
    """

    returnValue = 0
    log = Logger("Validation")

    survey = CHaMPSurvey()
    survey.load_topo_project(topoPath, visitID)
    validationResults = survey.validate()

    stats = {
        "errors": 0,
        "warnings": 0,
        "nottested": 0,
        "status": Status.PASS,
        "layers": {
        }
    }
    for datasetName, datasetResults in validationResults.iteritems():
        layerstatus = Status.PASS
        for result in datasetResults:
            log.info("[{0:{4}<{5}}] [{1}] [{2}] {3}".format(result["Status"], datasetName, result["TestName"], result["Message"], " ", 10))
            if result["Status"] == "Error" :#or result["Status"] == "NotTested":
                stats["errors"] += 1
                stats["status"] = Status.FAIL
                layerstatus = Status.FAIL
                returnValue = 2
            elif result["Status"] == "NotTested":
                stats["warnings"] += 1
            elif result["Status"] == "Warning":
                stats["nottested"] += 1

        stats['layers'][datasetName] = layerstatus

    if len(validationResults) == 0:
        log.error("No layers found to validate")
        stats["errors"] += 1
        stats["status"] = Status.FAIL
        returnValue = 2

    # The last message is what gets picked up by the caller, so emit the final status at error level either way:
    if returnValue == 2:
        log.error("Validation Failed")
    else:
        log.error("Validation Passed")

    writeMetricsToXML(validationResults, stats, xmlfile)

    return returnValue
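
Status is imported from elsewhere; a minimal sketch of the constants validate() assumes (the real values may differ):

class Status(object):
    PASS = 'Pass'
    FAIL = 'Fail'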
Code example #6
def getAllVisits(siteID):
    log = Logger('Visits')
    log.info("Getting all visits for site: {}".format(siteID))

    mangledSiteID = re.sub(r'[\s_-]', '', siteID)

    siteData = APIGet('sites/{}'.format(mangledSiteID))

    if 'visits' not in siteData or len(siteData['visits']) == 0:
        raise MissingException("No visits found for site `{}`.".format(siteID))

    return [
        visit for visit in siteData['visits']
        if visit['sampleDate'] is not None
    ]
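
MissingException here, and the DataException raised in later examples, behave as plain Exception subclasses. A minimal sketch:

class MissingException(Exception):
    """Raised when expected data is absent."""
    pass

class DataException(Exception):
    """Raised when data is present but unusable."""
    pass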
Code example #7
def exportAsCSV(feats, outCSVfile):
    log = Logger("CSVExport")
    log.info("Beginning CSV Export")
    with open(outCSVfile, "wb") as csvfile:
        csvWriter = csv.writer(csvfile)
        #fieldsGIS = ("POINT_NUMBER", "SHAPE@Y", "SHAPE@X", "SHAPE@Z", "DESCRIPTION")
        csvWriter.writerow(("PNTNO", "Y", "X", "ELEV", "DESC"))
        for feat in feats:
            # Do some checking on mandatory fields first
            pnfield = getfield(feat, ["POINT_NUMB", "Point_Numb", "numb", "Number", "Point", "points", "p", "Point_Id", "PointId", "POINT_ID", "POINTID", "PointNumbe", "Point_id", "Name", "FID", "OBJECTID"])
            cfield = getfield(feat, ["Code","CODE"])
            row = (feat['fields'][pnfield], feat['geometry'].x, feat['geometry'].y, feat['geometry'].z, feat['fields'][cfield] )
            csvWriter.writerow(row)
    log.info("CSV Export complete")
    return outCSVfile
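
getfield is an unshown helper that resolves which of several candidate field names a feature actually carries. A hypothetical version consistent with the calls above, reusing the DataException sketched earlier:

def getfield(feat, candidates):
    # Hypothetical: return the first candidate field name present on the feature
    for name in candidates:
        if name in feat['fields']:
            return name
    raise DataException('None of the fields {} found on feature'.format(candidates))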
Code example #8
def champ_topo_checker(workbench, folder):
    log = Logger('CHaMP Files')
    log.setup(logPath=os.path.join(folder, datetime.now().strftime("%Y%m%d-%H%M%S") + '_champ_files.log'))

    dbCon = sqlite3.connect(workbench)
    dbCurs = dbCon.cursor()
    dbCurs.execute('SELECT WatershedName, VisitYear, SiteName, VisitID' +
                   ' FROM vwVisits WHERE ProgramID = 1 AND ProtocolID IN (2030, 416, 806, 1966, 2020, 1955, 1880, 10036, 9999)' +
                   ' ORDER BY VisitYear, WatershedName')

    for row in dbCurs.fetchall():
        watershed = row[0]
        visit_year = row[1]
        site = row[2]
        visitID = row[3]

        visit_path = os.path.join(folder, str(visit_year), watershed.replace(' ', ''), site.replace(' ', ''), 'VISIT_{}'.format(visitID))
        log.info('Processing {}'.format(visit_path))

        if not os.path.isdir(visit_path):
            os.makedirs(visit_path)

        try:
            visit_data = APIGet('visits/{}'.format(visitID))

            # Write visit information to json file
            with open(os.path.join(visit_path, 'visit_info.json'), 'w') as json_file:
                json.dump(visit_data, json_file)

            # Loop over the two lists of folders per visit: field folders and visit folders
            for api_key, local_folder in {'fieldFolders': 'Field Folders', 'folders': 'Visit Folders'}.items():

                if api_key in visit_data and isinstance(visit_data[api_key], list):
                    for folder_name in visit_data[api_key]:
                        field_folder_path = os.path.join(visit_path, local_folder, folder_name['name'])
                        field_folder_data = APIGet(folder_name['url'], True)

                        if isinstance(field_folder_data, dict) and 'files' in field_folder_data:
                            [download_file(file_dict, field_folder_path) for file_dict in field_folder_data['files']]

            # Get all the miscellaneous files for the visit
            [download_file(file_dict, os.path.join(visit_path, 'Files')) for file_dict in visit_data['files']]

        except Exception as e:
            log.error('Error for visit {}: {}'.format(visitID, e))

    log.info('Process Complete')
Code example #9
def champ_topo_checker(workbench, folder):

    log = Logger('CHaMP Files')
    log.setup(logPath=os.path.join(
        folder,
        datetime.now().strftime("%Y%m%d-%H%M%S") + '_champ_folder_check.log'))

    # # Loop over site names organized by field season and watershed
    # dbCon = sqlite3.connect(workbench)
    # dbCurs = dbCon.cursor()
    # dbCurs.execute('SELECT WatershedName, VisitYear, SiteName' +
    #     ' FROM vwVisits WHERE ProgramID = 1 AND ProtocolID IN (2030, 416, 806, 1966, 2020, 1955, 1880, 10036, 9999)' +
    #     ' GROUP BY WatershedName, VisitYear, SiteName' +
    #     ' ORDER BY VisitYear, WatershedName, SiteName')
    #
    # for row in dbCurs.fetchall():
    #
    #     watershed = row[0]
    #     visit_year = row[1]
    #     site = row[2]
    #     # visitID = row[3]
    #
    #     visit_path1 = os.path.join(folder, str(visit_year), watershed.replace(' ', ''), site)
    #     visit_path2 = visit_path1.replace(' ', '')
    #     if ' ' in site and os.path.isdir(visit_path1) and os.path.isdir(visit_path2):
    #         try:
    #             process_duplicate_folder(visit_path1, visit_path2)
    #         except Exception as e:
    #             log.error('Error processing {}'.format(visit_path1))

    # Create a List
    listOfEmptyDirs = list()
    # Iterate over the directory tree and check if directory is empty.
    for (dirpath, dirnames, filenames) in os.walk(folder):
        if len(dirnames) == 0 and len(filenames) == 0:
            listOfEmptyDirs.append(dirpath)

    log.info('{} empty folders'.format(len(listOfEmptyDirs)))
    for empty in listOfEmptyDirs:
        os.rmdir(empty)

    log.info('Process Complete')
Code example #10
def calculate(apiData):
    """
    Calculate riparian structure metrics
    :param apiData: dictionary of API data. Key is API call name. Value is API data
    :return: metrics dictionary
    """

    raise Exception(
        'TODO: Code abandoned after it was determined that this was not needed.'
    )

    log = Logger('riparianCoverMetrics')
    log.info("Running RiparianCoverMetrics")

    # Retrieve the riparian structure API data
    riparianVals = [
        val['value'] for val in apiData['RiparianStructure']['values']
    ]

    # calculate metrics
    return _calc(riparianVals)
Code example #11
def move_measurements(old_folder, new_folder):

    log = Logger('Move Measurements')
    log.setup(logPath=os.path.join(new_folder, datetime.now().strftime("%Y%m%d-%H%M%S") + '_move_measurements.log'))

    # Create a List
    measurements = list()
    # Iterate over the directory tree and check if directory is empty.
    for (dirpath, dirnames, filenames) in os.walk(old_folder):
        for file in filenames:
            measurements.append(os.path.join(dirpath, file))

    log.info('{} measurement files to move'.format(len(measurements)))

    for meas in measurements:
        new_path = os.path.join(os.path.dirname(meas.replace(old_folder, new_folder)), 'AuxMeasurements', os.path.basename(meas))

        if not os.path.isdir(os.path.dirname(new_path)):
            os.makedirs(os.path.dirname(new_path))

        os.rename(meas, new_path)
        log.info('Moved {} to {}'.format(meas, new_path))

    # Create a List
    listOfEmptyDirs = list()
    # Iterate over the directory tree and check if directory is empty.
    for (dirpath, dirnames, filenames) in os.walk(old_folder):
        if len(dirnames) == 0 and len(filenames) == 0:
            listOfEmptyDirs.append(dirpath)

    log.info('{} empty folders'.format(len(listOfEmptyDirs)))
    for empty in listOfEmptyDirs:
        os.rmdir(empty)

    log.info('Process Complete')
Code example #12
def downloadExtractParseVisits(visits, outputFolder):
    log = Logger('Downloading')
    log.info("Downloading all visits from the API")

    projects = []
    for visit in visits:

        try:
            extractpath = os.path.join(outputFolder, 'VISIT_{}'.format(visit))
            projpath = os.path.join(extractpath, 'project.rs.xml')
            downloadUnzipTopo(visit, extractpath)

            proj = TopoProject(extractpath)

            if proj.isrsproject:
                projects.append({"project": proj, "visit": visit})
            else:
                log.error("File not found: {}".format(projpath))
                raise DataException("Missing Project File")

        # Log and move on if something fails
        except Exception as e:
            log.error('Error processing visit {}: {}'.format(visit, e))

    return projects
Code example #13
def process_duplicate_folder(with_spaces, no_spaces):

    log = Logger('Duplicate')

    movers = []
    for root, dirs, files in os.walk(with_spaces):
        for name in files:
            old_path = os.path.join(root, name)
            new_path = old_path.replace(with_spaces, no_spaces)

            # Simply delete the file if it is zero bytes
            if os.stat(old_path).st_size == 0:
                log.info('Deleting zero byte file {}'.format(old_path))
                os.remove(old_path)
                continue

            if not os.path.isdir(os.path.dirname(new_path)):
                os.makedirs(os.path.dirname(new_path))

            if os.path.isfile(new_path):
                os.remove(old_path)
            else:
                log.info('Moving file {}'.format(old_path))
                os.rename(old_path, new_path)
Code example #14
def myMainMethod(topoDataFolder, xmlfile, visitID):
    """
    :param jsonFilePath:
    :param outputFolder:
    :param bVerbose:
    :return:
    """
    log = Logger("myMainMethod")

    # dothingA()
    log.info("I did thing A")
    # dothingB()
    log.info("I did thing B")
    # Write XML()
    log.info("I wrote my XML file")
    # writelogs()
    log.info("I wrote my log files")
Code example #15
File: create_project.py Project: wally-mac/RIM
def make_project(project_path, srs_template, image_path, site_name, huc, BRAT_path, VBET_path, DEM_path, hs_path):
    """
    Creates project folders
    :param project_path: where we want project to be located
    """

    # set workspace to desired project location
    arcpy.env.overwriteOutput = True
    arcpy.env.workspace = project_path

    if not os.path.exists(project_path):
        os.mkdir(project_path)

    # build project folder structure in project path
    # inputs folders

    log = Logger('create_project')
    log.info('creating project folders...')

    inputs_folder = make_folder(project_path, "01_Inputs")

    image_folder = make_folder(inputs_folder, "01_Imagery")
    AP01_folder = make_folder(image_folder, "AP_01")

    topo_folder = make_folder(inputs_folder, "02_Topo")
    DEM01_folder = make_folder(topo_folder, "DEM_01")

    context_folder = make_folder(inputs_folder, "03_Context")
    BRAT01_folder = make_folder(context_folder, "BRAT_01")
    VBET01_folder = make_folder(context_folder, "VBET_01")
    make_folder(context_folder, 'WBD')

    log.info('copying input files into new project folder...')

    def add_image(image_path, AP_folder):
        # put input imagery in folder
        arcpy.CopyRaster_management(image_path, os.path.join(AP_folder, 'orthomosaic.png'))
    add_image(image_path, AP01_folder)

    # copy DEM, hillshade to project folder
    arcpy.CopyRaster_management(DEM_path, os.path.join(DEM01_folder, 'DEM.tif'))
    arcpy.CopyRaster_management(hs_path, os.path.join(DEM01_folder, 'hlsd.tif'))

    # copy BRAT, VBET to project folder
    arcpy.CopyFeatures_management(BRAT_path, os.path.join(BRAT01_folder, 'BRAT.shp'))
    arcpy.CopyFeatures_management(VBET_path, os.path.join(VBET01_folder, 'VBET.shp'))

    # mapping folder
    # subsequent DCE and RS folders are created when a new DCE is made using new dce script
    mapping_folder = make_folder(project_path, "02_Mapping")
    DCE01_folder = make_folder(mapping_folder, "DCE_01")

    log.info('creating blank RS and DCE shapefiles...')

    # make empty shapefiles for first DCE
    # Use Describe to get a SpatialReference object
    spatial_reference = arcpy.Describe(srs_template).spatialReference
    # inundation
    if not os.path.exists(os.path.join(DCE01_folder, "inundation.shp")):
        arcpy.CreateFeatureclass_management(DCE01_folder, "inundation.shp", "POLYGON", "", "DISABLED", "DISABLED", spatial_reference)
    # add field for inundation type
        arcpy.AddField_management(os.path.join(DCE01_folder, 'inundation.shp'), 'type', "TEXT")
    # dam crests
    if not os.path.exists(os.path.join(DCE01_folder, "dam_crests.shp")):
        arcpy.CreateFeatureclass_management(DCE01_folder, "dam_crests.shp", "POLYLINE", "", "DISABLED", "DISABLED", spatial_reference)
        # add fields for dam state and crest type
        arcpy.AddField_management(os.path.join(DCE01_folder, 'dam_crests.shp'), 'dam_state', "TEXT")
        arcpy.AddField_management(os.path.join(DCE01_folder, 'dam_crests.shp'), 'crest_type', "TEXT")
        arcpy.AddField_management(os.path.join(DCE01_folder, 'dam_crests.shp'), 'dam_id', "DOUBLE")
    # thalwegs
    if not os.path.exists(os.path.join(DCE01_folder, "thalwegs.shp")):
        arcpy.CreateFeatureclass_management(DCE01_folder, "thalwegs.shp", "POLYLINE", "", "DISABLED", "DISABLED", spatial_reference)
        # add field for thalweg type
        arcpy.AddField_management(os.path.join(DCE01_folder, 'thalwegs.shp'), 'type', "TEXT")
    # make first RS folder
    RS01_folder = make_folder(mapping_folder, "RS_01")
    # create empty shapefiles for valley bottom and valley bottom centerline
    # valley bottom
    if not os.path.exists(os.path.join(RS01_folder, "valley_bottom.shp")):
        arcpy.CreateFeatureclass_management(RS01_folder, "valley_bottom.shp", "POLYGON", "", "DISABLED", "DISABLED", spatial_reference)
        arcpy.AddField_management(os.path.join(RS01_folder, 'valley_bottom.shp'), 'site_name', "TEXT")
        arcpy.AddField_management(os.path.join(RS01_folder, 'valley_bottom.shp'), 'huc', "DOUBLE")
        with arcpy.da.UpdateCursor(os.path.join(RS01_folder, 'valley_bottom.shp'), ['site_name', 'huc']) as cursor:
            for row in cursor:
                row[0] = site_name
                row[1] = huc
                cursor.updateRow(row)
    # valley bottom centerline
    if not os.path.exists(os.path.join(RS01_folder, "vb_centerline.shp")):
        arcpy.CreateFeatureclass_management(RS01_folder, "vb_centerline.shp", "POLYLINE", "", "DISABLED", "DISABLED", spatial_reference)

    # analysis folder
    analysis_folder = make_folder(project_path, "03_Analysis")
    DCEout = make_folder(analysis_folder, "DCE_01")
    make_folder(DCEout, "Shapefiles")
    make_folder(analysis_folder, "CDs")
    make_folder(analysis_folder, "Summary")
Code example #16
def generate_substrate_raster(topo_project_folder,
                              out_path,
                              di_values,
                              dict_ocular_values,
                              out_channel_value=4000.0):
    """Generate Substrate Raster from Channel units and ocular substrate estimates for each di value provided

    :param str topo_project_folder: folder source of the topo project
    :param str out_path: path for outputs
    :param list di_values: list of int percentile values for roughness calculation
    :param dict dict_ocular_values: dictionary of ocular estimates of grain size values
    :param float out_channel_value: roughness value to use for out of channel areas, default = 4000
    :return: 0 for success
    """

    # Load Topo Project
    log = Logger("SubstrateRaster")
    log.info("topo_project_folder: {}".format(str(topo_project_folder)))
    log.info("outputPath: {}".format(str(out_path)))
    log.info("D Values: {}".format(str(di_values)))
    project = topoproject.TopoProject(topo_project_folder)
    topo_rs_project = riverscapes.Project(
        os.path.join(topo_project_folder, "project.rs.xml"))
    log.info("Topo project loaded")

    # Initialize Riverscapes Project
    rsproject = riverscapes.Project()
    rsproject.create("Substrate", "Substrate", __version__)
    for tagname, tags in {
            "Site": ["Site", "SiteName"],
            "Visit": ["Visit", "VisitID"],
            "Year": ["Year", "FieldSeason"],
            "Watershed": ["Watershed", "Watershed"]
    }.iteritems():
        if tags[0] in topo_rs_project.ProjectMetadata or tags[1] in topo_rs_project.ProjectMetadata:
            rsproject.addProjectMetadata(
                tagname,
                topo_rs_project.ProjectMetadata[tags[0]]
                if tags[0] in topo_rs_project.ProjectMetadata
                else topo_rs_project.ProjectMetadata[tags[1]])
        else:
            raise DataException("Missing project metadata")

    # 1. Do some math on the dictionary of substrate values for each di
    dict_di_roughness_values = {}
    list_keep_units = []
    for di in di_values:
        dict_units = dict_ocular_by_unit(dict_ocular_values)
        dict_roughness_values = {}
        for unitid, dict_unit in dict_units.iteritems():
            if all(dict_unit[key] is not None for key in [
                    "Bedrock", "Boulders", "Cobbles", "CourseGravel",
                    "FineGravel", "Fines", "Sand"
            ]):
                dict_roughness_values[int(unitid)] = calculate_grain_size(
                    dict_unit, di)
                if unitid not in list_keep_units:
                    list_keep_units.append(unitid)
            else:
                log.warning("Missing Channel Unit Substrate Values for Unit {}.".format(str(unitid)))

        dict_roughness_values[0] = float(
            out_channel_value)  # Out of Channel "UnitNumber" == 0
        dict_di_roughness_values[di] = pandas.DataFrame(
            list(dict_roughness_values.iteritems()),
            index=dict_roughness_values.keys(),
            columns=["UnitNumber", "Roughness"])
        log.info("Calculated Roughness Values for D{}".format(str(di)))

    # 2. Spread the channel Unit areas
    gdf_expanded_channel_units = expand_polygons(
        project.getpath("ChannelUnits"),
        project.getpath("BankfullExtent"),
        keep_units=list_keep_units)
    log.info("Channel Units expanded to Bankfull Area")

    # 3. Add DEM area
    gdf_demextent = geopandas.GeoDataFrame.from_features(
        geopandas.GeoSeries(get_data_polygon(project.getpath("DEM"))))
    if not all(gdf_demextent.geometry.is_valid):
        gdf_demextent.geometry = gdf_demextent.geometry.buffer(0)
        log.info("Fix invalid geoms for DEM Extent")
    gdf_demextent["UnitNumber"] = 0
    gdf_in_channel_union = geopandas.GeoDataFrame.from_features(
        geopandas.GeoSeries(gdf_expanded_channel_units.unary_union.buffer(0)))
    gdf_out_of_channel = geopandas.overlay(gdf_demextent, gdf_in_channel_union,
                                           "difference")
    gdf_full_polygons = gdf_expanded_channel_units.append(gdf_out_of_channel)
    log.info("Out of Channel Area generated")

    for di, df_roughness_values in dict_di_roughness_values.iteritems():
        # 4 Add dict to channel units
        gdf_full_polygons_merged = gdf_full_polygons.merge(df_roughness_values,
                                                           on="UnitNumber")
        gdf_final_polys = gdf_full_polygons_merged.rename(
            columns={"Roughness_y": "Roughness"})
        gdf_final_polys.drop([
            col for col in gdf_final_polys.columns
            if col not in ["UnitNumber", "Roughness", 'geometry']
        ],
                             axis=1,
                             inplace=True)
        log.info("Roughness Values added to Channel Units for D{}".format(
            str(di)))

        # 5. Rasterize Polygons
        raster_substrate = path.join(out_path,
                                     "substrate_D{}.tif".format(str(di)))
        shp_substrate = path.join(out_path,
                                  "substrate_D{}.shp".format(str(di)))
        gdf_final_polys.to_file(shp_substrate)
        log.info("Saved Substrate Shapefile: {}".format(shp_substrate))
        rasterize_polygons(shp_substrate, project.getpath("DEM"),
                           raster_substrate, "Roughness")
        log.info("Created Substrate Raster: {}".format(raster_substrate))

        # Add Realization to Riverscapes
        realization = riverscapes.Realization("Substrate")
        realization.name = "Substrate_D{}".format(str(di))
        realization.productVersion = __version__
        ds_shapefile = riverscapes.Dataset().create(
            "Substrate_Shapefile", "substrate_D{}.shp".format(str(di)))
        ds_raster = riverscapes.Dataset().create(
            "Substrate_Raster", "substrate_D{}.tif".format(str(di)))
        ds_shapefile.metadata["D_Value"] = str(di)
        ds_raster.metadata["D_Value"] = str(di)
        ds_shapefile.id = "substrate_shapefile_d{}".format(str(di))
        ds_raster.id = "substrate_shapefile_d{}".format(str(di))
        realization.outputs[ds_shapefile.name] = ds_shapefile
        realization.outputs[ds_raster.name] = ds_raster
        rsproject.addRealization(realization)

    # Write Riverscapes Project.
    rsprojectxml = os.path.join(out_path, "project.rs.xml")
    rsproject.writeProjectXML(rsprojectxml)
    log.info("Riverscapes Project file saved: {}".format(rsprojectxml))

    return 0
Code example #17
def hydro_gis_export(hydro_project_xml, topo_project_xml, outfolder):
    """
    :param jsonFilePath:
    :param outputFolder:
    :param bVerbose:
    :return:
    """
    #gdal.UseExceptions()

    log = Logger("Hydro GIS Export")

    # 1 todo Read project.rs.xml
    rs_hydro = Project(hydro_project_xml)
    rs_topo = TopoProject(topo_project_xml)
    hydro_results_folder = os.path.dirname(hydro_project_xml)

    if not rs_hydro.ProjectMetadata.has_key("Visit"):
        raise MissingException("Cannot Find Visit ID")
    visit_id = rs_hydro.ProjectMetadata['Visit']

    dem = gdal.Open(rs_topo.getpath("DEM"))
    dem_srs = dem.GetProjection()
    dem_x_size = dem.RasterXSize
    dem_y_size = dem.RasterYSize
    dem_band = dem.GetRasterBand(1)
    dem_ndv = dem_band.GetNoDataValue()
    dem_geotransform = dem.GetGeoTransform()

    # 3 Get data columns in csv file
    csvfile = os.path.join(hydro_results_folder, "dem_grid_results.csv")
    csvfile_clean = os.path.join(hydro_results_folder, "dem_grid_results_clean_header.csv")
    if not os.path.isfile(csvfile):
        raise MissingException("Required file {} does not exist.".format(csvfile))
    with open(csvfile, "rb") as f_in, open(csvfile_clean, "wb") as f_out:
        reader = csv.reader(f_in)
    #     writer = csv.writer(f_out)
        cols = [col for col in reader.next() if col not in ["Y", "X"]]
        log.info("Loaded fields from csv file.")

        # writer.writerow(['X', 'Y'] + cols)
        # for row in reader:
        #     writer.writerow(row)
        # log.info("Saved csv file with sanitized headers.")

    # Write VRT file
    vrt = os.path.join(hydro_results_folder, '{}.vrt'.format("dem_grid_results"))
    with open(vrt, 'wt') as f:
        f.write('<OGRVRTDataSource>\n')
        f.write('\t<OGRVRTLayer name="{}">\n'.format("dem_grid_results"))
        f.write('\t\t<SrcDataSource>{}</SrcDataSource>\n'.format(csvfile))
        f.write('\t\t<SrcLayer>{}</SrcLayer>\n'.format("dem_grid_results"))
        f.write('\t\t<GeometryType>wkbPoint25D</GeometryType>\n')
        f.write('\t\t<LayerSRS>{}</LayerSRS>\n'.format(dem_srs))
        f.write('\t\t<GeometryField encoding="PointFromColumns" x="X" y="Y" />\n')
        for field in cols:
            f.write('\t\t<Field name="{}" type="Real" subtype="Float32" />\n'.format(field))
        f.write('\t</OGRVRTLayer>\n')
        f.write('</OGRVRTDataSource>\n')
        log.info("Generated vrt file {}".format(vrt))

    # Open csv as OGR
    ogr_vrt = ogr.Open(vrt, 1)
    if ogr_vrt is None:
        raise DataException("unable to open {}".format(vrt))
    layer = ogr_vrt.GetLayer()

    # 4 Generate geotiff for each column in the CSV file
    driver = gdal.GetDriverByName("GTiff")
    for col in cols:
        out_tif = os.path.join(outfolder, '{}.tif'.format(col))

        out_raster = driver.Create(out_tif, dem_x_size, dem_y_size, 1, gdalconst.GDT_Float32)
        out_raster.SetGeoTransform(dem_geotransform)
        out_raster.SetProjection(dem_srs)
        band = out_raster.GetRasterBand(1)
        band.SetNoDataValue(dem_ndv)
        band.FlushCache()

        gdal.RasterizeLayer(out_raster, [1], layer, options=["ATTRIBUTE={}".format(col)])
        band.GetStatistics(0, 1)
        band.FlushCache()
        out_raster.FlushCache()
        log.info("Generated {} for attribute {}".format(out_tif, col))

        if col == "Depth":
            raw = numpy.array(band.ReadAsArray())
            masked = numpy.ma.masked_array(raw, raw == dem_ndv)
            bool_raster = numpy.array(masked, "bool")
            numpy.greater(masked, 0, bool_raster)

            raster_mem = gdal.GetDriverByName("GTIFF").Create(os.path.join(outfolder, "Temp.tif"), dem_x_size, dem_y_size, 1, gdalconst.GDT_Int16)
            raster_mem.SetGeoTransform(dem_geotransform)
            raster_mem.SetProjection(dem_srs)
            band_mem = raster_mem.GetRasterBand(1)
            band_mem.WriteArray(bool_raster, 0, 0)
            band_mem.SetNoDataValue(dem_ndv)
            band_mem.FlushCache()

            temp = ogr.GetDriverByName("ESRI Shapefile").CreateDataSource(os.path.join(outfolder, "TempExtent.shp"))
            temp_layer = temp.CreateLayer("RawExtent", osr.SpatialReference(wkt=dem_srs), ogr.wkbPolygon)
            temp_layer.CreateField(ogr.FieldDefn("Value", ogr.OFTInteger))
            temp_layer.CreateField(ogr.FieldDefn("Area", ogr.OFTReal))

            gdal.Polygonize(band_mem, None, temp_layer, 0)

            del raster_mem
        #
        #     for feature in temp_layer:
        #         feature.SetField("Area", feature.GetGeometryRef().GetArea())
        #         temp_layer.SetFeature(feature)

            # Stage Extent
            # temp_layer.SetAttributeFilter("Value=1")
            # shp_extent = os.path.join(outfolder, "StageExtent.shp")
            # driver_extent = ogr.GetDriverByName("ESRI Shapefile").CreateDataSource(shp_extent)
            # driver_extent.CopyLayer(temp_layer, "StageExtent")
            # driver_extent = None
            # ogr_extent = ogr.Open(shp_extent, 1)
            # layer_extent = ogr_extent.GetLayer("StageExtent")
            # field_extent = ogr.FieldDefn("ExtentType", ogr.OFTString)
            # layer_extent.CreateField(field_extent)
            # area_current = 0.0
            # fid_current = None
            # for feature in layer_extent:
            #     area_feat = feature.GetGeometryRef().GetArea()
            #     if area_feat > area_current:
            #         area_current = area_feat
            #         fid_current = feature.GetFID()
            #
            # edit_feat = layer_extent.GetFeature(fid_current)
            # edit_feat.SetField("ExtentType", "Channel")
            # layer_extent.SetFeature(edit_feat)
            #
            # layer_extent.DeleteField(layer_extent.FindFieldIndex("Value", True))
            # #ogr_extent.Destroy()
            # log.info("Generated Stage Extent Shapefile {}".format(shp_extent))
            #
            # # Stage Islands
            # import time
            # time.sleep(5)
            # temp_layer.ResetReading()
            # temp_layer.SetAttributeFilter("Value=0")
            # shp_islands = os.path.join(outfolder, "StageIslands.shp")
            # driver_islands = ogr.GetDriverByName("ESRI Shapefile").CreateDataSource(shp_islands)
            # driver_islands.CopyLayer(temp_layer, "StageIslands")
            # driver_islands = None
            # ogr_islands = ogr.Open(shp_islands, 1)
            # layer_islands = ogr_islands.GetLayer("StageIslands")
            #
            # field_qual = ogr.FieldDefn("Qualifying", ogr.OFTInteger)
            # field_qual.SetDefault("0")
            # field_valid = ogr.FieldDefn("IsValid", ogr.OFTInteger)
            # field_valid.SetDefault("0")
            # layer_islands.CreateField(field_qual)
            # layer_islands.CreateField(field_valid)
            # layer_islands.SyncToDisk()
            #
            # area_current = 0.0
            # fid_current = None
            # for feature in layer_islands:
            #     if feature is not None:
            #         g = feature.GetGeometryRef()
            #         area_feat = g.GetArea()
            #         # todo identify qualifying islands here?
            #         if area_feat > area_current:
            #             area_current = area_feat
            #             fid_current = feature.GetFID()
            #
            # #feat_del = layer_islands.GetFeature(fid_current)
            # layer_islands.DeleteFeature(fid_current)
            #
            # layer_islands.DeleteField(layer_islands.FindFieldIndex("Value", True))
            # ogr_islands = None
            # ogr_extent = None
            # log.info("Generated Stage Islands Shapefile {}".format(shp_islands))
            temp = None
        del out_raster

    shp_hydroresults = os.path.join(outfolder, "HydroResults.shp")
    ogr.GetDriverByName("ESRI Shapefile").CopyDataSource(ogr_vrt, shp_hydroresults)
    #out_shp = ogr.GetDriverByName("ESRI Shapefile").CreateDataSource()
    # ogr_shp = ogr.Open(shp_hydroresults, 1)
    # lyr = ogr_shp.GetLayer()
    # lyr_defn = lyr.GetLayerDefn()
    # for i in range(lyr_defn.GetFieldCount()):
    #     fielddefn = lyr_defn.GetFieldDefn(i)
    #     fielddefn.SetName(fielddefn.GetName().replace(".","_"))
    #     lyr.AlterFieldDefn(i, fielddefn, ogr.ALTER_NAME_FLAG)
    #
    # new_field = ogr.FieldDefn('V_Bearing', ogr.OFTReal)
    # lyr.CreateField(new_field)
    # # Calculate Velocity Bearing
    # for feat in lyr:
    #     vel_x = feat.GetField("X_Velocity")
    #     vel_y = feat.GetField("Y_Velocity")
    #     dir = 90 - math.degrees(math.atan2(float(vel_y), float(vel_x)))
    #     bearing = 360 + dir if dir < 0 else dir
    #     feat.SetField('V_Bearing', float(bearing))
    #     lyr.SetFeature(feat)

    log.info("Generated Hydro Results Shapefile {}".format(shp_hydroresults))
    ogr_vrt = None
    ogr_shp = None

    return 0
Code example #18
def export_hydro_model(hydro_rs_xml, topo_rs_xml, out_path):

    log = Logger("Hydro GIS Export")

    # 1 todo Read project.rs.xml
    rs_hydro = Project(hydro_rs_xml)
    rs_topo = TopoProject(topo_rs_xml)
    hydro_results_folder = os.path.dirname(hydro_rs_xml)
    csvfile_hydro = os.path.join(
        hydro_results_folder,
        "dem_grid_results.csv")  # todo get this from hydro project xml

    if not rs_hydro.ProjectMetadata.has_key("Visit"):
        raise MissingException("Cannot Find Visit ID")
    visit_id = rs_hydro.ProjectMetadata['Visit']

    df_csv = pandas.read_csv(csvfile_hydro)
    log.info("Read hydro results csv as data frame")

    # Get DEM Props
    with rasterio.open(rs_topo.getpath("DEM")) as rio_dem:
        dem_crs = rio_dem.crs
        dem_bounds = rio_dem.bounds
        dem_nodata = rio_dem.nodata
    out_transform = rasterio.transform.from_origin(dem_bounds.left,
                                                   dem_bounds.top, 0.1, 0.1)

    pad_top = int((dem_bounds.top - max(df_csv['Y'])) / 0.1)
    pad_bottom = int((min(df_csv['Y']) - dem_bounds.bottom) / 0.1)
    pad_right = int((dem_bounds.right - max(df_csv['X'])) / 0.1)
    pad_left = int((min(df_csv['X']) - dem_bounds.left) / 0.1)
    log.info("Read DEM properties")

    # generate shp
    geom = [Point(xy) for xy in zip(df_csv.X, df_csv.Y)]
    df_output = df_csv.drop(
        ["X", "Y", "Depth.Error", "WSE", "BedLevel"],
        axis="columns")  #, inplace=True) # save a bit of space
    gdf_hydro = geopandas.GeoDataFrame(df_output, geometry=geom)
    gdf_hydro.crs = dem_crs
    gdf_hydro.columns = gdf_hydro.columns.str.replace(".", "_")
    gdf_hydro["VelDir"] = numpy.subtract(
        90,
        numpy.degrees(
            numpy.arctan2(gdf_hydro["Y_Velocity"], gdf_hydro["X_Velocity"])))
    gdf_hydro["VelBearing"] = numpy.where(gdf_hydro['VelDir'] < 0,
                                          360 + gdf_hydro["VelDir"],
                                          gdf_hydro["VelDir"])
    gdf_hydro.drop("VelDir", axis="columns", inplace=True)
    #gdf_hydro.to_file(os.path.join(out_path, "HydroResults.shp"))
    del df_output, gdf_hydro
    log.info("Generated HydroResults.shp")

    for col in [
            col for col in df_csv.columns
            if col not in ["X", "Y", "X.Velocity", "Y.Velocity"]
    ]:
        df_pivot = df_csv.pivot(index="Y", columns="X", values=col)
        np_raw = df_pivot.iloc[::-1].as_matrix()

        np_output = numpy.pad(np_raw,
                              ((pad_top, pad_bottom), (pad_left, pad_right)),
                              mode="constant",
                              constant_values=numpy.nan)

        with rasterio.open(os.path.join(out_path, "{}.tif".format(col)),
                           'w',
                           driver='GTiff',
                           height=np_output.shape[0],
                           width=np_output.shape[1],
                           count=1,
                           dtype=np_output.dtype,
                           crs=dem_crs,
                           transform=out_transform,
                           nodata=dem_nodata) as rio_output:
            rio_output.write(np_output, 1)
        log.info("Generated output Raster for {}".format(col))

        if col == "Depth":
            # Generate water extent polygon
            np_extent = numpy.greater(np_output, 0)
            mask = numpy.isfinite(np_output)
            shapes = features.shapes(np_extent.astype('float32'),
                                     mask,
                                     transform=out_transform)
            gdf_extent_raw = geopandas.GeoDataFrame.from_features(
                geopandas.GeoSeries([asShape(s) for s, v in shapes]))
            gdf_extent = geopandas.GeoDataFrame.from_features(
                gdf_extent_raw.geometry.simplify(0.5))
            gdf_extent.crs = dem_crs

            gdf_extent['Area'] = gdf_extent.geometry.area
            gdf_extent['Extent'] = ""
            gdf_extent.set_value(
                gdf_extent.index[gdf_extent['Area'].idxmax()], "Extent",
                "Channel")  # Set largest Polygon as Main Channel
            gdf_extent.to_file(os.path.join(out_path, "StageExtent.shp"))
            log.info("Generated Water Extent Polygons")

            # Generate islands and spatial join existing islands attributes
            gdf_exterior = geopandas.GeoDataFrame.from_features(
                geopandas.GeoSeries([
                    Polygon(shape) for shape in gdf_extent.geometry.exterior
                ]))
            gs_diff = gdf_exterior.geometry.difference(gdf_extent.geometry)
            if not all(g.is_empty for g in gs_diff):
                gdf_islands_raw = geopandas.GeoDataFrame.from_features(
                    geopandas.GeoSeries(
                        [shape for shape in gs_diff if not shape.is_empty]))
                gdf_islands_explode = geopandas.GeoDataFrame.from_features(
                    gdf_islands_raw.geometry.explode())
                gdf_islands_clean = geopandas.GeoDataFrame.from_features(
                    gdf_islands_explode.buffer(0))
                gdf_islands_clean.crs = dem_crs
                if fiona.open(rs_topo.getpath("WettedIslands")).__len__(
                ) > 0:  # Exception when createing gdf if topo islands shapefile is empty feature class
                    gdf_topo_islands = geopandas.GeoDataFrame.from_file(
                        rs_topo.getpath("WettedIslands"))
                    gdf_islands_sj = geopandas.sjoin(gdf_islands_clean,
                                                     gdf_topo_islands,
                                                     how="left",
                                                     op="intersects")
                    gdf_islands_sj.drop(["index_right", "OBJECTID"],
                                        axis="columns",
                                        inplace=True)
                    gdf_islands_sj.crs = dem_crs
                    gdf_islands_sj.to_file(
                        os.path.join(out_path, "StageIslands.shp"))

    #todo: Generate Lyr file and copy
    #todo: Generate readme

    return 0
Code example #19
File: create_DCE.py Project: wally-mac/RIM
def new_DCE(srs_template, project_path, AP_fold, DCE_fold, image_path):

    #    LayerTypes = {
    # RSLayer(name, id, tag, rel_path)
    # 'AP_new': RSLayer(date_name, AP_fold, 'Raster', os.path.join('01_Inputs/01_Imagery', AP_fold, 'imagery.tif')),
    # 'INUN_new': RSLayer('Inundation', 'DCE_01_inun', 'Vector', os.path.join('03_Analysis', DCE_fold, 'Shapefiles/inundation.shp')),
    # 'DAM_CREST_new': RSLayer('Dam Crests', 'DCE_01_damcrests', 'Vector', os.path.join('03_Analysis', DCE_fold, 'Shapefiles/dam_crests.shp')),
    # 'TWG_new': RSLayer('Thalwegs', 'DCE_01_thalwegs', 'Vector', os.path.join('03_Analysis', DCE_fold, 'Shapefiles/thalwegs.shp'))
    # }

    #log = Logger('edit_xml')
    #log.info('Loading the XML to make edits...')
    # Load up a new RSProject class
    #project = RSProject(cfg, project_path)

    log = Logger('new_DCE')

    # Set local variables
    has_m = "DISABLED"
    has_z = "DISABLED"

    log.info('before getting spatial reference')
    # Use Describe to get a SpatialReference object
    spatial_reference = arcpy.Describe(srs_template).spatialReference

    log.info('checking if project folders exist')
    # check if Inputs, Mapping, and Analysis folders exist, if not create them
    folder_list = ['01_Inputs', '02_Mapping', '03_Analysis']
    for folder in folder_list:
        if not os.path.exists(os.path.join(project_path, folder)):
            os.makedirs(os.path.join(project_path, folder))

    log.info('Inputs, Mapping, Analysis folders exist')

    # set pathway to imagery folder
    image_folder = os.path.join(project_path, '01_Inputs/01_Imagery')

    # create new AP folder
    AP_path = os.path.join(image_folder, AP_fold)
    if not os.path.exists(AP_path):
        os.makedirs(AP_path)

    log.info('copying image to project folder...')

    def add_image(image_path, AP_folder):
        # put input imagery in folder
        if not os.path.exists(os.path.join(AP_folder, 'imagery.tif')):
            arcpy.CopyRaster_management(image_path,
                                        os.path.join(AP_folder, 'imagery.tif'))
        else:
            log.info("an image already exists in this AP folder")

    add_image(image_path, AP_path)

    # set pathway to mapping folder
    map_path = os.path.join(project_path, '02_Mapping')

    #  check if RS folder exists, if not make one
    if not os.path.exists(os.path.join(map_path, 'RS_01')):
        os.makedirs(os.path.join(map_path, 'RS_01'))

    # create new DCE folder
    if not os.path.exists(os.path.join(map_path, DCE_fold)):
        log.info('creating new DCE shapefiles...')
        os.makedirs(os.path.join(map_path, DCE_fold))

        # inundation
        arcpy.CreateFeatureclass_management(os.path.join(map_path, DCE_fold),
                                            "inundation.shp", "POLYGON", "",
                                            has_m, has_z, spatial_reference)
        # add field for inundation type
        arcpy.AddField_management(
            os.path.join(map_path, DCE_fold, 'inundation.shp'), 'type', "TEXT")

        # dam crests
        arcpy.CreateFeatureclass_management(os.path.join(map_path, DCE_fold),
                                            "dam_crests.shp", "POLYLINE", "",
                                            has_m, has_z, spatial_reference)
        # add fields for dam state and crest type
        arcpy.AddField_management(
            os.path.join(map_path, DCE_fold, 'dam_crests.shp'), 'dam_state',
            "TEXT")
        arcpy.AddField_management(
            os.path.join(map_path, DCE_fold, 'dam_crests.shp'), 'crest_type',
            "TEXT")
        arcpy.AddField_management(
            os.path.join(map_path, DCE_fold, 'dam_crests.shp'), 'dam_id',
            "DOUBLE")

        # thalwegs
        arcpy.CreateFeatureclass_management(os.path.join(map_path, DCE_fold),
                                            "thalwegs.shp", "POLYLINE", "",
                                            has_m, has_z, spatial_reference)
        arcpy.AddField_management(
            os.path.join(map_path, DCE_fold, 'thalwegs.shp'), 'type', "TEXT")

    else:
        print("this DCE already exists")
    log.info('updating xml with new DCE...')

    # create a folder in Analysis for this DCE
    analysis_path = os.path.join(project_path, '03_Analysis')
    if not os.path.exists(os.path.join(analysis_path, DCE_fold)):
        os.makedirs(os.path.join(analysis_path, DCE_fold))
        DCEout = os.path.join(analysis_path, DCE_fold)
        if not os.path.exists(os.path.join(DCEout, 'Shapefiles')):
            os.makedirs(os.path.join(DCEout, 'Shapefiles'))
Code example #20
def export_cad_files(project_xml, out_path):
    """exports dxf files containing tin components of topo tin and Topographic Survey Points, Lines and Survey Extent"""

    log = Logger("CADExport")

    # Load Topo project
    log.info("Load Topo project")
    project = topoproject.TopoProject(project_xml)

    # TIN stuff
    log.info("Beginning TIN Work")
    tin = TIN(project.getpath("TopoTin"))
    dict_tinlayers = {}
    dict_tinlayers["tin_points"] = {"layer_type":"POINT", "Features":[feat for feat in tin.nodes.values()]}
    dict_tinlayers["tin_lines"] = {"layer_type":"POLYLINE", "Features":[feat['geometry'] for feat in tin.breaklines.values()]}#, "linetype_field":"LineType"}
    dict_tinlayers["tin_area"] = {"layer_type":"POLYGON", "Features":[feat for feat in tin.hull_polygons.values()]}

    out_tin_dxf = export_as_dxf(dict_tinlayers, os.path.join(out_path, "TopoTin.dxf"))

    # Topo Stuff
    log.info("Beginning Topo Work")
    shpTopo = Shapefile(project.getpath("Topo_Points"))
    shpEOW = Shapefile(project.getpath("EdgeofWater_Points"))
    shpCP = Shapefile(project.getpath("Control_Points"))
    shpBL = Shapefile(project.getpath("Breaklines")) if project.layer_exists("Breaklines") else None
    shpExtent = Shapefile(project.getpath("Survey_Extent"))
    dict_topolayers = {}
    dict_topolayers["Topo_Points"] = {"layer_type":"POINT", "Features":[feat['geometry'] for feat in shpTopo.featuresToShapely()]}
    dict_topolayers["EdgeofWater_Points"] = {"layer_type":"POINT", "Features":[feat['geometry'] for feat in shpEOW.featuresToShapely()]}
    dict_topolayers["Control_Points"] = {"layer_type":"POINT", "Features":[feat['geometry'] for feat in shpCP.featuresToShapely()]}
    dict_topolayers["Breaklines"] = {"layer_type":"POLYLINE", "Features":[feat['geometry'] for feat in shpBL.featuresToShapely()]} if shpBL else None
    dict_topolayers["Survey_Extent"] = {"layer_type":"POLYGON", "Features":[feat['geometry'] for feat in shpExtent.featuresToShapely()]}

    out_topo_dxf = export_as_dxf(dict_topolayers, os.path.join(out_path, "SurveyTopography.dxf"))

    out_topo_csv = exportAsCSV(shpTopo.featuresToShapely() + shpEOW.featuresToShapely(), os.path.join(out_path, "SurveyTopographyPoints.csv"))
    out_control_csv = exportAsCSV(shpCP.featuresToShapely(), os.path.join(out_path, "ControlNetworkPoints.csv"))

    topo_rs_project = riverscapes.Project(project_xml)

    out_project = riverscapes.Project()
    out_project.create("CHaMP_Survey_CAD_Export", "CAD_Export", __version__)
    out_project.addProjectMetadata("Watershed", topo_rs_project.ProjectMetadata["Watershed"])

    #  find previous meta tags
    for tagname, tags in {"Site": ["Site", "SiteName"], "Visit": ["Visit", "VisitID"], "Year": ["Year", "FieldSeason"], "Watershed": ["Watershed", "Watershed"]}.iteritems():
        if tags[0] in topo_rs_project.ProjectMetadata or tags[1] in topo_rs_project.ProjectMetadata:
            out_project.addProjectMetadata(tagname, topo_rs_project.ProjectMetadata[tags[0]] if tags[0] in topo_rs_project.ProjectMetadata else topo_rs_project.ProjectMetadata[tags[1]])
        else:
            raise DataException("Missing project metadata")

    out_realization = riverscapes.Realization("CAD_Export")
    out_realization.name = "CHaMP Survey CAD Export"
    out_realization.productVersion = out_project.projectVersion
    ds = []
    ds.append(out_project.addInputDataset("TopoTin", "tin", None, None, "TIN", project.get_guid("TopoTin")))
    ds.append(out_project.addInputDataset("Topo_Points", "topo_points", None, guid=project.get_guid("Topo_Points")))
    ds.append(out_project.addInputDataset("EdgeofWater_Points", "eow_points", None, guid=project.get_guid("EdgeofWater_Points")))
    ds.append(out_project.addInputDataset("Control_Points", "control_ponts", None, guid=project.get_guid("Control_Points")))
    if shpBL:
        ds.append(out_project.addInputDataset("Breaklines", "breaklines", None, guid=project.get_guid("Breaklines")))
    ds.append(out_project.addInputDataset("Survey_Extent", "survey_extent", None, guid=project.get_guid("Survey_Extent")))
    for inputds in ds:
        out_realization.inputs[inputds.name] = inputds.id

    ds_tin_dxf = riverscapes.Dataset()
    ds_tin_dxf.create("TIN_DXF", "TopoTin.dxf")
    ds_tin_dxf.id = 'tin_dxf'
    ds_topo_dxf = riverscapes.Dataset()
    ds_topo_dxf.create("Topo_DXF", "SurveyTopography.dxf")
    ds_topo_dxf.id = 'topo_dxf'
    ds_topo_csv = riverscapes.Dataset()
    ds_topo_csv.create("Topo_CSV", "SurveyTopographyPoints.csv", "CSV")
    ds_topo_csv.id = 'topo_csv'
    ds_con_csv = riverscapes.Dataset()
    ds_con_csv.create("Control_CSV", "ControlNetworkPoints.csv", "CSV")
    ds_con_csv.id = 'control_csv'
    out_realization.outputs.update({"TIN_DXF": ds_tin_dxf,
                                    "Topo_DXF": ds_topo_dxf,
                                    "Topo_CSV": ds_topo_csv,
                                    "Control_CSV": ds_con_csv})

    out_project.addRealization(out_realization)
    out_project.writeProjectXML(os.path.join(out_path, "project.rs.xml"))

    return 0
Code example #21
def visitTopoMetrics(visitID, metricXMLPath, topoDataFolder, channelunitsfile,
                     workbenchdb, channelUnitDefs):

    log = Logger('Metrics')
    log.info("Topo topometrics for visit {0}".format(visitID))

    # Load the topo data object that specifies the full paths to each data layer
    log.info("Loading topo data from {0}".format(topoDataFolder))

    topo = TopoData(topoDataFolder, visitID)
    topo.loadlayers()

    # Load the channel unit information from the argument XML file
    if channelunitsfile is not None:
        channelUnitInfo = loadChannelUnitsFromJSON(channelunitsfile)
    elif workbenchdb is not None:
        channelUnitInfo = loadChannelUnitsFromSQLite(visitID, workbenchdb)
    else:
        channelUnitInfo = loadChannelUnitsFromAPI(visitID)

    # This is the dictionary for all topometrics to this visit. This will get written to XML when done.
    visitMetrics = {}

    # Loop over all the channels defined in the topo data (wetted and bankfull)
    for channelName, channel in topo.Channels.iteritems():
        log.info("Processing topometrics for {0} channel".format(
            channelName.lower()))

        # Dictionary for the topometrics in this channel (wetted or bankfull)
        dChannelMetrics = {}

        metrics_cl = CenterlineMetrics(channel.Centerline)
        integrateMetricDictionary(dChannelMetrics, 'Centerline',
                                  metrics_cl.metrics)

        metrics_we = WaterExtentMetrics(channelName, channel.Extent,
                                        channel.Centerline, topo.Depth)
        integrateMetricDictionary(dChannelMetrics, 'WaterExtent',
                                  metrics_we.metrics)

        metrics_cs = CrossSectionMetrics(channel.CrossSections,
                                         topo.Channels[channelName].Extent,
                                         topo.DEM, 0.1)
        integrateMetricDictionary(dChannelMetrics, 'CrossSections',
                                  metrics_cs.metrics)

        metrics_i = IslandMetrics(channel.Islands)
        integrateMetricDictionary(dChannelMetrics, 'Islands',
                                  metrics_i.metrics)

        # Put topometrics for this channel into the visit metric dictionary keyed by the channel (wetted or bankfull)
        integrateMetricDictionary(visitMetrics, channelName, dChannelMetrics)

        log.info("{0} channel topometrics complete".format(channelName))

    metrics_thal = ThalwegMetrics(topo.Thalweg, topo.Depth, topo.WaterSurface,
                                  0.1, visitMetrics)
    integrateMetricDictionary(visitMetrics, 'Thalweg', metrics_thal.metrics)

    # Channel units creates four groupings of topometrics that are returned as a Tuple
    cuResults = ChannelUnitMetrics(topo.ChannelUnits, topo.Thalweg, topo.Depth,
                                   visitMetrics, channelUnitInfo,
                                   channelUnitDefs)
    integrateMetricList(visitMetrics, 'ChannelUnits', 'Unit',
                        cuResults.metrics['resultsCU'])
    integrateMetricDictionary(visitMetrics, 'ChannelUnitsTier1',
                              cuResults.metrics['ResultsTier1'])
    integrateMetricDictionary(visitMetrics, 'ChannelUnitsTier2',
                              cuResults.metrics['ResultsTier2'])
    integrateMetricDictionary(visitMetrics, 'ChannelUnitsSummary',
                              cuResults.metrics['ResultsChannelSummary'])

    temp = RasterMetrics(topo.Depth)
    integrateMetricDictionary(visitMetrics, 'WaterDepth', temp)

    temp = RasterMetrics(topo.DEM)
    integrateMetricDictionary(visitMetrics, "DEM", temp)

    temp = RasterMetrics(topo.Detrended)
    integrateMetricDictionary(visitMetrics, "Detrended", temp)

    # special bankfull metrics appending to existing dictionary entry
    visitMetrics['Bankfull']['WaterExtent'].update(
        BankfullMetrics(topo.DEM, topo.Detrended, topo.TopoPoints))

    # Metric calculation complete. Write the topometrics to the XML file
    writeMetricsToXML(visitMetrics, visitID, topoDataFolder, metricXMLPath,
                      "TopoMetrics", __version__)

    log.info("Metric calculation complete for visit {0}".format(visitID))
    return visitMetrics
Code example #22
def export_as_dxf(dict_layers, out_dxf):
    log = Logger("DXFExport")

    header = "  0\nSECTION\n  2\nENTITIES\n"
    h = Handle()

    log.info("Beginning DXF Export")
    with open(out_dxf, "wt") as f:
        f.write(header)
        for name, layer in dict_layers.iteritems():
            log.info("Exporting Layer: {}".format(name))
            if layer:
                if layer["layer_type"] == "POINT":
                    for point in layer["Features"]:
                        f.write("  0\nPOINT\n  5\n{}\n100\nAcDBEntity\n  8\n{}\n".format(h.next(), name)) #  ,
                        f.write("100\nAcDbPoint\n 10\n{}\n 20\n{}\n 30\n{}\n".format(point.x, point.y, point.z))
                elif layer["layer_type"] == "POLYLINE":
                    for line in layer["Features"]: #['geometry'] if layer["Features"].has_key("geometry") else layer["Features"]['geometry']:
                        if line:
                            hline = h.next()
                            f.write("  0\nPOLYLINE\n  5\n{}\n100\nAcDBEntity\n  8\n{}\n".format(hline, name))
                            # if layer.has_key("linetype_field"):
                            #     f.write("  6\n{}\n".format("Linetypefield"))
                            f.write("100\nAcDb3dPolyline\n 10\n0.0\n 20\n0.0\n30\n0.0\n70\n      8\n")
                            for listcoords in [[line.coords] if line.geom_type == "LineString" else [linepart.coords for linepart in line]]:
                                for coords in listcoords:
                                    for vertex in coords:
                                        h.next()
                                        f.write("  0\nVERTEX\n  5\n{}\n330\n{}\n100\nAcDbEntitiy\n  8\n{}\n".format(h.next(),hline, name))
                                        # if layer.has_key("linetype_field"):
                                        #     f.write("  6\n{}\n".format("Linetypefield"))
                                        f.write("100\nAcDbVertex\n100\nAcDb3dPolylineVertex\n")
                                        f.write(" 10\n{}\n 20\n{}\n 30\n{}\n 70\n     32\n".format(vertex[0], vertex[1], vertex[2]))
                                    f.write("  0\nSEQEND\n  5\n{}\n330\n{}\n100\nAcDbEntity\n  8\n{}\n".format(h.next(), hline, name))
                                    # if layer['Features'].has_key("linetype_field"):
                                    #     f.write("  6\n{}\n".format(layer['Features']["linetype_field"]))
                elif layer["layer_type"] == "POLYGON":
                    for poly in layer["Features"]:
                        if poly :
                            hpoly = h.next()
                            f.write("  0\nPOLYLINE\n  5\n{}\n100\nAcDBEntity\n  8\n{}\n".format(hpoly, name))
                            if layer.has_key("linetype_field"):
                                f.write("  6\n{}\n".format("Linetypefield"))
                            f.write("100\nAcDb3dPolyline\n 10\n0.0\n 20\n0.0\n30\n0.0\n70\n      9\n")
                            for listcoords in [[poly.exterior.coords] if poly.geom_type == "Polygon" else [polypart.exterior.coords for polypart in poly]]:
                                for coord in listcoords:
                                    for vertex in coord:
                                        h.next()
                                        f.write("  0\nVERTEX\n  5\n{}\n330\n{}\n100\nAcDbEntitiy\n  8\n{}\n".format(h.next(),hpoly, name))
                                        if layer.has_key("linetype_field"):
                                            f.write("  6\n{}\n".format("Linetypefield"))
                                        f.write("100\nAcDbVertex\n100\nAcDb3dPolylineVertex\n")
                                        f.write(" 10\n{}\n 20\n{}\n 30\n{}\n 70\n     32\n".format(vertex[0], vertex[1], 0.0))
                                    f.write("  0\nSEQEND\n  5\n{}\n330\n{}\n100\nAcDbEntity\n  8\n{}\n".format(h.next(), hpoly, name))
                                    if layer.has_key("linetype_field"):
                                        f.write("  6\n{}\n".format("Linetypefield"))
        f.write("  0\nENDSEC\n")
        f.write("  0\nEOF")

    log.info("DXF Export Complete")
    return out_dxf
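
# `Handle` is not defined in this example. A minimal sketch, assuming DXF
# entity handles are nothing more than incrementing hexadecimal strings:
class Handle(object):
    def __init__(self, start=256):
        self._value = start

    def next(self):
        self._value += 1
        return "{:X}".format(self._value)


# Hypothetical usage; the layer name, geometry and output path are illustrative.
# from shapely.geometry import LineString
# layers = {"Thalweg": {"layer_type": "POLYLINE",
#                       "Features": [LineString([(0, 0, 10.0), (5, 2, 9.8)])]}}
# export_as_dxf(layers, "survey_export.dxf")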
コード例 #23
0
def BankfullMetrics(dem, detrended_dem, shp_points):
    """Calculate bankfull volume and depth metrics for a visit.

    :param dem: path to the DEM raster
    :param detrended_dem: path to the detrended DEM raster
    :param shp_points: path to the topo points shapefile containing crew 'bf' points
    :return: dictionary of bankfull Volume and Depth (Max, Mean) metrics
    """

    log = Logger("Bankfull Metrics")

    # 1. Find the average elevation of crew bankfull points in the detrended DEM.
    gdf_topo_points = geopandas.read_file(shp_points)

    # The code field name varies between shapefiles ('Code' or 'code')
    code_field = 'Code' if 'Code' in gdf_topo_points else 'code'
    gdf_bf_points = gdf_topo_points[gdf_topo_points[code_field] == 'bf']

    log.info("Loaded BF points")

    with rasterio.open(detrended_dem) as rio_detrended:
        # Sample the detrended DEM at each bankfull point, dropping points
        # that fall outside the raster's data extent (nodata cells).
        bf_elevations = [
            v[0] for v in rio_detrended.sample(
                (p.x, p.y) for p in gdf_bf_points.geometry)
            if v[0] != rio_detrended.nodata
        ]
        detrended_band = rio_detrended.read(1)

    if len(bf_elevations) == 0:
        log.error("No valid bf elevation points found.")
        raise ValueError(
            "No bankfull points found within the DetrendedDEM data extent")

    log.info("Sampled {} valid BF point elevations from the DetrendedDEM".format(
        len(bf_elevations)))

    with rasterio.open(dem) as rio_dem:
        dem_band = rio_dem.read(1)

    # enforce orthogonal rasters
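    # Both rasters are assumed to share a 0.1 m cell size; each pad amount
    # converts the bounds offset into a whole number of cells so the padded
    # arrays share a common origin and shape.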
    dem_pad_top = int(
        (rio_detrended.bounds.top - rio_dem.bounds.top) /
        0.1) if rio_detrended.bounds.top > rio_dem.bounds.top else 0
    dem_pad_bottom = int(
        (rio_dem.bounds.bottom - rio_detrended.bounds.bottom) /
        0.1) if rio_dem.bounds.bottom > rio_detrended.bounds.bottom else 0
    dem_pad_right = int(
        (rio_detrended.bounds.right - rio_dem.bounds.right) /
        0.1) if rio_detrended.bounds.right > rio_dem.bounds.right else 0
    dem_pad_left = int(
        (rio_dem.bounds.left - rio_detrended.bounds.left) /
        0.1) if rio_dem.bounds.left > rio_detrended.bounds.left else 0

    det_pad_top = int(
        (rio_dem.bounds.top - rio_detrended.bounds.top) /
        0.1) if rio_detrended.bounds.top < rio_dem.bounds.top else 0
    det_pad_bottom = int(
        (rio_detrended.bounds.bottom - rio_dem.bounds.bottom) /
        0.1) if rio_dem.bounds.bottom < rio_detrended.bounds.bottom else 0
    det_pad_right = int(
        (rio_dem.bounds.right - rio_detrended.bounds.right) /
        0.1) if rio_detrended.bounds.right < rio_dem.bounds.right else 0
    det_pad_left = int(
        (rio_detrended.bounds.left - rio_dem.bounds.left) /
        0.1) if rio_dem.bounds.left < rio_detrended.bounds.left else 0

    np_detrended_ortho = np.pad(detrended_band,
                                ((det_pad_top, det_pad_bottom),
                                 (det_pad_left, det_pad_right)),
                                mode="constant",
                                constant_values=np.nan)
    np_dem_ortho = np.pad(dem_band, ((dem_pad_top, dem_pad_bottom),
                                     (dem_pad_left, dem_pad_right)),
                          mode="constant",
                          constant_values=np.nan)

    if all(v == 0 for v in [
            dem_pad_top, dem_pad_bottom, dem_pad_right, dem_pad_left,
            det_pad_top, det_pad_bottom, det_pad_right, det_pad_left
    ]):
        log.info("DEM and DetrendedDEM have concurrent extents")
    else:
        log.warning(
            "Non-concurrent rasters encountered; DEM and DetrendedDEM padded to a common extent"
        )

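    # Mask nodata cells in both padded arrays so they are excluded from the
    # raster arithmetic below.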
    ma_detrended = np.ma.MaskedArray(
        np_detrended_ortho, np.equal(np_detrended_ortho, rio_detrended.nodata))
    ma_dem = np.ma.MaskedArray(np_dem_ortho,
                               np.equal(np_dem_ortho, rio_dem.nodata))

    # Generate Trend Grid
    np_trendgrid = np.subtract(ma_dem, ma_detrended)
    log.info("Trend surface created")

    # Average BF elev to constant raster in detrended space
    ave_bf_det_elev = sum(bf_elevations) / float(len(bf_elevations))
    ma_bf_detrended = np.full_like(ma_detrended,
                                   ave_bf_det_elev,
                                   dtype=np.float64)
    log.info("Detrended BF surface created")

    # add trend grid to BF detrended surface
    np_bf_surface = np.add(ma_bf_detrended, np_trendgrid)
    log.info("BF elevation surface created")

    # Generate depth and volume
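    # Depth below the bankfull surface is clamped at zero; multiplying by the
    # assumed 0.1 m x 0.1 m cell area converts depth to a per-cell volume.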
    np_bf_depth_raw = np.subtract(np_bf_surface, ma_dem)
    np_bf_depth = np.multiply(np.greater(np_bf_depth_raw, 0), np_bf_depth_raw)
    np_bf_volume = np.multiply(np_bf_depth, 0.1 * 0.1)
    log.info("BF Depth surface created")

    ma_bf_depth = np.ma.MaskedArray(np_bf_depth, np.equal(
        np_bf_depth,
        -0.0))  # -0.0 values were getting included in the mean calculation

    # Summary statistics (the numpy equivalent of a ZonalStatisticsAsTable step):
    #   Volume: sum of the per-cell bankfull volume raster (BFVol).
    #   Depth Max: maximum of the bankfull depth raster (DepthBF_Max).
    #   Depth Mean: mean of the bankfull depth raster (DepthBF_Avg).
    bf_volume = np.nansum(np_bf_volume)
    bf_depth_max = np.nanmax(ma_bf_depth)
    bf_depth_mean = np.nanmean(ma_bf_depth)
    log.info("BF metrics calculated")

    results = {
        "Volume": bf_volume,
        "Depth": {
            "Max": bf_depth_max,
            "Mean": bf_depth_mean
        }
    }

    return results
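
# Hypothetical usage; the paths are illustrative only and the rasters are
# assumed to share the 0.1 m cell size hard-coded above.
# results = BankfullMetrics("topo/DEM.tif", "topo/DetrendedDEM.tif",
#                           "topo/Topo_Points.shp")
# print(results["Volume"], results["Depth"]["Max"], results["Depth"]["Mean"])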
コード例 #24
0
def champ_topo_checker(workbench, folder):

    log = Logger('Topo Checker')
    log.setup(logPath=os.path.join(
        folder,
        datetime.now().strftime("%Y%m%d-%H%M%S") + '_topo_checker.log'))

    dbCon = sqlite3.connect(workbench)
    dbCurs = dbCon.cursor()
    dbCurs.execute(
        'SELECT WatershedName, VisitYear, SiteName, VisitID' +
        ' FROM vwVisits WHERE ProgramID = 1 AND ProtocolID IN (2030, 416, 806, 1966, 2020, 1955, 1880, 10036, 9999)'
    )

    file_exists = 0
    file_zero = 0
    file_download = []
    file_errors = []

    for row in dbCurs.fetchall():
        watershed = row[0]
        visit_year = row[1]
        site = row[2]
        visitID = row[3]

        topo_path = os.path.join(folder, str(visit_year),
                                 watershed.replace(' ', ''), site,
                                 'VISIT_{}'.format(visitID), 'Field Folders',
                                 'Topo', 'TopoData.zip')

        download_needed = False
        if os.path.isfile(topo_path):
            file_exists += 1

            if os.stat(topo_path).st_size == 0:
                file_zero += 1
                download_needed = True
        else:
            download_needed = True

        if not download_needed:
            continue

        file_download.append(topo_path)

        try:
            topoFieldFolders = APIGet(
                'visits/{}/fieldFolders/Topo'.format(visitID))
            topo_file = next(candidate for candidate in topoFieldFolders['files']
                             if candidate['componentTypeID'] == 181)
            downloadUrl = topo_file['downloadUrl']
        except Exception:
            log.warning('No topo data for visit information {}: {}'.format(
                visitID, topo_path))
            continue

        # Download the file to its expected location, creating folders as needed
        if not os.path.isdir(os.path.dirname(topo_path)):
            os.makedirs(os.path.dirname(topo_path))

        with open(topo_path, 'w+b') as f:
            response = APIGet(downloadUrl, absolute=True)
            f.write(response.content)

        log.info('Downloaded {}'.format(topo_path))
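
# Hypothetical usage; both paths are illustrative only.
# champ_topo_checker("workbench.sqlite", "/data/champ")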