예제 #1
0
    def createPMPfc():
        """Create the 'PMP_Points' feature class in the scratch GDB.

        Selects the vector-grid cells that intersect the AOI basin, then
        the grid points centered in those cells, and exports them as
        'PMP_Points'.  Finally adds one PMP_<dur> (DOUBLE) and one
        STORM_<dur> (TEXT, 16 chars) field per duration in ``durList``.

        Relies on enclosing-scope names: arcpy, dm, con, env, home,
        aoiBasin, durList.
        """

        arcpy.AddMessage(
            "\nCreating feature class: 'PMP_Points' in Scratch.gdb...")
        # Raw strings below: the original "...\Non_Storm_Data..." literals used
        # malformed escape sequences (\N, \G) -- a SyntaxError on Python 3.
        # The runtime path values are unchanged.
        dm.MakeFeatureLayer(
            home + r"\Input\Non_Storm_Data.gdb\Vector_Grid",
            "vgLayer")  # make a feature layer of vector grid cells
        dm.SelectLayerByLocation(
            "vgLayer", "INTERSECT", aoiBasin
        )  # select the vector grid cells that intersect the aoiBasin polygon
        dm.MakeFeatureLayer(home + r"\Input\Non_Storm_Data.gdb\Grid_Points",
                            "gpLayer")  # make a feature layer of grid points
        dm.SelectLayerByLocation(
            "gpLayer", "HAVE_THEIR_CENTER_IN", "vgLayer"
        )  # select the grid points within the vector grid selection
        con.FeatureClassToFeatureClass(
            "gpLayer", env.scratchGDB,
            "PMP_Points")  # save feature layer as "PMP_Points" feature class
        arcpy.AddMessage("(" + str(dm.GetCount("gpLayer")) +
                         " grid points will be analyzed)\n")

        # Add PMP Fields
        for dur in durList:
            arcpy.AddMessage("\t...adding field: PMP_" + str(dur))
            dm.AddField(env.scratchGDB + "\\PMP_Points", "PMP_" + dur,
                        "DOUBLE")

        # Add STORM Fields (this string values identifies the driving storm by SPAS ID number)
        for dur in durList:
            arcpy.AddMessage("\t...adding field: STORM_" + str(dur))
            dm.AddField(env.scratchGDB + "\\PMP_Points", "STORM_" + dur,
                        "TEXT", "", "", 16)

        return
예제 #2
0
def process_ws(ws_fc, zone_name):
    """Prepare a watershed zone feature class for publication.

    Adds zone ids, a multipart flag, land-border and coastline flags,
    state assignment, and glaciation status, then prefixes every
    non-required field name with ``zone_name``.

    Relies on module-level names: DM, arcpy, LAND_BORDER, COASTLINE,
    STATES_GEO, find_states, calc_glaciation.

    :param ws_fc: watershed zone polygon feature class (must carry
        a lagoslakeid field; modified in place)
    :param zone_name: short zone name used to prefix output field names
    """
    # generate new zone ids
    DM.AddField(ws_fc, 'zoneid', 'TEXT', field_length=10)
    DM.CalculateField(ws_fc, 'zoneid', '!lagoslakeid!', 'PYTHON')
    ws_fc_lyr = DM.MakeFeatureLayer(ws_fc)

    # multipart flag: Y/N per polygon
    DM.AddField(ws_fc, 'ismultipart', 'TEXT', field_length=2)
    with arcpy.da.UpdateCursor(ws_fc, ['ismultipart', 'SHAPE@']) as u_cursor:
        for row in u_cursor:
            row[0] = 'Y' if row[1].isMultipart else 'N'
            u_cursor.updateRow(row)

    print("Edge flags...")
    # add flag fields
    DM.AddField(ws_fc, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(ws_fc, 'oncoast', 'TEXT', field_length=2)

    # identify border zones
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # States (uses module-level STATES_GEO; a stale, unused hard-coded
    # local path to a state layer was removed here)
    find_states(ws_fc, STATES_GEO)
    # glaciation status?
    calc_glaciation(ws_fc, 'zoneid')

    # preface the names with the zones
    DM.DeleteField(ws_fc, 'ORIG_FID')
    fields = [f.name for f in arcpy.ListFields(ws_fc, '*')
              if f.type not in ('OID', 'Geometry')
              and not f.name.startswith('Shape_')]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            DM.AlterField(ws_fc, f, new_fname, clear_field_alias='TRUE')
        # sick of debugging the required field message-I don't want to change
        # required fields anyway (narrowed from a bare except so SystemExit/
        # KeyboardInterrupt still propagate)
        except Exception:
            pass

    # cleanup: delete every layer object created above (their variable
    # names all end in 'lyr', which is what this locals() scan relies on)
    lyr_objects = [lyr_object for var_name, lyr_object in locals().items()
                   if var_name.endswith('lyr')]
    for l in lyr_objects:
        DM.Delete(l)
def drought_analysis(date_string):
    """Download the USDM drought shapefile for a date, join winery data,
    publish the result to ArcGIS Online, and enrich the service.

    Relies on module-level names: ARCPY, OS, URLLIB, ZIPFILE, TIME, DM,
    ANALYSIS, AGOLHandler, publish_service, enrich, check_job_status.

    :param date_string: date portion of the USDM zip file name,
        e.g. '20140204' (builds 'USDM_<date>_M.zip')
    """
    ARCPY.env.overwriteOutput = True
    working_dir = r"C:\Data\git\devsummit-14-python"
    zip_name = "USDM_" + date_string + "_M.zip"
    url = "http://droughtmonitor.unl.edu/data/shapefiles_m/" + zip_name
    mxd_path = OS.path.join(working_dir, "MapTemplate.mxd")
    lyr_template = OS.path.join(working_dir, "CurrentDroughtConditions.lyr")
    zip_name = OS.path.basename(url)

    # download the drought zip and extract its shapefile
    drought_zip_file = URLLIB.URLopener()
    dzf = drought_zip_file.retrieve(url, OS.path.join(r"C:\Temp", zip_name))
    zf = ZIPFILE.ZipFile(dzf[0], "r")
    shp_name = [n for n in zf.namelist() if n.endswith('.shp')][0]
    zf.extractall(working_dir)

    drought = OS.path.splitext(shp_name)[0]
    DM.MakeFeatureLayer(OS.path.join(working_dir, shp_name), drought)

    #### Add Winery Data ####
    beerWinePath = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                "BeerWine")
    intermediate_output = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                       "BeerWineDrought")
    wine = "BeerWine"
    DM.MakeFeatureLayer(beerWinePath, wine)
    DM.SelectLayerByAttribute(wine, "NEW_SELECTION", "Type = 'Winery'")
    ANALYSIS.SpatialJoin(drought, wine, intermediate_output, "JOIN_ONE_TO_ONE",
                         "KEEP_ALL")
    try:
        DM.DeleteField(intermediate_output, "NAME")
    except Exception:  # best-effort: the field may not exist
        pass
    final_wine_drought = "Wine_Drought_Summary"
    DM.MakeFeatureLayer(intermediate_output, final_wine_drought)

    lf = DM.SaveToLayerFile(
        final_wine_drought,
        OS.path.join(working_dir, '{}.lyr'.format(final_wine_drought)))
    DM.ApplySymbologyFromLayer(lf, lyr_template)

    # SECURITY: replace the hard-coded placeholder with a prompt, e.g.
    # pw = GETPASS.getpass("Enter AGOL password:")
    pw = "PASSWORDHERE"
    # restores the assignment lost in a mangled source line; the name is
    # used by AGOLHandler/publish_service/enrich below
    service_name = "Drought_Wine_Service"

    agol = AGOLHandler("USERNAMEHERE", pw, service_name)

    publish_service(agol, service_name, mxd_path, lf[0])
    TIME.sleep(5)
    fs_url = agol.findItemURL('Feature Service')
    TIME.sleep(35)
    gp_url, jsondata = enrich(agol, fs_url + '/0',
                              '{}_Enriched'.format(service_name), agol.token)
    check_job_status(gp_url, jsondata, agol.token)

    # clean up the downloaded shapefile and the generated layer file
    DM.Delete(OS.path.join(working_dir, shp_name))
    DM.Delete(OS.path.join(working_dir, lf[0]))
예제 #4
0
def erase_and_clean(tract_id, block_id):
    """Erase zero-population blocks from one state's tracts, drop slivers
    under 5000 sq meters, and dissolve the remainder on GISJOIN.

    tract_id: STATEFP code used to filter the module-level tract_layer
    block_id: state identifier used to build the block/erase/dissolve paths
    """
    # Select just this state's tracts from the shared tract layer
    state_where = '"STATEFP" = \'{}\''.format(tract_id)
    state_tracts = mg.SelectLayerByAttribute(tract_layer, "NEW_SELECTION",
                                             state_where)
    # Load the matching no-population block filter
    block_layer = mg.MakeFeatureLayer(
        "data/blocks/nopop/{}_nopop.shp".format(block_id))
    # Erase the no-pop blocks from the tracts
    out_erase = "data/erase/{}_erase.shp".format(block_id)
    arcpy.analysis.Erase(state_tracts, block_layer,
                         out_feature_class=out_erase)
    # Explode multipart results so sliver areas can be measured per polygon
    out_singlepart = "data/singlepart/{}_single.shp".format(block_id)
    mg.MultipartToSinglepart(out_erase, out_singlepart)
    single_layer = mg.MakeFeatureLayer(out_singlepart)
    mg.AddGeometryAttributes(single_layer, Geometry_Properties="AREA",
                             Area_Unit="SQUARE_METERS")
    # Keep only polygons of at least 5000 sq meters, then dissolve
    mg.SelectLayerByAttribute(single_layer, "NEW_SELECTION",
                              '"POLY_AREA" >= 5000')
    mg.Dissolve(single_layer,
                "data/dissolve/{}_dissolve.shp".format(block_id),
                dissolve_field="GISJOIN")
예제 #5
0
    def createOutput(self, outputFC):
        """Creates an Output Feature Class with the Directional Mean
        Results.

        INPUTS:
        outputFC (str): path to the output feature class
        """

        #### Validate Output Workspace ####
        ERROR.checkOutputPath(outputFC)

        #### Shorthand Attributes ####
        ssdo = self.ssdo
        caseField = self.caseField

        #### Create Output Feature Class ####
        ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84003))
        tempCFLayer = "tmpCFLayer"

        try:
            DM.MakeFeatureLayer(ssdo.inputFC, tempCFLayer)
            #### Accumulate Every Case OID Into a Single Selection ####
            # the first select starts a new selection; every later one adds
            selectionMode = "NEW_SELECTION"
            for caseKey, caseValue in self.cf.iteritems():
                for oid in caseValue[0]:
                    whereClause = "%s=%s" % (ssdo.oidName, str(oid))
                    DM.SelectLayerByAttribute(tempCFLayer, selectionMode,
                                              whereClause)
                    selectionMode = "ADD_TO_SELECTION"

            UTILS.clearExtent(DM.CopyFeatures(tempCFLayer, outputFC))
        except:
            ARCPY.AddIDMessage("ERROR", 210, outputFC)
            raise SystemExit()

        #### Set Attribute ####
        self.outputFC = outputFC
예제 #6
0
def simplifyParks():
    """Simplify parks so that they can effectively be displayed at a scale
    of 1:100,000 on the principal system map"""

    # limit parks to sites that are at least 100 acres and that are named
    park_type = 'Park and/or Natural Area'
    park_size = 100  # acres
    named_big_parks = (
        """"TYPE" = '{0}' AND "ACREAGE" > {1} AND "SITENAME" <> ' '"""
        .format(park_type, park_size))
    parks_lyr = 'parks_layer'
    management.MakeFeatureLayer(orca_sites, parks_lyr, named_big_parks)

    # union with NO_GAPS fills any interior holes in the park features
    parks_union = os.path.join('in_memory', 'parks_union')
    analysis.Union(parks_lyr, parks_union, gaps='NO_GAPS')

    # dissolve the unioned pieces back into contiguous park polygons
    parks_dissolve = os.path.join('in_memory', 'parks_dissolve')
    management.Dissolve(parks_union, parks_dissolve)

    # explode multipart features into single parts
    single_part_parks = os.path.join('in_memory', 'single_part_parks')
    management.MultipartToSinglepart(parks_dissolve, single_part_parks)

    # drop any park fragments below the display threshold
    with da.UpdateCursor(single_part_parks, ['OID@', 'SHAPE@AREA']) as cursor:
        for row in cursor:
            if row[1] < 1000000:  # square feet
                cursor.deleteRow()

    # smooth the park edges for display (PAEK, 5000 ft tolerance)
    cartography.SmoothPolygon(single_part_parks, simplified_parks, 'PAEK',
                              5000, 'NO_FIXED')
def check_points_are_in_cost_raster(in_file, raster):
    """Check that every point in ``in_file`` falls on a data cell of
    ``raster``.

    Returns 1 if all points land on data cells, 0 as soon as any point's
    cell value is 'NoData'.  Raises PointHasNoGeometry if an input point
    has no geometry (e.g. excess spreadsheet rows imported as empty
    records).

    Relies on module-level names: arcmgt, arcpy, PointHasNoGeometry.
    """
    proc_layer = "checker"
    arcmgt.MakeFeatureLayer(in_file, proc_layer)
    rows = arcpy.SearchCursor(proc_layer)

    for row_cur in rows:
        shp = row_cur.shape
        try:
            centroid = shp.centroid
            (x, y) = (centroid.X, centroid.Y)
        # narrowed from a bare except: a geometry-less row yields shp None
        # (or a centroid without coords), which surfaces as AttributeError
        except AttributeError:
            arcpy.AddError (
                'One or more input points have no geometry (X or Y coords are not defined).  '
                + 'This can be caused by imported spreadsheets having excess rows.'
            )
            raise PointHasNoGeometry
        # sample band 1 of the cost raster at the point location
        result = arcmgt.GetCellValue(raster, "%s %s" % (x, y), "1")
        value = result.getOutput(0)
        if value == 'NoData':
            return 0

    return 1
def georeference_lakes(
    lake_points_fc,
    out_fc,
    lake_id_field,
    lake_name_field,
    lake_county_field='',
    state='',
    master_gdb=r'C:\Users\smithn78\Dropbox\CL_HUB_GEO\Lake_Georeferencing\Masters_for_georef.gdb'
):
    """
    Evaluate water quality sampling point locations and either assign the point to a lake polygon or flag the
    point for manual review.

    Works by spatially joining the points to the master lakes (0 m), the
    master streams (0 m), and the master lakes again at 10 m and 100 m
    search radii, then applying decision rules per point; states in
    LAGOSNE_STATES get an additional legacy-id crosswalk pass.

    :param lake_points_fc: point feature class of sampling locations
    :param out_fc: path for the output linked/flagged point feature class
    :param lake_id_field: field in lake_points_fc with the program's lake id
    :param lake_name_field: field in lake_points_fc with the lake name
    :param lake_county_field: optional field in lake_points_fc with the county
    :param state: 2-letter state code (validated against STATES)
    :param master_gdb: Location of master geodatabase used for linking
    :return: None; writes out_fc
    """
    master_lakes_fc = os.path.join(master_gdb, MASTER_LAKES_FC)
    master_lakes_lines = os.path.join(master_gdb, MASTER_LAKES_LINES)
    master_streams_fc = os.path.join(master_gdb, MASTER_STREAMS_FC)
    master_xwalk = os.path.join(master_gdb, MASTER_XWALK)

    # setup
    arcpy.AddMessage("Joining...")
    state = state.upper()
    if state not in STATES:
        raise ValueError('Use the 2-letter state code abbreviation')
    arcpy.env.workspace = 'in_memory'
    out_short = os.path.splitext(os.path.basename(out_fc))[0]
    # intermediate dataset names (join3_select/join5/joinx document the
    # naming scheme; some are reserved for the manual-review workflow)
    join1 = '{}_1'.format(out_short)
    join2 = '{}_2'.format(out_short)
    join3 = '{}_3'.format(out_short)
    join3_select = join3 + '_select'
    join4 = '{}_4'.format(out_short)
    join5 = '{}_5'.format(out_short)
    joinx = '{}_x'.format(out_short)

    # verify the county field exists before relying on it; also guards the
    # IndexError the previous version hit when ListFields() matched nothing
    if lake_county_field:
        county_name_results = arcpy.ListFields(
            lake_points_fc, '{}*'.format(lake_county_field))
        if (not county_name_results
                or lake_county_field not in county_name_results[0].name):
            raise Exception(
                '{} field does not exist in dataset.'.format(lake_county_field))

    point_fields = [f.name for f in arcpy.ListFields(lake_points_fc)]

    # update the lake id to a text field if not already
    lake_id_field_type = arcpy.ListFields(lake_points_fc,
                                          lake_id_field)[0].type
    if lake_id_field_type != 'String':
        temp_id_field = '{}_t'.format(lake_id_field)
        arcpy.AddField_management(lake_points_fc, '{}_t'.format(lake_id_field),
                                  'TEXT', '255')
        expr = '!{}!'.format(lake_id_field)
        arcpy.CalculateField_management(lake_points_fc, temp_id_field, expr,
                                        'PYTHON')
        arcpy.DeleteField_management(lake_points_fc, lake_id_field)
        arcpy.AlterField_management(lake_points_fc,
                                    temp_id_field,
                                    new_field_name=lake_id_field)

    # Try to make some spatial connections and fulfill some logic to assign a link
    join1 = AN.SpatialJoin(lake_points_fc,
                           master_lakes_fc,
                           join1,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT')
    join2 = AN.SpatialJoin(join1,
                           master_streams_fc,
                           join2,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT')
    join3 = AN.SpatialJoin(join2,
                           master_lakes_fc,
                           join3,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT',
                           search_radius='10 meters')
    join4 = AN.SpatialJoin(join3,
                           master_lakes_fc,
                           join4,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT',
                           search_radius='100 meters')

    # setup for editing lake assignment values
    DM.AddField(join4, 'Auto_Comment', 'TEXT', field_length=100)
    DM.AddField(join4, 'Manual_Review', 'SHORT')
    DM.AddField(join4, 'Shared_Words', 'TEXT', field_length=100)
    DM.AddField(join4, 'Linked_lagoslakeid', 'LONG')
    DM.AddField(join4, 'GEO_Discovered_Name', 'TEXT', field_length=255)
    DM.AddField(join4, 'Duplicate_Candidate', 'TEXT', field_length=1)
    DM.AddField(join4, 'Is_Legacy_Link', 'TEXT', field_length=1)

    # field-name suffixes (_1, _12, _12_13) come from the repeated spatial
    # joins above and identify which join produced each match
    update_fields = [
        lake_id_field,
        lake_name_field,
        MASTER_LAKE_ID,
        MASTER_GNIS_NAME,  # 0m match
        'PERMANENT_IDENTIFIER_1',
        'GNIS_NAME_1',  # stream match
        MASTER_LAKE_ID + '_1',
        MASTER_GNIS_NAME + '_12',  # 10m match
        MASTER_LAKE_ID + '_12',
        MASTER_GNIS_NAME + '_12_13',  # 100m match
        'Auto_Comment',
        'Manual_Review',
        'Shared_Words',
        'Linked_lagoslakeid'
    ]

    # use a cursor to go through each point and evaluate its assignment
    # (with-statement guarantees the cursor lock is released)
    arcpy.AddMessage("Calculating link status...")
    with arcpy.da.UpdateCursor(join4, update_fields) as cursor:
        for row in cursor:
            point_id, name, mid_0, mname_0, stream_id, streamname_0, mid_10, mname_10, mid_100, mname_100, comment, review, words, lagosid = row
            if mid_0 is not None:  # if the point is directly in a polygon
                if name and mname_0:
                    words = lagosGIS.list_shared_words(
                        name, mname_0, exclude_lake_words=False)
                comment = 'Exact location link'
                lagosid = mid_0
                review = -1
            elif mid_0 is None and mid_10 is not None:  # if the point is only within 10m of a lake
                if name and mname_10:
                    words = lagosGIS.list_shared_words(
                        name, mname_10, exclude_lake_words=False)
                if words:
                    comment = 'Linked by common name and location'
                    lagosid = mid_10
                    review = -1
                else:
                    comment = 'Linked by common location'
                    lagosid = mid_10
                    review = 1
            elif mid_0 is None and mid_10 is None:
                if stream_id is not None:  # if there is a stream match
                    comment = 'Not linked because represented as river in NHD'
                    review = 2
                else:
                    if mid_100 is not None:  # if the point is only within 100m of lake(s)
                        if name and mname_100:
                            words = lagosGIS.list_shared_words(
                                name, mname_100, exclude_lake_words=True)
                        # TODO: Frequency check
                        if words:
                            comment = 'Linked by common name and location'
                            lagosid = mid_100
                            review = 1
                        else:
                            comment = 'Linked by common location'
                            lagosid = mid_100
                            review = 2
            cursor.updateRow(
                (point_id, name, mid_0, mname_0, stream_id, streamname_0,
                 mid_10, mname_10, mid_100, mname_100, comment, review, words,
                 lagosid))

    # NOTE: an experimental county-based fallback join lived here; it never
    # worked reliably and the dead, fully commented-out implementation was
    # removed. join5 intentionally falls through to join4 below.

    if state in LAGOSNE_STATES:
        DM.JoinField(join4, lake_id_field, master_xwalk, 'lagosne_legacyid',
                     ['lagoslakeid', 'lagos_lakename', 'lagos_state'])
        update_fields = [
            lake_id_field,
            lake_name_field,
            MASTER_LAKE_ID + '_12_13',
            'lagos_lakename',
            'lagos_state',  # crosswalk match
            'Auto_Comment',
            'Manual_Review',
            'Shared_Words',
            'Linked_lagoslakeid',
            'Is_Legacy_Link'
        ]

        with arcpy.da.UpdateCursor(join4, update_fields) as uCursor:
            for uRow in uCursor:
                point_id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag = uRow
                # fields are populated already from links above. Revise only if legacy links
                if mid_x is not None:
                    if state == state_x:
                        legacy_flag = 'Y'  # set to Y regardless of whether using legacy comment if state matches
                    if comment != 'Exact location link':
                        review = 1
                        if state != state_x:
                            review = 3  # downgrade if states mismatch--border lakes OK, random common IDs NOT. Check.
                        legacy_flag = 'Y'
                        comment = 'LAGOS-NE legacy link'  # only comment non-exact location matches
                        lagosid = mid_x
                        if name and mname_x:
                            words = lagosGIS.list_shared_words(
                                name,
                                mname_x)  # update words only if legacy comment
                new_row = point_id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag
                uCursor.updateRow(new_row)

    join5 = join4

    # then re-code the no matches as a 3 and copy comments to the editable field
    # compress the joined lake ids into one field
    # having two fields lets us keep track of how many of the auto matches are bad
    if arcpy.ListFields(join5, 'Comment'):
        comment_field_name = 'Comment_LAGOS'
    else:
        comment_field_name = 'Comment'

    DM.AddField(join5, comment_field_name, 'TEXT', field_length=100)
    # NOTE(review): this cursor writes to the literal 'Comment' field even
    # when the field just added is 'Comment_LAGOS', so in that branch the
    # new field stays empty and any incoming 'Comment' values are
    # overwritten. Downstream code (copy_fields, AssignDomainToField) also
    # hard-codes 'Comment', so fixing this needs a coordinated change --
    # flagged but deliberately left as-is.
    with arcpy.da.UpdateCursor(
            join5, ['Manual_Review', 'Auto_Comment', 'Comment']) as cursor:
        for flag, ac, comment in cursor:
            if flag is None:
                flag = 3
                ac = 'Not linked'
            comment = ac
            cursor.updateRow((flag, ac, comment))

    # Re-code points more than 100m into the polygon of the lake as no need to check
    DM.MakeFeatureLayer(join5, 'join5_lyr')
    DM.MakeFeatureLayer(master_lakes_lines, 'lake_lines_lyr')
    DM.SelectLayerByAttribute('join5_lyr', 'NEW_SELECTION',
                              "Auto_Comment = 'Exact location link'")
    DM.SelectLayerByLocation('join5_lyr', 'INTERSECT', 'lake_lines_lyr',
                             '100 meters', 'SUBSET_SELECTION', 'INVERT')
    DM.CalculateField('join5_lyr', 'Manual_Review', '-2', 'PYTHON')
    # Delete takes a single dataset (its 2nd parameter is data_type), so the
    # old two-argument call silently ignored the second layer; delete each
    DM.Delete('join5_lyr')
    DM.Delete('lake_lines_lyr')

    # Then make sure to only keep the fields necessary when you write to an output
    copy_fields = point_fields + [
        'Linked_lagoslakeid', 'Auto_Comment', 'Manual_Review',
        'Is_Legacy_Link', 'Shared_Words', 'Comment', 'Duplicate_Candidate',
        'GEO_Discovered_Name'
    ]
    copy_fields.remove('Shape')
    copy_fields.remove('OBJECTID')

    lagosGIS.select_fields(join5, out_fc, copy_fields)

    DM.AssignDomainToField(out_fc, 'Comment', 'Comment')

    DM.AddField(out_fc, 'Total_points_in_lake_poly', 'Short')

    # Remove any duplicates. (These originate from the join3/join4 transition because a point can be both
    # within 10m and 100m of lakes, this code takes the closest lake as true for my current sanity.)
    # Or, in other words, this is a hack solution.
    out_fc_fields = [
        f.name for f in arcpy.ListFields(out_fc) if f.name != 'OBJECTID'
    ]
    DM.DeleteIdentical(out_fc, out_fc_fields)

    # Get the join_count for each limno lake ID
    # De-dupe anything resulting from limno ID duplicates first before counting
    id_pairs = list(
        set(
            arcpy.da.SearchCursor(out_fc,
                                  [lake_id_field, 'Linked_lagoslakeid'])))
    # THEN pull out LAGOS id. Any duplicate now are only due to multiple distinct points within lake
    lagos_ids = [ids[1] for ids in id_pairs]
    sample_ids = [ids[0] for ids in id_pairs]
    lagos_lake_counts = Counter(lagos_ids)
    linked_multiple_lake_counts = Counter(sample_ids)

    # Get the count of points in the polygon
    with arcpy.da.UpdateCursor(
            out_fc,
        ['Linked_lagoslakeid', 'Total_points_in_lake_poly']) as cursor:
        for lagos_id, join_count in cursor:
            join_count = lagos_lake_counts[lagos_id]
            cursor.updateRow((lagos_id, join_count))

    # Mark any samples linked to more than one lake so that the analyst can select the correct lake in the
    # manual process
    with arcpy.da.UpdateCursor(
            out_fc, [lake_id_field, 'Duplicate_Candidate']) as cursor:
        for sample_id, duplicate_flag in cursor:
            duplicate_count = linked_multiple_lake_counts[sample_id]
            if duplicate_count > 1:
                duplicate_flag = "Y"
            else:
                duplicate_flag = "N"
            cursor.updateRow((sample_id, duplicate_flag))

    # clean up
    DM.AddField(out_fc, 'Note', 'TEXT', field_length=140)
    DM.Delete('in_memory')
    arcpy.AddMessage('Completed.')
예제 #9
0
    def __init__(self,
                 inputFC,
                 templateFC=None,
                 explicitSpatialRef=None,
                 silentWarnings=False,
                 useChordal=True):
        """Validate and describe an input feature class, resolving its
        spatial reference, extent/selection state, and field inventory.

        INPUTS:
        inputFC (str): path to the input feature class (a .lyr file is
            converted to an in-session feature layer)
        templateFC {str, None}: feature class used when resolving the
            output spatial reference (ignored if it does not exist)
        explicitSpatialRef {SpatialReference, None}: when given, overrides
            all other spatial reference resolution
        silentWarnings (bool): suppress spatial-reference warnings
        useChordal (bool): allow chordal distances for geographic CS
        """
        #### Validate Input Feature Class ####
        ERROR.checkFC(inputFC)
        try:
            self.inPath, self.inName = OS.path.split(inputFC)
        except Exception:
            # fall back to treating the whole string as the name
            self.inPath = None
            self.inName = inputFC

        #### Validate Template FC ####
        if templateFC is not None:
            if not ARCPY.Exists(templateFC):
                templateFC = None

        #### ShapeFile Boolean ####
        self.shapeFileBool = False
        if self.inPath:
            self.shapeFileBool = UTILS.isShapeFile(inputFC)

            #### Create Feature Layer if LYR File ####
            path, ext = OS.path.splitext(inputFC)
            if ext.upper() == ".LYR":
                tempFC = "SSDO_FeatureLayer"
                DM.MakeFeatureLayer(inputFC, tempFC)
                inputFC = tempFC

        #### Describe Input ####
        self.info = ARCPY.Describe(inputFC)

        #### Assure Input are Features with OIDs ####
        if not self.info.hasOID:
            ARCPY.AddIDMessage("ERROR", 339, self.inName)
            raise SystemExit()

        #### Assign Describe Objects to Class Attributes ####
        self.inputFC = inputFC
        self.catPath = self.info.CatalogPath
        self.shapeType = self.info.ShapeType
        self.oidName = self.info.oidFieldName
        self.dataType = self.info.DataType
        self.shapeField = self.info.ShapeFieldName
        self.templateFC = templateFC
        self.hasM = self.info.HasM
        self.hasZ = self.info.HasZ
        self.silentWarnings = silentWarnings

        #### Set Initial Extent Depending on DataType ####
        if self.dataType in ["FeatureLayer", "Layer"]:
            try:
                # prefer the full catalog extent for layers backed by a
                # catalog dataset
                tempInfo = ARCPY.Describe(self.catPath)
                extent = tempInfo.extent
            except Exception:
                #### in_memory, SDE, NetCDF etc... ####
                extent = self.info.extent
            self.fidSet = self.info.FIDSet
            if self.fidSet == "":
                self.selectionSet = False
            else:
                self.selectionSet = True
        else:
            extent = self.info.extent
            self.fidSet = ""
            self.selectionSet = False
        self.extent = extent

        #### Set Spatial Reference ####
        inputSpatRef = self.info.SpatialReference
        inputSpatRefName = inputSpatRef.name
        if explicitSpatialRef:
            #### Explicitely Override Spatial Reference ####
            self.templateFC = None
            self.spatialRef = explicitSpatialRef
        else:
            #### 1. Feature Dataset, 2. Env Setting, 3. Input Hierarchy ####
            self.spatialRef = UTILS.returnOutputSpatialRef(inputSpatRef,
                                                           outputFC=templateFC)
        self.spatialRefString = UTILS.returnOutputSpatialString(
            self.spatialRef)
        self.spatialRefName = self.spatialRef.name
        self.spatialRefType = self.spatialRef.type

        #### Warn if Spatial Reference Changed ####
        if not silentWarnings:
            UTILS.compareSpatialRefNames(inputSpatRefName, self.spatialRefName)

        #### Check for Projection ####
        if self.spatialRefType.upper() != "PROJECTED":
            if self.spatialRefType.upper() == "GEOGRAPHIC":
                self.useChordal = useChordal
                if not explicitSpatialRef:
                    if self.useChordal:
                        ARCPY.AddIDMessage("WARNING", 1605)
                    else:
                        ARCPY.AddIDMessage("WARNING", 916)
            else:
                self.useChordal = False
                if not explicitSpatialRef:
                    ARCPY.AddIDMessage("WARNING", 916)
        else:
            self.useChordal = False

        #### Angular/Linear Unit Info ####
        self.distanceInfo = UTILS.DistanceInfo(
            self.spatialRef, useChordalDistances=self.useChordal)

        #### Create Composition and Accounting Structure ####
        self.fields = {}
        self.master2Order = {}
        self.order2Master = {}

        #### Obtain a Full List of Field Names/Type ####
        self.allFields = {}
        listFields = self.info.fields
        for field in listFields:
            name = field.name.upper()
            self.allFields[name] = FCField(field)

        #### Set Z and M Flags and Defaults ####
        zmInfo = UTILS.setZMFlagInfo(self.hasM, self.hasZ, self.spatialRef)
        self.zFlag, self.mFlag, self.defaultZ = zmInfo
        self.zBool = self.zFlag == "ENABLED"

        #### Render Type ####
        self.renderType = UTILS.renderType[self.shapeType.upper()]
0
    desc = arcpy.Describe(scratch)
    arcpy.env.extent = desc.extent
    arcmgt.Delete(scratch)
    print "Extent is %s" % arcpy.env.extent

    add_msg_and_print('Currently in directory: %s\n' % os.getcwd())
    add_msg_and_print('Workspace is: %s' % arcpy.env.workspace)
    #add_msg_and_print ('Scratch table is: %s' % out_table)

    table_view = "table_view"
    arcmgt.MakeTableView(in_file, table_view)

    fields = arcpy.ListFields(in_file)

    layer = "feat_layer"
    arcmgt.MakeFeatureLayer(in_file, layer)
    desc = arcpy.Describe(layer)
    fld_names = []
    for fld in desc.fields:
        fld_names.append(fld.name)

    try:
        fields = ["PATH_FROM", "PATH_TO", "PATH_DIST"]
        #arcmgt.DeleteField(layer, fields)
        #arcmgt.DeleteField(layer, "FROM_")
        for fld in fields:
            if not fld in fld_names:
                arcmgt.AddField(table_view, fld,
                                "DOUBLE")  #  SHOULD GET TYPE FROM target_fld

    except Exception as e:
예제 #11
0
def network2SWM(inputFC, masterField, swmFile, inputNetwork, 
                impedance, cutoff = "#", numberOfNeighs = "#", 
                inputBarrier = "#", uturnPolicy = "ALLOW_UTURNS", 
                restrictions = "#", hierarchy = 'NO_HIERARCHY',
                searchTolerance = "#", fixed = 0,
                exponent = 1.0, rowStandard = True):

    """Creates spatial weights in SWM format from a combination
    of network data and feature classes.

    INPUTS: 
    inputFC (str): path to the input feature class
    masterField (str): field in table that serves as the mapping
    swmFile (str): path to the SWM file
    inputNetwork (str): path to the network dataset (*.nd)
    impedance (str): attribute from network dataset (1)
    cutoff {float, "#"}: impedance threshold
    numberOfNeighs {int, "#"}: number of neighbors to return
    inputBarrier {str, "#"}: path to the input barrier feature class
    uturnPolicy {str, ALLOW_UTURNS}: uturn policy (2)
    restrictions {str, "#"}: attribute from network dataset (3)
    hierarchy {str, NO_HIERARCHY}: NO_HIERARCHY or USE_HIERARCHY
    searchTolerance {linear measure, "#"}: snap tolerance for network (4)
    fixed {int, 0}: Invert impedance as weight or return a weight = 1? 
    exponent {float, 1.0}: distance decay
    rowStandard {bool, True}: row standardize weights?

    NOTES:
    (1) E.g. MINUTES and METERS
    (2) E.g. ALLOW_UTURNS or NO_UTURNS
    (3) E.g. ONEWAY
    (4) E.g. 5000 METERS
    """

    #### Check out Network Analyst ####
    try:
        ARCPY.CheckOutExtension("Network")
    except:
        ARCPY.AddIDMessage("ERROR", 849)
        raise SystemExit()

    #### OD Matrix and Layers ####
    ODCostMatrix = "ODMatrix"
    # Map input barrier geometry types onto the matching NA barrier class.
    BarriersLayerNames = {"POINT": 'Barriers',
                          "POLYLINE" : 'PolylineBarriers',
                          "LINE" : 'PolylineBarriers',
                          "POLYGON" : 'PolygonBarriers'}
    lines = ODCostMatrix + "\\Lines"
    destFCLayer = "NetSWM_Dest"

    ##### Delete Layers If They Exist ####
    cleanupNetLayer(ODCostMatrix)
    cleanupNetLayer(destFCLayer)
    cleanupNetLayer(lines)

    #### Get Master Field From inputFC ####
    ssdo = SSDO.SSDataObject(inputFC,
                             useChordal = False)
    ssdo.obtainDataGA(masterField, minNumObs = 2)
    master2Order = ssdo.master2Order
    masterFieldObj = ssdo.allFields[masterField.upper()]
    # NOTE(review): relies on Python 2 dict.keys() returning a list --
    # allMaster.sort() is called below (Python 3 would need list(...)).
    allMaster = master2Order.keys()
    numObs = ssdo.numObs
    numPossNeighs = numObs - 1

    #### Get Spatial Ref From Net Data Set ####
    netDesc = ARCPY.Describe(inputNetwork)
    netSpatialRef = netDesc.SpatialReference
    netSpatName = netSpatialRef.Name

    #### Set Maximum Neighbor Argument ####
    # Default to 30 neighbors (or fewer when not enough features exist).
    if numberOfNeighs == "#":
        numberOfNeighs = min( [numPossNeighs, 30] )
        ARCPY.AddIDMessage("WARNING", 1012, numberOfNeighs)

    if numberOfNeighs >= numObs:
        numberOfNeighs = numPossNeighs
        ARCPY.AddIDMessage("WARNING", 1013, numberOfNeighs)

    if numberOfNeighs == 0:
        numberOfNeighs = numPossNeighs

    #### All Features are Related.  Force Inverse Impedance ####
    if (numObs - numberOfNeighs) <= 1:
        if fixed:
            ARCPY.AddIDMessage("WARNING", 974)
            fixed = 0

    #### Add Self Neighbor For OD Solve ####
    # The solve returns each origin as its own nearest destination, so
    # request one extra neighbor; self links are filtered out below.
    numberOfNeighsOD = numberOfNeighs + 1

    #### Make OD Cost Matrix Layer ####
    ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84132))
    odCostMatrixLayer = NET.MakeODCostMatrixLayer(inputNetwork, ODCostMatrix, impedance, cutoff,
                              numberOfNeighsOD, "#", uturnPolicy,
                              restrictions, hierarchy, "#", "NO_LINES").getOutput(0)

    #### OD Matrix and Layers ####
    naClassNames = NET.GetNAClassNames(odCostMatrixLayer)
    destinationLayer = ODCostMatrix + OS.sep + naClassNames["Destinations"]
    originLayer = ODCostMatrix + OS.sep + naClassNames["Origins"]
    lines = ODCostMatrix + OS.sep + naClassNames["ODLines"]

    #### Add Barriers ####
    if inputBarrier != "" and inputBarrier != "#":
        ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84147))
        barDesc = ARCPY.Describe(inputBarrier)
        barShapeType = barDesc.ShapeType.upper()
        if barShapeType in BarriersLayerNames:
            barString = naClassNames[BarriersLayerNames[barShapeType]]
            NET.AddLocations(ODCostMatrix, barString, inputBarrier, "",
                             searchTolerance)

    #### Add Master Field to OD for Selection ####
    masterType = UTILS.convertType[masterFieldObj.type]
    NET.AddFieldToAnalysisLayer(ODCostMatrix, naClassNames["Destinations"], masterField,
                                masterType)

    #### Add Destinations ####
    ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84133)) 
    # Field map: use the master ID as both Name and the carried-over field.
    masterToken = "Name " + masterField + " #;"
    masterToken += masterField + " " + masterField + " #"
    NET.AddLocations(ODCostMatrix, naClassNames["Destinations"], inputFC, masterToken,
                     searchTolerance, exclude_restricted_elements = "EXCLUDE")

    #### Initialize Spatial Weights Matrix File ####
    hierarchyBool = hierarchy == 'USE_HIERARCHY'
    addConcept = WU.wTypeDispatch[fixed].split("_")[0]
    forceFixed = (fixed == True)
    swmWriter = WU.SWMWriter(swmFile, masterField, netSpatName, 
                             numObs, rowStandard,
                             inputFC = inputFC, wType = 10,
                             inputNet = inputNetwork, 
                             impedanceField = impedance,
                             barrierFC = inputBarrier,
                             uturnPolicy = uturnPolicy,
                             restrictions = restrictions,
                             useHierarchy = hierarchyBool,
                             searchTolerance = searchTolerance,
                             addConcept = addConcept,
                             exponent = exponent,
                             forceFixed = forceFixed)

    #### Create FieldList for Subset Searching ####
    totalImpedance = "Total_" + impedance
    fieldList = ";".join( ["NAME", totalImpedance] )

    #### Get Chunks if Necessary ####
    # Cap each chunk so a single OD solve stays at roughly 10 million
    # origin-destination pairs.
    numOrigins = int(10000000. / numObs)
    allMaster.sort()
    chunkedIDs = UTILS.chunk(allMaster, numOrigins)
    sqlStrings = UTILS.sqlChunkStrings(inputFC, masterField, chunkedIDs)
    numChunks = len(sqlStrings)

    #### Create Field Map for Origins ####
    # Origins are taken from the already-located destinations so each
    # feature is only snapped to the network once.
    masterToken = "Name " + masterField + " #"
    orgFieldMap = [masterToken, 'CurbApproach CurbApproach 0', 
                    'SourceID SourceID #', 'SourceOID SourceOID #',
                    'PosAlong PosAlong #', 'SideOfEdge SideOfEdge #']   
    orgFieldMap = ";".join(orgFieldMap)

    #### Keep Track of Features That Snap to Network ####
    snappedFeatures = set([])

    for chunkNum in xrange(numChunks):
        progMsg = ARCPY.GetIDMessage(84145).format(chunkNum + 1, numChunks)
        ARCPY.SetProgressor("default", progMsg)

        #### Make Origins from Chunk of Destinations ####
        sqlValue = sqlStrings[chunkNum]
        DM.MakeFeatureLayer(destinationLayer, destFCLayer, sqlValue)
        NET.AddLocations(ODCostMatrix, naClassNames["Origins"], destFCLayer, orgFieldMap,
                         "#", "#", "#", "#", "CLEAR")

        #### Solve OD Matrix and Select Data ####
        NET.Solve(ODCostMatrix, "SKIP")

        #### Count the Number of NonZero Spatial Linkages #### 
        numLinks = UTILS.getCount(lines)

        #### Create Search Cursor for OD Line Info ####
        # Python 2 cursor idiom: rows.next() returns None when exhausted.
        rows = ARCPY.SearchCursor(lines, "", None, fieldList)
        row = rows.next()

        #### Set Tool Progressor and Process Information ####
        ARCPY.SetProgressor("step", ARCPY.GetIDMessage(84127), 0, numLinks, 1)

        #### Process First Record ####
        # The OD line NAME is formatted "<originID> - <destinationID>".
        ODInfo = row.getValue("NAME")
        lastID, neighID = [ int(i) for i in ODInfo.split(" - ") ]
        impValue = row.getValue(totalImpedance)
        weight = WU.distance2Weight(impValue, wType = fixed, 
                                    exponent = exponent)
        neighs = []
        weights = []
        # Exclude the self-neighbor added for the OD solve.
        if lastID != neighID:
            neighs.append(neighID)
            weights.append(weight)

        #### Process Remaining Records ####
        progMsg = ARCPY.GetIDMessage(84146).format(chunkNum + 1, numChunks)
        ARCPY.SetProgressor("step", progMsg, 0, numLinks, 1)
        while row:
            #### Get Origin and Destination Unique IDs ####
            ODInfo = row.getValue("NAME")
            masterID, neighID = [ int(i) for i in ODInfo.split(" - ") ]

            #### Obtain Impedance and Create Weight ####
            impValue = row.getValue(totalImpedance)
            weight = WU.distance2Weight(impValue, wType = fixed, 
                                        exponent = exponent)

            #### Check Whether it is the Same ID ####
            if masterID == lastID:
                if masterID != neighID:
                    neighs.append(neighID)
                    weights.append(weight)

            else:
                #### New ID, Add Last ID Result to SWM File ####
                swmWriter.swm.writeEntry(lastID, neighs, weights) 
                snappedFeatures.add(lastID)

                #### Reset and Initialize Containers ####
                neighs = []
                weights = []
                if masterID != neighID: 
                    neighs.append(neighID)
                    weights.append(weight)
                lastID = masterID

            ARCPY.SetProgressorPosition()
            row = rows.next()

        #### Write Last ID Result ####
        swmWriter.swm.writeEntry(lastID, neighs, weights) 
        snappedFeatures.add(lastID)

        #### Clean Up ####
        del rows

    ##### Delete Layers If They Exist ####
    cleanupNetLayer(ODCostMatrix)
    cleanupNetLayer(destFCLayer)
    cleanupNetLayer(lines)

    #### Add Empty SWM Entries for Features Not Snapped to Network ####
    # Features never seen as an origin in any OD line get a neighborless entry.
    notSnapped = snappedFeatures.symmetric_difference(allMaster)
    for masterID in notSnapped:
        swmWriter.swm.writeEntry(masterID, [], [])

    #### Report Warning/Max Neighbors ####
    swmWriter.reportNeighInfo()

    #### Clean Up ####
    swmWriter.close()

    #### Report Spatial Weights Summary ####
    swmWriter.report()

    #### Report SWM File is Large ####
    swmWriter.reportLargeSWM()
def snap_points_to_mask_raster (in_file, mask, out_file, distance, workspace):
    
    if distance is None or len (distance) == 0:
        distance = "100 METERS"
    
    if arcpy.env.outputCoordinateSystem is None:
        arcpy.env.outputCoordinateSystem = mask
    print arcpy.env.outputCoordinateSystem.name

    if len(workspace):
        arcpy.env.workspace = workspace
    if arcpy.env.workspace is None or len(arcpy.env.workspace) == 0:
        arcpy.env.workspace = os.getcwd()

    arcpy.AddMessage ("workspace is %s" % arcpy.env.workspace)

    try:
        suffix = None
        wk = arcpy.env.workspace
        if not '.gdb' in wk:
            suffix = '.shp'
        poly_file = arcpy.CreateScratchName(None, suffix, 'POLYGON')
        arcpy.RasterToPolygon_conversion (mask, poly_file, 'NO_SIMPLIFY')
    except:
        raise

    arcpy.AddMessage ("poly_file is %s" % poly_file)

    #  handle layers and datasets
    desc = arcpy.Describe(in_file)
    in_file = desc.catalogPath

    #  add .shp extension if needed - clunky, but otherwise system fails below
    re_gdb = re.compile ('\.gdb$')
    re_shp = re.compile ('\.shp$')
    path = os.path.dirname(out_file)
    if len (path) == 0:
        path = arcpy.env.workspace
    if not re_gdb.search (path) and not re_shp.search (out_file):
        out_file += '.shp'

    arcpy.AddMessage ("Input point file is %s" % in_file)
    arcpy.AddMessage ("Output point file is %s" % out_file)

    arcmgt.CopyFeatures (in_file, out_file)

    try:
        snap_layer_name = 'get_layer_for_snapping'
        arcmgt.MakeFeatureLayer (out_file, snap_layer_name)
        arcmgt.SelectLayerByLocation (snap_layer_name, 'intersect', poly_file, '#', 'NEW_SELECTION')
        arcmgt.SelectLayerByAttribute(snap_layer_name, 'SWITCH_SELECTION')
        if arcmgt.GetCount(snap_layer_name) > 0:
            arcpy.Snap_edit (snap_layer_name, [[poly_file, "EDGE", distance]])
        else:
            arcpy.AddMessage ('No features selected, no snapping applied')
    except Exception as e:
        print arcpy.GetMessages()
        raise e

    arcmgt.Delete (snap_layer_name)
    arcmgt.Delete (poly_file)

    print arcpy.GetMessages()
    print "Completed"

    return
예제 #13
0
    #desc = arcpy.Describe(polygon_file)
    #sr = desc.spatialReference
    # Force a known coordinate system and a very fine XY resolution/tolerance
    # before constructing the geodesic lines.
    arcpy.env.outputCoordinateSystem = default_coord_sys
    arcpy.env.geographicTransformations = "GDA_1994_To_WGS_1984"
    arcpy.env.XYResolution = "0.0000000001 Meters"
    arcpy.env.XYTolerance = "0.0000000001 Meters"

    temp_xy = arcpy.CreateScratchName("xx", ".shp")
    add_msg_and_print("temp_xy is %s" % temp_xy)
    # Build geodesic lines between the start/end coordinate columns.
    # ("Lattitude" spelling presumably matches the source table's field
    # names -- confirm against in_table's schema.)
    try:
        arcmgt.XYToLine(in_table, temp_xy, "Longitude_dd", "Lattitude_dd",
                        "Longitude_dd_2", "Lattitude_dd_2", "GEODESIC",
                        "New_WP")

    except:
        add_msg_and_print("Unable to create XY to line feature class")
        raise
    layer = "feat_layer"
    arcmgt.MakeFeatureLayer(temp_xy, layer)

    # Keep only the lines NOT completely within the polygon
    # (select the contained ones, then switch the selection).
    arcmgt.SelectLayerByLocation(layer, "COMPLETELY_WITHIN", polygon_file)
    arcmgt.SelectLayerByAttribute(layer, "SWITCH_SELECTION")

    temp_overlap = arcpy.CreateScratchName("xx_overlap_", ".shp")
    arcpy.CopyFeatures_management(layer, temp_overlap)

    #  now we need to iterate over those overlapping vertices and integrate them with the boundary polygon

    print "Completed"
예제 #14
0
def lake_from_to(nhd_subregion_gdb, output_table):
    """Trace the NHD geometric network downstream from each lake and
    record lake-to-lake connections in output_table.

    nhd_subregion_gdb (str): NHD subregion geodatabase containing
        NHDWaterbody, the Hydrography/HYDRO_NET geometric network, and
        its HYDRO_NET_Junctions feature class.
    output_table (str): table to create; receives two TEXT fields,
        FROM_PERMANENT_ID and TO_PERMANENT_ID.  Lakes with no downstream
        lake get one row with a null TO_PERMANENT_ID.

    Only lakes passing LAGOS_LAKE_FILTER (over 1 hectare, per the
    comment below) are traced.
    """
    arcpy.env.workspace = 'in_memory'
    waterbody0 = os.path.join(nhd_subregion_gdb, 'NHDWaterbody')
    network = os.path.join(nhd_subregion_gdb, 'Hydrography', 'HYDRO_NET')
    junctions0 = os.path.join(nhd_subregion_gdb, 'HYDRO_NET_Junctions')

    # use layers for selections. We will only work with lakes over 1 hectare for this tool.
    waterbody = DM.MakeFeatureLayer(waterbody0,
                                    'waterbody',
                                    where_clause=LAGOS_LAKE_FILTER)
    num_wbs = int(arcpy.GetCount_management(waterbody).getOutput(0))
    junctions = DM.MakeFeatureLayer(junctions0, 'junctions')

    # pre-filter junctions to those within 1 m of any qualifying lake
    DM.SelectLayerByLocation(junctions, 'INTERSECT', waterbody, '1 Meters',
                             'NEW_SELECTION')
    junctions_1ha = DM.MakeFeatureLayer(junctions, 'junctions_1ha')

    # insert results into output table
    DM.CreateTable(os.path.dirname(output_table),
                   os.path.basename(output_table))
    DM.AddField(output_table, 'FROM_PERMANENT_ID', 'TEXT', field_length=40)
    DM.AddField(output_table, 'TO_PERMANENT_ID', 'TEXT', field_length=40)

    # create a dictionary to hold results in memory
    results = []

    counter = 0
    progress = .01
    arcpy.AddMessage("Starting network tracing...")
    with arcpy.da.SearchCursor(waterbody, 'Permanent_Identifier') as cursor:
        for row in cursor:
            # set up a progress printer (reports every additional 1%)
            counter += 1
            if counter >= float(num_wbs) * progress:
                progress += .01
                arcpy.AddMessage("{}% complete...".format(
                    round(progress * 100), 1))

            # select this lake
            id = row[0]
            where_clause = """"{0}" = '{1}'""".format('Permanent_Identifier',
                                                      id)
            this_waterbody = DM.MakeFeatureLayer(waterbody, 'this_waterbody',
                                                 where_clause)

            # select junctions overlapping this lake. only the downstream one matters, rest have no effect
            DM.SelectLayerByLocation(junctions_1ha, 'INTERSECT',
                                     this_waterbody, '1 Meters')
            count_junctions = int(
                arcpy.GetCount_management(junctions_1ha).getOutput(0))
            if count_junctions == 0:
                # add a row with no "TO" lake to the results
                results.append({'FROM': id, 'TO': None})
            else:
                # copy with selection on
                this_junctions = DM.MakeFeatureLayer(junctions_1ha,
                                                     'this_junctions')
                # trace downstream; output group layer is named 'downstream'
                DM.TraceGeometricNetwork(network, 'downstream', this_junctions,
                                         'TRACE_DOWNSTREAM')
                # select lakes that intersect the downstream network with a tolerance of 1 meters
                DM.SelectLayerByLocation(waterbody, 'INTERSECT',
                                         'downstream/NHDFlowline', '1 Meters',
                                         'NEW_SELECTION')
                # remove this lake
                DM.SelectLayerByAttribute(waterbody, 'REMOVE_FROM_SELECTION',
                                          where_clause)
                # get the count, if it's 0 then there should be no table entry or something?
                count_waterbody = int(
                    arcpy.GetCount_management(waterbody).getOutput(0))
                # copy those into the table that you're storing stuff in
                if count_waterbody == 0:
                    # add a row with no "TO" lake to the results
                    results.append({'FROM': id, 'TO': None})
                else:
                    # for each ID, how am I getting those
                    to_ids = [
                        row[0] for row in arcpy.da.SearchCursor(
                            waterbody, 'Permanent_Identifier')
                    ]
                    for to_id in to_ids:
                        result = {'FROM': id, 'TO': to_id}
                        results.append(result)

                # delete all the intermediates
            DM.SelectLayerByAttribute(waterbody, 'CLEAR_SELECTION')
            for item in [this_waterbody, this_junctions, 'downstream']:
                DM.Delete(item)

    # insert the results in the table
    insert_cursor = arcpy.da.InsertCursor(
        output_table, ['FROM_PERMANENT_ID', 'TO_PERMANENT_ID'])
    for result in results:
        insert_cursor.insertRow([result['FROM'], result['TO']])

    # delete everything
    for item in [waterbody, junctions, junctions_1ha, 'in_memory']:
        DM.Delete(item)
    arcpy.AddMessage("Completed.")
예제 #15
0
    # Richness = count of present species, summed from the !spp0! flag string.
    newCol = "Richness"
    man.AddField(curZo, newCol, "SHORT")
    expr = "sum(int(i) for i in !spp0!)"
    man.CalculateField(curZo, newCol, expr, "PYTHON")

# cycle through each zone raster
for i in range(len(hypZ)):
    print("linking zones up to polys")
    print("working on " + hypZ[i])
    # don't assume i is the class level -- extract class here
    classLevel = hypZ[i][-1:]
    curZo = wrk + "/zon_C" + classLevel
    polyZo = wrk + "/hyp_backOut_dissolve_" + classLevel
    polyZoLyr = "polyZoLayer"
    # join the table from the raster to the poly zone layer
    man.MakeFeatureLayer(polyZo, polyZoLyr)
    man.AddJoin(polyZoLyr, "OBJECTID", curZo, "OBJECTID", "KEEP_ALL")
    # find any polys with Richness below zone level
    # each dict entry is [zone: min richness]
    dictMinRich = {1: 1, 2: 2, 3: 5}
    targMinRich = dictMinRich[int(classLevel)]
    expr = "Richness >= " + str(targMinRich)
    man.SelectLayerByAttribute(polyZoLyr, "NEW_SELECTION", expr)
    # write out the selected set
    outFeat = wrk + "/zon_Joined_C" + classLevel
    man.CopyFeatures(polyZoLyr, outFeat)
    # if rows were dropped AND we are above level 1, then need
    # to add dropped polys to one level down.
    numRowsSelSet = int(man.GetCount(polyZoLyr).getOutput(0))
    numRowsLyr = int(man.GetCount(polyZo).getOutput(0))
    # NOTE(review): '&' binds tighter than the comparisons, so this evaluates
    # as numRowsSelSet < (numRowsLyr & int(classLevel)) > 1 -- 'and' was
    # almost certainly intended; confirm before relying on this branch.
    if numRowsSelSet < numRowsLyr & int(classLevel) > 1:
예제 #16
0
# Two-digit FIPS codes keyed by two-letter postal abbreviation:
# the 50 states plus DC (51 entries), laid out in sorted key order.
state_codes = {
    'AK': '02', 'AL': '01', 'AR': '05', 'AZ': '04', 'CA': '06',
    'CO': '08', 'CT': '09', 'DC': '11', 'DE': '10', 'FL': '12',
    'GA': '13', 'HI': '15', 'IA': '19', 'ID': '16', 'IL': '17',
    'IN': '18', 'KS': '20', 'KY': '21', 'LA': '22', 'MA': '25',
    'MD': '24', 'ME': '23', 'MI': '26', 'MN': '27', 'MO': '29',
    'MS': '28', 'MT': '30', 'NC': '37', 'ND': '38', 'NE': '31',
    'NH': '33', 'NJ': '34', 'NM': '35', 'NV': '32', 'NY': '36',
    'OH': '39', 'OK': '40', 'OR': '41', 'PA': '42', 'RI': '44',
    'SC': '45', 'SD': '46', 'TN': '47', 'TX': '48', 'UT': '49',
    'VA': '51', 'VT': '50', 'WA': '53', 'WI': '55', 'WV': '54',
    'WY': '56',
}

# 2015 census-tract shapefile (NHGIS extract) covering the whole US.
tracts = "data-raw/nhgis0077_shape/us_tract_2015.shp"

tract_layer = mg.MakeFeatureLayer(tracts)

def erase_and_clean(tract_id, block_id):
    """Erase the no-population blocks for one state from its tracts,
    then explode the result to singleparts (sliver removal follows)."""
    # attribute filter on the state FIPS field, e.g. "STATEFP" = '53'
    tract_clause = '"STATEFP" = ' + "'" + tract_id + "'"
    # First, generate the state-specific layer
    state_tracts = mg.SelectLayerByAttribute(tract_layer, "NEW_SELECTION", tract_clause)
    # Next, read in the appropriate block filter
    blocks = "data/blocks/nopop/" + block_id + "_nopop.shp"
    block_layer = mg.MakeFeatureLayer(blocks)
    # Run the Erase tool
    out_erase = "data/erase/" + block_id + "_erase.shp"
    arcpy.analysis.Erase(state_tracts, block_layer, out_feature_class = out_erase)
    # Check for slivers and remove them - let's set a threshold of 5000 sq meters
    out_singlepart = "data/singlepart/" + block_id + "_single.shp"
    mg.MultipartToSinglepart(out_erase, out_singlepart)
    single_layer = mg.MakeFeatureLayer(out_singlepart)
예제 #17
0
    ARCPY.env.overwriteOutput = True
    working_dir = r"C:\Data\DevSummit14"
    # weekly US Drought Monitor shapefile archive for the given date
    zip_name = "USDM_" + date_string + "_M.zip"
    url = "http://droughtmonitor.unl.edu/data/shapefiles_m/" + zip_name
    mxd_path = OS.path.join(working_dir, "MapTemplate.mxd")
    lyr_template = OS.path.join(working_dir, "CurrentDroughtConditions.lyr")
    zip_name = OS.path.basename(url)

    # download the zip, extract it, and load the shapefile as a layer
    drought_zip_file = URLLIB.URLopener()
    dzf = drought_zip_file.retrieve(url, OS.path.join(r"C:\Temp", zip_name))
    zf = ZIPFILE.ZipFile(dzf[0], "r")
    shp_name = [n for n in zf.namelist() if n.endswith('.shp')][0]
    zf.extractall(working_dir)

    drought = OS.path.splitext(shp_name)[0]
    DM.MakeFeatureLayer(OS.path.join(working_dir, shp_name), drought)

    #### Add Winery Data ####
    beerWinePath = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                "BeerWine")
    intermediate_output = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                       "BeerWineDrought")
    wine = "BeerWine"
    wine_drought = "Wine_Drought"
    DM.MakeFeatureLayer(beerWinePath, wine)
    # wineries only, spatially joined one-to-one onto the drought polygons
    DM.SelectLayerByAttribute(wine, "NEW_SELECTION", "Type = 'Winery'")
    ANALYSIS.SpatialJoin(drought, wine, intermediate_output, "JOIN_ONE_TO_ONE",
                         "KEEP_ALL")
    try:
        DM.DeleteField(intermediate_output, "NAME")
    except:
예제 #18
0
    def doIntegrate(self):
        """Aggregate incident data by snapping nearby features together
        (DM.Integrate at increasing percentile distances) and then
        collapsing coincident points with Collect Events; finally calls
        self.createAnalysisSSDO with the collected features and "ICOUNT".
        """
        #### Initial Data Assessment ####
        printOHSSection(84428, prependNewLine=True)
        printOHSSubject(84431, addNewLine=False)

        #### Find Unique Locations ####
        msg = ARCPY.GetIDMessage(84441)
        ARCPY.SetProgressor("default", msg)
        initCount = UTILS.getCount(self.ssdo.inputFC)
        self.checkIncidents(initCount)
        collectedPointFC = UTILS.returnScratchName("Collect_InitTempFC")
        collInfo = EVENTS.collectEvents(self.ssdo, collectedPointFC)
        self.cleanUpList.append(collectedPointFC)

        collSSDO = SSDO.SSDataObject(collectedPointFC,
                                     explicitSpatialRef=self.ssdo.spatialRef,
                                     useChordal=True)
        collSSDO.obtainDataGA(collSSDO.oidName)
        #################################

        #### Locational Outliers ####
        lo = UTILS.LocationInfo(collSSDO,
                                concept="EUCLIDEAN",
                                silentThreshold=True,
                                stdDeviations=3)
        printOHSLocationalOutliers(lo, aggType=self.aggType)

        #### Raster Boundary ####
        if self.outputRaster:
            self.validateRaster(collSSDO.xyCoords)

        #### Agg Header ####
        printOHSSection(84444)

        #### Copy Features for Integrate ####
        msg = ARCPY.GetIDMessage(84443)
        ARCPY.SetProgressor("default", msg)
        intFC = UTILS.returnScratchName("Integrated_TempFC")
        self.cleanUpList.append(intFC)
        DM.CopyFeatures(self.ssdo.inputFC, intFC)

        #### Make Feature Layer To Avoid Integrate Bug with Spaces ####
        mfc = "Integrate_MFC_2"
        DM.MakeFeatureLayer(intFC, mfc)
        self.cleanUpList.append(mfc)

        #### Snap Subject ####
        printOHSSubject(84442, addNewLine=False)
        # nScale adjusts the snap distance by the ratio of unique locations
        # to self.cnt -- presumably the original incident count; confirm.
        nScale = (collSSDO.numObs * 1.0) / self.cnt
        # use whichever of mean/median non-zero neighbor distance is smaller
        if lo.nonZeroAvgDist < lo.nonZeroMedDist:
            useDist = lo.nonZeroAvgDist * nScale
            useType = "average"
        else:
            useDist = lo.nonZeroMedDist * nScale
            useType = "median"
        distance2Integrate = lo.distances[lo.distances < useDist]
        distance2Integrate = NUM.sort(distance2Integrate)
        numDists = len(distance2Integrate)

        #### Max Snap Answer ####
        msg = ARCPY.GetIDMessage(84445)
        useDistStr = self.ssdo.distanceInfo.printDistance(useDist)
        msg = msg.format(useDistStr)
        printOHSAnswer(msg)

        # integrate iteratively at the 10th, 25th and 100th percentile
        # distances; clamp the last index to the final element
        percs = [10, 25, 100]
        indices = [int(numDists * (i * .01)) for i in percs]
        if indices[-1] >= numDists:
            indices[-1] = -1

        ARCPY.SetProgressor("default", msg)
        for pInd, dInd in enumerate(indices):
            dist = distance2Integrate[dInd]
            snap = self.ssdo.distanceInfo.linearUnitString(dist, convert=True)
            DM.Integrate(mfc, snap)
        del collSSDO

        #### Run Collect Events ####
        collectedFC = UTILS.returnScratchName("Collect_TempFC")
        self.cleanUpList.append(collectedFC)
        intSSDO = SSDO.SSDataObject(intFC,
                                    explicitSpatialRef=self.ssdo.spatialRef,
                                    silentWarnings=True,
                                    useChordal=True)
        intSSDO.obtainDataGA(intSSDO.oidName)
        EVENTS.collectEvents(intSSDO, collectedFC)
        descTemp = ARCPY.Describe(collectedFC)
        oidName = descTemp.oidFieldName

        #### Delete Integrated FC ####
        del intSSDO

        #### Set VarName, MasterField, AnalysisSSDO ####
        self.createAnalysisSSDO(collectedFC, "ICOUNT")
예제 #19
0
def pmpAnalysis(aoiBasin, stormType, durList):

    ###########################################################################
    ## Create PMP Point Feature Class from points within AOI basin and add fields
    def createPMPfc():
        """Create the 'PMP_Points' feature class in the scratch GDB.

        Selects the vector-grid cells intersecting the AOI basin, then the
        grid points whose centers fall in those cells, copies the points to
        scratch as 'PMP_Points', and adds one PMP_<dur> (DOUBLE) and one
        STORM_<dur> (TEXT, 16 chars) field per duration in durList.
        """
        vector_grid = home + "\\Input\Non_Storm_Data.gdb\Vector_Grid"
        grid_points = home + "\\Input\Non_Storm_Data.gdb\Grid_Points"
        pmp_points = env.scratchGDB + "\\PMP_Points"

        arcpy.AddMessage(
            "\nCreating feature class: 'PMP_Points' in Scratch.gdb...")

        # Layer of vector grid cells, narrowed to those touching the basin.
        dm.MakeFeatureLayer(vector_grid, "vgLayer")
        dm.SelectLayerByLocation("vgLayer", "INTERSECT", aoiBasin)

        # Layer of grid points, narrowed to those centered in the selection.
        dm.MakeFeatureLayer(grid_points, "gpLayer")
        dm.SelectLayerByLocation("gpLayer", "HAVE_THEIR_CENTER_IN", "vgLayer")

        # Persist the selected points as the working feature class.
        con.FeatureClassToFeatureClass("gpLayer", env.scratchGDB, "PMP_Points")
        arcpy.AddMessage("(" + str(dm.GetCount("gpLayer")) +
                         " grid points will be analyzed)\n")

        # One DOUBLE field per duration to hold the PMP depth.
        for duration in durList:
            arcpy.AddMessage("\t...adding field: PMP_" + str(duration))
            dm.AddField(pmp_points, "PMP_" + duration, "DOUBLE")

        # One TEXT field per duration for the driving storm's SPAS ID.
        for duration in durList:
            arcpy.AddMessage("\t...adding field: STORM_" + str(duration))
            dm.AddField(pmp_points, "STORM_" + duration, "TEXT", "", "", 16)

        return

    ###########################################################################
    ##  Define getAOIarea() function:
    ##  getAOIarea() calculates the area of AOI (basin outline) input shapefile/
    ##  featureclass.  The basin outline shapefile must be projected.  The area
    ##  is square miles, converted from the basin layer's projected units (feet
    ##  or meters).  The aoiBasin feature class should only have a single feature
    ##  (the basin outline).  If there are multiple features, the area will be stored
    ##  for the final feature only.

    def getAOIarea():
        """Return the AOI basin area in square miles.

        Reports the basin's spatial reference, re-projects the basin to
        USA_Contiguous_Albers_Equal_Area_Conic_USGS_version (WKID 102039),
        and sums the projected feature areas (square meters converted to
        square miles).  With multiple features, all feature areas are
        summed.  If tool parameter 5 is False, the constant area from
        parameter 6 is used instead of the measured value.
        """
        sr = arcpy.Describe(
            aoiBasin
        ).SpatialReference  # Determine aoiBasin spatial reference system
        srname = sr.name
        srtype = sr.type
        srunitname = sr.linearUnitName  # Units
        arcpy.AddMessage("\nAOI basin spatial reference:  " + srname +
                         "\nUnit type: " + srunitname +
                         "\nSpatial reference type: " + srtype)

        # Area in the basin's native units.  Only used as-is when srtype is
        # neither 'Geographic' nor 'Projected'; otherwise recomputed below.
        aoiArea = 0.0
        rows = arcpy.SearchCursor(aoiBasin)
        for row in rows:
            feat = row.getValue("Shape")
            aoiArea += feat.area

        # The two branches previously duplicated identical project-and-
        # measure code; only the user message differs, so share the body.
        if srtype in ('Geographic', 'Projected'):
            if srtype == 'Geographic':  # No surface projection: project a temporary copy and use that.
                arcpy.AddMessage(
                    "\n***The basin shapefile's spatial reference 'Geographic' is not supported.  Projecting temporary shapefile for AOI.***"
                )
            else:  # Re-project even projected data for consistency.
                arcpy.AddMessage(
                    "\n***The basin shapefile's spatial reference will be reprojected to USA_Contiguous_Albers_Equal_Area_Conic_USGS_version for data consistency.  Projecting temporary shapefile for AOI.***"
                )
            arcpy.Project_management(
                aoiBasin, env.scratchGDB + "\\TempBasin", 102039
            )  # 102039 = USA_Contiguous_Albers_Equal_Area_Conic_USGS_version
            TempBasin = env.scratchGDB + "\\TempBasin"  # Path to temporary basin created in scratch geodatabase
            sr = arcpy.Describe(
                TempBasin
            ).SpatialReference  # Determine Spatial Reference of temporary basin
            aoiArea = 0.0
            rows = arcpy.SearchCursor(
                TempBasin)  # Sum area in square meters
            for row in rows:
                feat = row.getValue("Shape")
                aoiArea += feat.area
            aoiArea = aoiArea * 0.000000386102  # Converts square meters to square miles

        aoiArea = round(aoiArea, 3)
        arcpy.AddMessage("\nArea of interest: " + str(aoiArea) +
                         " square miles.")

        if arcpy.GetParameter(5) == False:
            aoiArea = arcpy.GetParameter(6)  # Enable a constant area size
        aoiArea = round(aoiArea, 1)
        arcpy.AddMessage("\n***Area used for PMP analysis: " + str(aoiArea) +
                         " sqmi***")
        return aoiArea

    ###########################################################################
    ##  Define dadLookup() function:
    ##  The dadLookup() function determines the DAD value for the current storm
    ##  and duration according to the basin area size.  The DAD depth is interpolated
    ##  linearly between the two nearest areal values within the DAD table.
    def dadLookup(
        stormLayer, duration, area
    ):  # dadLookup() accepts the current storm layer name (string), the current duration (string), and AOI area size (float)
        """Return the DAD depth (inches) for the storm at the AOI area size.

        Reads the storm's DAD table and linearly interpolates the depth for
        the duration column between the two tabulated areas bracketing
        `area`; extrapolates when the basin is larger than the largest
        tabulated area.  Returns None when the duration column does not
        exist in the table (RuntimeError on the first getValue).
        """
        #arcpy.AddMessage("\t\tfunction dadLookup() called.")
        durField = "H_" + duration  # defines the name of the duration field (eg., "H_06" for 6-hour)
        dadTable = dadGDB + "\\" + stormLayer
        rows = arcpy.SearchCursor(dadTable)

        try:
            row = rows.next(
            )  # Sets DAD area x1 to the value in the first row of the DAD table.
            x1 = row.AREASQMI
            y1 = row.getValue(durField)
            xFlag = "FALSE"  # xFlag will remain false for basins that are larger than the largest DAD area.
        except RuntimeError:  # return None if the duration does not exist in the DAD table
            return

        # Walk the remaining rows, keeping (x1, y1) as the largest tabulated
        # area below `area` and (x2, y2) as the first at or above it.
        row = rows.next()
        i = 0
        while row:  # iterates through the DAD table - assigning the bounding values directly above and below the basin area size
            i += 1
            if row.AREASQMI < area:
                x1 = row.AREASQMI
                y1 = row.getValue(durField)
            else:
                xFlag = "TRUE"  # xFlag is switched to "TRUE" indicating area is within DAD range
                x2 = row.AREASQMI
                y2 = row.getValue(durField)
                break

            row = rows.next()
        del row, rows, i

        if xFlag == "FALSE":
            x2 = area  # If x2 is equal to the basin area, this means that the largest DAD area is smaller than the basin and the resulting DAD value must be extrapolated.
            arcpy.AddMessage(
                "\t\tThe basin area size: " + str(area) +
                " sqmi is greater than the largest DAD area: " + str(x1) +
                " sqmi.\n\t\tDAD value is estimated by extrapolation.")
            y = x1 / x2 * y1  # y (the DAD depth) is estimated by extrapolating the DAD area to the basin area size.
            return y  # The extrapolated DAD depth (in inches) is returned.

        # arcpy.AddMessage("\nArea = " + str(area) + "\nx1 = " + str(x1) + "\nx2 = " + str(x2) + "\ny1 = " + str(y1) + "\ny2 = " + str(y2))

        x = area  # If the basin area size is within the DAD table area range, the DAD depth is interpolated
        deltax = x2 - x1  # to determine the DAD value (y) at area (x) based on next lower (x1) and next higher (x2) areas.
        deltay = y2 - y1
        diffx = x - x1

        y = y1 + diffx * deltay / deltax

        if x < x1:
            arcpy.AddMessage(
                "\t\tThe basin area size: " + str(area) +
                " sqmi is less than the smallest DAD table area: " + str(x1) +
                " sqmi.\n\t\tDAD value is estimated by extrapolation.")

        return y  # The interpolated DAD depth (in inches) is returned.

    ###########################################################################
    ##  Define updatePMP() function:
    ##  This function updates the 'PMP_XX_' and 'STORM_XX' fields of the PMP_Points
    ##  feature class with the largest value from all analyzed storms stored in the
    ##  pmpValues list.
    def updatePMP(
        pmpValues, stormID, duration
    ):
        """Write the final values for one duration into 'PMP_Points'.

        pmpValues -- largest adjusted rainfall per grid point (float list)
        stormID   -- driving storm ID per grid point (text list)
        duration  -- duration suffix (string), e.g. "06"

        Rows of the scratch 'PMP_Points' feature class are updated in
        cursor order, matched by list index.
        """
        pmp_col = "PMP_" + duration
        storm_col = "STORM_" + duration
        cursor = arcpy.UpdateCursor(
            env.scratchGDB +
            "\\PMP_Points")  # iterates through PMP_Points rows
        for idx, point_row in enumerate(cursor):
            # Max adjusted rainfall for this grid point.
            point_row.setValue(pmp_col, pmpValues[idx])
            # Storm ID that produced that maximum.
            point_row.setValue(storm_col, stormID[idx])
            cursor.updateRow(point_row)
        del point_row, cursor, pmp_col, storm_col
        arcpy.AddMessage("\n\t" + duration +
                         "-hour PMP values update complete. \n")
        return

    ###########################################################################
    ##  The outputPMP() function produces raster GRID files for each of the PMP durations.
    ##  Also, a space-delimited PMP_Distribution.txt file is created in the 'Text_Output' folder.
    def outputPMP(type, area, outPath):
        """Write the final PMP outputs for one storm type.

        Merges the scratch 'PMP_Points' feature class into outPath, then
        builds a one-row basin-average summary table in the scratch GDB
        containing the storm type and the basin-average PMP for every
        PMP_* duration field (via basinAve()).  The table path is appended
        to the module-level basAveTables list.

        Removed dead code from the original: unused `desc`/`basinName`
        locals and a loop counter `i` that was incremented but never read.
        """
        pmpPoints = env.scratchGDB + "\\PMP_Points"  # Location of 'PMP_Points' feature class which will provide data for output

        outType = type[:1]
        outArea = str(int(round(area, 0))) + "sqmi"
        outFC = outType + "_" + outArea  # used only for the progress message
        arcpy.AddMessage("\nCopying PMP_Points feature class to " + outFC +
                         "...")
        dm.Merge(
            pmpPoints, outPath
        )  # merge the scratch feature layer(s) of vector grid cells into the outputs

        arcpy.AddMessage("\nCreating Basin Summary Table...")
        tableName = type + "_PMP_Basin_Average" + "_" + outArea
        tablePath = env.scratchGDB + "\\" + tableName
        dm.CreateTable(env.scratchGDB, tableName)  # Create blank table
        # Insert a single blank row that the CalculateField calls below fill in.
        cursor = arcpy.da.InsertCursor(tablePath, "*")
        cursor.insertRow([0])
        del cursor

        dm.AddField(tablePath, "STORM_TYPE", "TEXT", "", "", 10,
                    "Storm Type")  # Create "Storm Type" field
        dm.CalculateField(tablePath, "STORM_TYPE", "'" + type + "'",
                          "PYTHON_9.3")  # populate storm type field

        # For each PMP duration field add a matching column and populate it
        # with the basin average (weighted or arithmetic; see basinAve()).
        for field in arcpy.ListFields(pmpPoints, "PMP_*"):
            fieldName = field.name
            fieldAve = basinAve(
                basin, fieldName
            )  # Calls the basinAve() function - returns the average (weighted or not)
            dm.AddField(tablePath, fieldName, "DOUBLE", "",
                        2)  # Add duration field
            dm.CalculateField(tablePath, fieldName, fieldAve,
                              "PYTHON_9.3")  # Assigns the basin average
        arcpy.AddMessage("\nSummary table complete.")

        basAveTables.append(tablePath)

        return

    ###########################################################################
    ##  The basinAve() function returns the basin average PMP value for a given duration field.
    ##  If the option for a weighted average is checked in the tool parameter the script
    ##  will weight the grid point values based on proportion of area inside the basin.
    def basinAve(aoiBasin, pmpField):
        """Return the basin-average value of pmpField from 'PMP_Points'.

        When the module-level weightedAve flag is set, each grid point is
        weighted by the area of its vector-grid cell clipped to the basin;
        otherwise a simple arithmetic mean is computed via Summary
        Statistics.  Returns the average rounded to 2 decimals (implicitly
        None if the statistics table yields no rows).
        """
        pmpPoints = env.scratchGDB + "\\PMP_Points"  # Path of 'PMP_Points' scratch feature class
        if weightedAve:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField +
                             "(weighted)...")
            vectorGridClip = env.scratchGDB + "\\VectorGridClip"  # Path of 'VectorGridClip' scratch feature class
            sumstats = env.scratchGDB + "\\SummaryStats"

            dm.MakeFeatureLayer(
                home + "\\Input\Non_Storm_Data.gdb\\Vector_Grid",
                "vgLayer")  # make a feature layer of vector grid cells
            dm.SelectLayerByLocation(
                "vgLayer", "INTERSECT", aoiBasin
            )  # select the vector grid cells that intersect the aoiBasin polygon

            an.Clip("vgLayer", aoiBasin,
                    vectorGridClip)  # clips aoi vector grid to basin
            dm.AddField(
                pmpPoints, "WEIGHT", "DOUBLE"
            )  # adds 'WEIGHT' field to PMP_Points scratch feature class
            dm.MakeFeatureLayer(
                vectorGridClip, "vgClipLayer"
            )  # make a feature layer of basin clipped vector grid cells
            dm.MakeFeatureLayer(
                pmpPoints, "pmpPointsLayer"
            )  # make a feature layer of PMP_Points feature class

            dm.AddJoin("pmpPointsLayer", "ID", "vgClipLayer",
                       "ID")  # joins PMP_Points and vectorGridBasin tables
            dm.CalculateField(
                "pmpPointsLayer", "WEIGHT", "!vectorGridClip.Shape_Area!",
                "PYTHON_9.3"
            )  # Calculates basin area proportion to use as weight for each grid cell.
            dm.RemoveJoin("pmpPointsLayer", "vectorGridClip")

            an.Statistics(pmpPoints, sumstats, [["WEIGHT", "SUM"]], "")
            stats = arcpy.SearchCursor(sumstats)
            pmpWgtAve = pmpField + "_WgtAve"

            # NOTE(review): this loop assumes the statistics table holds
            # exactly one row -- the 'del stats, row' inside the body would
            # raise NameError on a second iteration.  Confirm.
            for row in stats:
                calc = row.getValue("SUM_WEIGHT")
                express = "(!WEIGHT!/{})* !{}!".format(calc, pmpField)
                i = 0
                # ListFields(pmpPoints, pmpField) matches only the field
                # named pmpField, so this presumably iterates once -- verify.
                for field in arcpy.ListFields(pmpPoints, pmpField):
                    # NOTE(review): AddField's 4th positional argument is
                    # field_precision; the 2 here may have been intended as
                    # field_scale -- confirm against the tool signature.
                    dm.AddField(pmpPoints, pmpWgtAve, "DOUBLE", 2)
                    dm.CalculateField(pmpPoints, pmpWgtAve, express,
                                      "PYTHON_9.3")
                    i += 1
                del stats, row

            an.Statistics(pmpPoints, sumstats, [[pmpWgtAve, "SUM"]], "")
            sumwgtave = "SUM_" + pmpWgtAve
            # Return the weighted average from the first (only) stats row.
            with arcpy.da.SearchCursor(sumstats, sumwgtave) as stats:
                for row in stats:
                    wgtAve = row[0]
                    return round(wgtAve, 2)

##            na = arcpy.da.TableToNumPyArray(pmpPoints,(pmpField, 'WEIGHT'))                                 # Assign pmpPoints values and weights to Numpy array (na)
##            wgtAve = numpy.average(na[pmpField], weights=na['WEIGHT'])                                         # Calculate weighted average with Numpy average
##            del na
##            return round(wgtAve, 2)

        else:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField +
                             "(not weighted)...")
            sumstats = env.scratchGDB + "\\SummaryStats"
            an.Statistics(pmpPoints, sumstats, [[pmpField, "MEAN"]], "")
            mean = "MEAN_" + pmpField
            # Return the arithmetic mean from the first (only) stats row.
            with arcpy.da.SearchCursor(sumstats, mean) as stats:
                for row in stats:
                    fieldAve = row[0]
                    return round(fieldAve, 2)

##            na = arcpy.da.TableToNumPyArray(pmpPoints, pmpField)                                            # Assign pmpPoints values to Numpy array (na)
##            fieldAve = numpy.average(na[pmpField])                                                             # Calculates aritmetic mean
##            del na
##            return round(fieldAve, 2)

###########################################################################
##  This portion of the code iterates through each storm feature class in the
##  'Storm_Adj_Factors' geodatabase (evaluating the feature class only within
##  the Local, Tropical, or general feature dataset).  For each duration,
##  at each grid point within the aoi basin, the transpositionality is
##  confirmed.  Then the DAD precip depth is retrieved and applied to the
##  total adjustment factor to yield the total adjusted rainfall.  This
##  value is then sent to the updatePMP() function to update the 'PMP_Points'
##  feature class.
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##

    # --- Driver body of pmpAnalysis(): validate input, build PMP_Points,
    # --- then evaluate every storm for every duration.
    desc = arcpy.Describe(
        basin)  # Check to ensure AOI input shape is a Polygon. If not - exit.
    basinShape = desc.shapeType
    if desc.shapeType == "Polygon":
        arcpy.AddMessage("\nBasin shape type: " + desc.shapeType)
    else:
        arcpy.AddMessage("\nBasin shape type: " + desc.shapeType)
        arcpy.AddMessage("\nError: Input shapefile must be a polygon!\n")
        sys.exit()

    createPMPfc(
    )  # Call the createPMPfc() function to create the PMP_Points feature class.

    env.workspace = adjFactGDB  # the workspace environment is set to the 'Storm_Adj_Factors' file geodatabase

    aoiSQMI = round(
        getAOIarea(), 2
    )  # Calls the getAOIarea() function to assign area of AOI shapefile to 'aoiSQMI'

    for dur in durList:
        stormList = arcpy.ListFeatureClasses(
            "", "Point", stormType
        )  # List all the total adjustment factor feature classes within the storm type feature dataset.

        arcpy.AddMessage(
            "\n*************************************************************\nEvaluating "
            + dur + "-hour duration...")

        # Per-grid-point accumulators, matched to PMP_Points cursor order.
        pmpList = []
        driverList = []
        gridRows = arcpy.SearchCursor(env.scratchGDB + "\\PMP_Points")
        try:
            for row in gridRows:
                pmpList.append(
                    0.0
                )  # creates pmpList of empty float values for each grid point to store final PMP values
                driverList.append(
                    "STORM"
                )  # creates driverList of empty text values for each grid point to store final Driver Storm IDs
            del row, gridRows
        except UnboundLocalError:  # 'row' never bound => cursor was empty
            arcpy.AddMessage(
                "\n***Error: No data present within basin/AOI area.***\n")
            sys.exit()

        for storm in stormList:
            arcpy.AddMessage("\n\tEvaluating storm: " + storm + "...")
            dm.MakeFeatureLayer(
                storm,
                "stormLayer")  # creates a feature layer for the current storm
            dm.SelectLayerByLocation(
                "stormLayer", "HAVE_THEIR_CENTER_IN", "vgLayer"
            )  # examines only the grid points that lie within the AOI
            gridRows = arcpy.SearchCursor("stormLayer")
            pmpField = "PMP_" + dur
            i = 0
            try:
                dadPrecip = round(dadLookup(storm, dur, aoiSQMI), 3)
                arcpy.AddMessage("\t\t" + dur + "-hour DAD value:  " +
                                 str(dadPrecip) + chr(34))
            except TypeError:  # If no duration exists in the DAD table (dadLookup returned None) - move to the next storm
                arcpy.AddMessage("\t***Duration '" + str(dur) +
                                 "-hour' is not present for " + str(storm) +
                                 ".***\n")
                continue
            arcpy.AddMessage(
                "\t\tComparing " + storm +
                " adjusted rainfall values against current driver values...\n")
            for row in gridRows:
                if row.TRANS == 1:  # Only continue if grid point is transpositionable ('1' is transpositionable, '0' is not).
                    try:  # get total adj. factor if duration exists
                        adjRain = round(dadPrecip * row.TAF, 1)
                        if adjRain > pmpList[i]:
                            # New maximum: record the depth and driving storm.
                            pmpList[i] = adjRain
                            driverList[i] = storm
                    except RuntimeError:
                        arcpy.AddMessage(
                            "\t\t   *Warning*  Total Adjusted Raifnall value falied to set for row "
                            + str(row.CNT))
                        break
                    del adjRain
                i += 1
            del row
        del storm, stormList, gridRows, dadPrecip
        updatePMP(pmpList, driverList,
                  dur)  # calls function to update "PMP Points" feature class
    del dur, pmpList

    arcpy.AddMessage(
        "\n'PMP_Points' Feature Class 'PMP_XX' fields update complete for all '"
        + stormType + "' storms.")

    outputPMP(stormType, aoiSQMI, outPath)  # calls outputPMP() function

    del aoiSQMI
    return
예제 #20
0
    def basinAve(aoiBasin, pmpField):
        """Return the basin-average value of pmpField from 'PMP_Points'.

        NOTE(review): this block appears to be a duplicated paste of the
        basinAve() helper defined earlier in this file -- confirm which
        copy is authoritative.  Weighted path weights each grid point by
        its clipped vector-grid cell area; otherwise an arithmetic mean is
        used.  Returns the average rounded to 2 decimals.
        """
        pmpPoints = env.scratchGDB + "\\PMP_Points"  # Path of 'PMP_Points' scratch feature class
        if weightedAve:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField +
                             "(weighted)...")
            vectorGridClip = env.scratchGDB + "\\VectorGridClip"  # Path of 'VectorGridClip' scratch feature class
            sumstats = env.scratchGDB + "\\SummaryStats"

            dm.MakeFeatureLayer(
                home + "\\Input\Non_Storm_Data.gdb\\Vector_Grid",
                "vgLayer")  # make a feature layer of vector grid cells
            dm.SelectLayerByLocation(
                "vgLayer", "INTERSECT", aoiBasin
            )  # select the vector grid cells that intersect the aoiBasin polygon

            an.Clip("vgLayer", aoiBasin,
                    vectorGridClip)  # clips aoi vector grid to basin
            dm.AddField(
                pmpPoints, "WEIGHT", "DOUBLE"
            )  # adds 'WEIGHT' field to PMP_Points scratch feature class
            dm.MakeFeatureLayer(
                vectorGridClip, "vgClipLayer"
            )  # make a feature layer of basin clipped vector grid cells
            dm.MakeFeatureLayer(
                pmpPoints, "pmpPointsLayer"
            )  # make a feature layer of PMP_Points feature class

            dm.AddJoin("pmpPointsLayer", "ID", "vgClipLayer",
                       "ID")  # joins PMP_Points and vectorGridBasin tables
            dm.CalculateField(
                "pmpPointsLayer", "WEIGHT", "!vectorGridClip.Shape_Area!",
                "PYTHON_9.3"
            )  # Calculates basin area proportion to use as weight for each grid cell.
            dm.RemoveJoin("pmpPointsLayer", "vectorGridClip")

            an.Statistics(pmpPoints, sumstats, [["WEIGHT", "SUM"]], "")
            stats = arcpy.SearchCursor(sumstats)
            pmpWgtAve = pmpField + "_WgtAve"

            # NOTE(review): assumes the statistics table holds exactly one
            # row; 'del stats, row' would raise NameError on a second pass.
            for row in stats:
                calc = row.getValue("SUM_WEIGHT")
                express = "(!WEIGHT!/{})* !{}!".format(calc, pmpField)
                i = 0
                for field in arcpy.ListFields(pmpPoints, pmpField):
                    dm.AddField(pmpPoints, pmpWgtAve, "DOUBLE", 2)
                    dm.CalculateField(pmpPoints, pmpWgtAve, express,
                                      "PYTHON_9.3")
                    i += 1
                del stats, row

            an.Statistics(pmpPoints, sumstats, [[pmpWgtAve, "SUM"]], "")
            sumwgtave = "SUM_" + pmpWgtAve
            # Return the weighted average from the first (only) stats row.
            with arcpy.da.SearchCursor(sumstats, sumwgtave) as stats:
                for row in stats:
                    wgtAve = row[0]
                    return round(wgtAve, 2)

##            na = arcpy.da.TableToNumPyArray(pmpPoints,(pmpField, 'WEIGHT'))                                 # Assign pmpPoints values and weights to Numpy array (na)
##            wgtAve = numpy.average(na[pmpField], weights=na['WEIGHT'])                                         # Calculate weighted average with Numpy average
##            del na
##            return round(wgtAve, 2)

        else:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField +
                             "(not weighted)...")
            sumstats = env.scratchGDB + "\\SummaryStats"
            an.Statistics(pmpPoints, sumstats, [[pmpField, "MEAN"]], "")
            mean = "MEAN_" + pmpField
            # Return the arithmetic mean from the first (only) stats row.
            with arcpy.da.SearchCursor(sumstats, mean) as stats:
                for row in stats:
                    fieldAve = row[0]
                    return round(fieldAve, 2)
def get_path_residence_times (in_file, cost_rast, out_raster, t_diff_fld_name, workspace):
    
    if len (out_raster) == 0:
        arcpy.AddError ("Missing argument: out_rast")
        raise Exception
    if len (t_diff_fld_name) == 0:
        t_diff_fld_name = "T_DIFF_HRS"

    arcpy.env.overwriteOutput = True  #  This is underhanded.  It should be an argument.

    if arcpy.env.outputCoordinateSystem is None:
        arcpy.env.outputCoordinateSystem = cost_rast
    arcpy.AddMessage ("coordinate system is %s" % arcpy.env.outputCoordinateSystem.name)

    if len(workspace):
        arcpy.env.workspace = workspace
    if arcpy.env.workspace is None or len(arcpy.env.workspace) == 0:
        arcpy.env.workspace = os.getcwd()

    if '.gdb' in arcpy.env.workspace:
        arcpy.AddError (
            "Worskpace is a geodatabase.  " +
            "This brings too much pain for this script to work.\n" +
            "%s" % arcpy.env.workspace
        )
        raise WorkspaceIsGeodatabase


    r = Raster(cost_rast)
    
    if r.maximum == 0 and r.minimum == 0:
        arcpy.AddMessage ('Cost raster has only zero value.  Cannot calculate cost distances.')
        raise CostRasterIsZero

    size = r.height * r.width * 4
    if size > 2 * 1028 ** 3:
        import struct
        struct_size = struct.calcsize("P") * 8
        if struct_size == 32:
            size_in_gb = float (size) / (1028 ** 3)
            arcpy.AddMessage (
                'Cost raster exceeds 2 GiB in size (%s GiB).  This is too large for a 32 bit NumPy.' % size_in_gb
            )
            raise NumPyArrayExceedsSizeLimits

    if not check_points_are_in_cost_raster(in_file, cost_rast):
        arcpy.AddError ('One or more input points do not intersect the cost raster')
        raise PointNotOnRaster

    arcpy.env.snapRaster = cost_rast
    suffix = None
    wk = arcpy.env.workspace
    if not '.gdb' in wk:
        suffix = '.shp'


    ext = arcpy.env.extent
    if ext is None:
        arcpy.env.extent = r.extent

    arcpy.AddMessage ("Extent is %s" % arcpy.env.extent)

    arcpy.env.cellSize = r.meanCellWidth
    arcpy.AddMessage ("Cell size is %s" % arcpy.env.cellSize)
    cellsize_used = float (arcpy.env.cellSize)
    extent = arcpy.env.extent
    lower_left_coord = extent.lowerLeft
    
    arcpy.AddMessage ('Currently in directory: %s\n' % os.getcwd())
    arcpy.AddMessage ('Workspace is: %s' % arcpy.env.workspace)
    arcpy.AddMessage ("lower left is %s" % lower_left_coord)

    if arcpy.env.mask is None:
        arcpy.AddMessage ("Setting mask to %s" % cost_rast)
        arcpy.env.mask = cost_rast

    #  accumulated transits
    transit_array_accum = arcpy.RasterToNumPyArray (Raster(cost_rast) * 0)

    feat_layer = "feat_layer"
    arcmgt.MakeFeatureLayer(in_file, feat_layer)
    desc = arcpy.Describe (feat_layer)
    oid_fd_name = desc.OIDFieldName
    arcpy.AddMessage("oid_fd_name = %s" % oid_fd_name)

    #  variable name is redundant now??? - should all calls be to oid_fd_name?
    target_fld = oid_fd_name

    proc_layer = "process_layer"
    arcmgt.MakeFeatureLayer(in_file, proc_layer)
    rows = arcpy.SearchCursor(proc_layer)
    last_target = None

    for row_cur in rows:
        transit_time = row_cur.getValue (t_diff_fld_name)

        if last_target is None or transit_time == 0:
            message = 'Skipping %s = %s' % (oid_fd_name, row_cur.getValue(oid_fd_name))
            if transit_time == 0:
                message = message + "  Transit time is zero"
            arcpy.AddMessage(message)
            last_target = row_cur.getValue(target_fld)
            last_oid    = row_cur.getValue(oid_fd_name)
            continue

        arcpy.AddMessage ("Processing %s %i" % (oid_fd_name, row_cur.getValue(oid_fd_name)))

        arcmgt.SelectLayerByAttribute(
            feat_layer,
            "NEW_SELECTION",
            '%s = %s' % (target_fld, last_target)
        )
        backlink_rast  = arcpy.CreateScratchName("backlink")
        path_dist_rast = PathDistance(feat_layer, cost_rast, out_backlink_raster = backlink_rast)

        #  extract the distance from the last point
        shp = row_cur.shape
        centroid = shp.centroid
        (x, y) = (centroid.X, centroid.Y)
        result = arcmgt.GetCellValue(path_dist_rast, "%s %s" % (x, y), "1")
        res_val = result.getOutput(0)
        if res_val == "NoData":
            this_oid = row_cur.getValue(oid_fd_name)
            arcpy.AddMessage ("Got nodata for coordinate (%s, %s)" % (x, y))
            arcpy.AddMessage ("Is the path between features %s and %s wholly contained by the cost raster?" % (last_oid, this_oid))
            pras_name = "pth_%s_%s.tif" % (last_oid, this_oid)
            arcpy.AddMessage ("Attempting to save path raster as %s" % pras_name)
            try:
                path_dist_rast.save(pras_name)
            except Exception as e:
                arcpy.AddMessage (e)
            raise PathDistanceIsNoData
        try:
            path_distance = float (res_val)
        except:
            #  kludge around locale/radix issues 
            if res_val.find(","):
                res_val = res_val.replace(",", ".")
                path_distance = float (res_val)
            else:
                raise
        arcpy.AddMessage("Path distance is %s\nTransit time is %s" % (path_distance, transit_time))

        #  get a raster of the path from origin to destination
        condition = '%s in (%i, %i)' % (oid_fd_name, last_oid, row_cur.getValue(oid_fd_name))
        dest_layer = "dest_layer" + str (last_oid)
        arcmgt.MakeFeatureLayer(in_file, dest_layer, where_clause = condition)

        count = arcmgt.GetCount(dest_layer)
        count = int (count.getOutput(0))
        if count == 0:
            raise NoFeatures("No features selected.  Possible coordinate system issues.\n" + condition)

        try:
            path_cost_rast = CostPath(dest_layer, path_dist_rast, backlink_rast)
            #path_dist_rast.save("xx_pr" + str (last_oid))
        except Exception as e:
            raise

        try:
            pcr_mask       = 1 - IsNull (path_cost_rast)
            #pcr_mask.save ("xx_pcr_mask" + str (last_oid))
            dist_masked    = path_dist_rast * pcr_mask
            path_array     = arcpy.RasterToNumPyArray(dist_masked, nodata_to_value = -9999)
            path_array_idx = numpy.where(path_array > 0)
            transit_array  = numpy.zeros_like(path_array)  #  past experience suggests we might need to use a different approach to guarantee we get zeroes
        except:
            raise

        path_sum = None
        arcpy.AddMessage ("processing %i cells of path raster" % (len(path_array_idx[0])))

        if path_distance == 0 or not len(path_array_idx[0]):
            path_sum = 1 #  stayed in the same cell
            mask_array = arcpy.RasterToNumPyArray(pcr_mask, nodata_to_value = -9999)
            mask_array_idx = numpy.where(mask_array == 1)
            i = mask_array_idx[0][0]
            j = mask_array_idx[1][0]
            transit_array[i][j] = path_sum
        else:
            row_count = len (path_array) 
            col_count = len (path_array[0])

            for idx in range (len(path_array_idx[0])):
                i = path_array_idx[0][idx]
                j = path_array_idx[1][idx]
                val = path_array[i][j]
                nbrs = []
                for k in (i-1, i, i+1):
                    if k < 0 or k >= row_count:
                        continue
                    checkrow = path_array[k]
                    for l in (j-1, j, j+1):
                        if l < 0 or l >= col_count:
                            continue
                        if k == i and j == l:
                            continue  #  don't check self
                        checkval = checkrow[l]
                        #  negs are nodata, and this way we
                        #  don't need to care what that value is
                        if checkval >= 0:
                            diff = val - checkval
                            if diff > 0:
                                nbrs.append(diff)
                                #arcpy.AddMessage ("Check and diff vals are %s %s" % (checkval, diff))
                diff = min (nbrs)
                #arcpy.AddMessage ("Diff  val is %s" % diff)
                transit_array[i][j] = diff

            path_sum = path_array.max()  #  could use path_distance?
            #arcpy.AddMessage ("path_array.max is %s" % path_sum)

        #  sometimes we get a zero path_sum even when the path_distance is non-zero
        if path_sum == 0:
            path_sum = 1

        #  Increment the cumulative transit array by the fraction of the
        #  transit time spent in each cell.
        #  Use path_sum because it corrects for cases where we stayed in the same cell.
        transit_array_accum = transit_array_accum + ((transit_array / path_sum) * transit_time)

        #xx = arcpy.NumPyArrayToRaster (transit_array, lower_left_coord, cellsize_used, cellsize_used, 0)
        #tmpname = "xx_t_arr_" + str (last_oid)
        #print "Saving transit array to %s" % tmpname
        #xx.save (tmpname)


        try:
            arcmgt.Delete(backlink_rast)
            arcmgt.Delete(dest_layer)
        except Exception as e:
            arcpy.AddMessage (e)

        #  getting off-by-one errors when using the environment, so use this directly
        ext = path_cost_rast.extent
        lower_left_coord = ext.lowerLeft

        last_target = row_cur.getValue(target_fld)
        last_oid    = row_cur.getValue(oid_fd_name)

    #  need to use env settings to get it to be the correct size
    try:
        arcpy.AddMessage ("lower left is %s" % lower_left_coord)
        xx = arcpy.NumPyArrayToRaster (transit_array_accum, lower_left_coord, cellsize_used, cellsize_used, 0)
        print "Saving to %s" % out_raster
        xx.save (out_raster)
    except:
        raise


    print "Completed"

    return ()
def classify_lakes(nhd,
                   out_feature_class,
                   exclude_intermit_flowlines=False,
                   debug_mode=False):
    """Classify lake connectivity by tracing the NHD geometric network.

    Each lake/reservoir polygon is labeled 'Isolated', 'Headwater',
    'Drainage', or 'DrainageLk' (the last meaning a >=10 ha lake lies
    upstream, detected via two downstream traces of HYDRO_NET).

    Designed to run twice: first with exclude_intermit_flowlines=False,
    which selects waterbodies (FType 390/436) from NHDWaterbody and writes
    a "Lake_Connectivity_Class" field to out_feature_class; then with
    exclude_intermit_flowlines=True, which CONSUMES (copies, then deletes)
    that existing output, repeats the trace on permanent flowlines only,
    adds "Lake_Connectivity_Permanent" plus a "Lake_Connectivity_Fluctuates"
    Y/N flag, and re-creates the output projected to spatial reference
    102039 (USA Contiguous Albers).

    :param nhd: path to an NHD geodatabase containing the "Hydrography"
        dataset (NHDFlowline, HYDRO_NET_Junctions, NHDWaterbody, HYDRO_NET)
    :param out_feature_class: output feature class path; read and replaced
        on the second (permanent-only) pass
    :param exclude_intermit_flowlines: True to drop intermittent flowlines
        (FCode 46003/46007) before tracing
    :param debug_mode: True to work in a temp file GDB kept on disk with
        overwrite enabled (and skip cleanup) instead of in_memory

    NOTE(review): depends on module-level XY_TOLERANCE, cu, DM, os, and
    arcpy being defined — confirm at the top of the file.
    """
    if debug_mode:
        arcpy.env.overwriteOutput = True
        temp_gdb = cu.create_temp_GDB('classify_lake_connectivity')
        arcpy.env.workspace = temp_gdb
        arcpy.AddMessage('Debugging workspace located at {}'.format(temp_gdb))

    else:
        arcpy.env.workspace = 'in_memory'

    # Fail fast if a previous run left "temp_fc" behind in this workspace.
    if arcpy.Exists("temp_fc"):
        print("There is a problem here.")
        raise Exception

    # Tool temporary feature classes
    temp_fc = "temp_fc"
    csiwaterbody_10ha = "csiwaterbody_10ha"
    nhdflowline_filtered = "nhdflowline_filtered"
    dangles = "dangles"
    start = "start"
    end = "end"
    startdangles = "startdangles"
    enddangles = "enddangles"
    non_artificial_end = "non_artificial_end"
    flags_10ha_lake_junctions = "flags_10ha_lake_junctions"
    midvertices = "midvertices"
    non10vertices = "non10vertices"
    non10junctions = "non10junctions"
    all_non_flag_points = "all_non_flag_points"
    barriers = "barriers"
    trace1_junctions = "trace1_junctions"
    trace1_flowline = "trace1_flowline"
    trace2_junctions = "trace2junctions"
    trace2_flowline = "trace2_flowline"

    # Clean up workspace in case of bad exit from prior run in same session.
    this_tool_layers = [
        "dangles_lyr", "nhdflowline_lyr", "junction_lyr", "midvertices_lyr",
        "all_non_flag_points_lyr", "non10vertices_lyr", "out_fc_lyr", "trace1",
        "trace2"
    ]
    this_tool_temp = [
        temp_fc, csiwaterbody_10ha, nhdflowline_filtered, dangles, start, end,
        startdangles, enddangles, non_artificial_end,
        flags_10ha_lake_junctions, midvertices, non10vertices, non10junctions,
        all_non_flag_points, barriers, trace1_junctions, trace1_flowline,
        trace2_junctions, trace2_flowline
    ]
    for item in this_tool_layers + this_tool_temp:
        try:
            DM.Delete(item)
        except:
            pass

    # Local variables:
    # NOTE(review): "NHDFLowline" capitalization is unusual — harmless only
    # if geodatabase paths are case-insensitive here; confirm.
    nhdflowline = os.path.join(nhd, "Hydrography", "NHDFLowline")
    nhdjunction = os.path.join(nhd, "Hydrography", "HYDRO_NET_Junctions")
    nhdwaterbody = os.path.join(nhd, "Hydrography", "NHDWaterbody")
    network = os.path.join(nhd, "Hydrography", "HYDRO_NET")

    # Get lakes, ponds and reservoirs over a hectare.
    #csi_population_filter = '''"AreaSqKm" >=0.01 AND\
    #"FCode" IN (39000,39004,39009,39010,39011,39012,43600,43613,43615,43617,43618,43619,43621)'''
    all_lakes_reservoirs_filter = '''"FType" IN (390, 436)'''

    # Can't see why we shouldn't just attribute all lakes and reservoirs
    # arcpy.Select_analysis(nhdwaterbody, "csiwaterbody", lake_population_filter)
    arcpy.AddMessage("Initializing output.")
    if exclude_intermit_flowlines:
        # Second pass: start from the first pass's output, then remove it so
        # it can be re-created (projected) at the end.
        DM.CopyFeatures(out_feature_class, temp_fc)
        DM.Delete(out_feature_class)
    else:
        arcpy.Select_analysis(nhdwaterbody, temp_fc,
                              all_lakes_reservoirs_filter)

    # Get lakes, ponds and reservoirs over 10 hectares.
    lakes_10ha_filter = '''"AreaSqKm" >= 0.1 AND "FType" IN (390, 436)'''
    arcpy.Select_analysis(nhdwaterbody, csiwaterbody_10ha, lakes_10ha_filter)

    # Exclude intermittent flowlines, if requested
    if exclude_intermit_flowlines:
        flowline_where_clause = '''"FCode" NOT IN (46003,46007)'''
        nhdflowline = arcpy.Select_analysis(nhdflowline, nhdflowline_filtered,
                                            flowline_where_clause)

    # Make dangle points at end of nhdflowline
    DM.FeatureVerticesToPoints(nhdflowline, dangles, "DANGLE")
    DM.MakeFeatureLayer(dangles, "dangles_lyr")

    # Isolate start dangles from end dangles.
    DM.FeatureVerticesToPoints(nhdflowline, start, "START")
    DM.FeatureVerticesToPoints(nhdflowline, end, "END")

    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", start)
    DM.CopyFeatures("dangles_lyr", startdangles)
    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", end)
    DM.CopyFeatures("dangles_lyr", enddangles)

    # Special handling for lakes that have some intermittent flow in and some permanent
    if exclude_intermit_flowlines:
        DM.MakeFeatureLayer(nhdflowline, "nhdflowline_lyr")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "NEW_SELECTION",
                                  '''"WBArea_Permanent_Identifier" is null''')
        DM.FeatureVerticesToPoints("nhdflowline_lyr", non_artificial_end,
                                   "END")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "CLEAR_SELECTION")

    arcpy.AddMessage("Found source area nodes.")

    # Get junctions from lakes >= 10 hectares.
    DM.MakeFeatureLayer(nhdjunction, "junction_lyr")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT", csiwaterbody_10ha,
                             XY_TOLERANCE, "NEW_SELECTION")

    DM.CopyFeatures("junction_lyr", flags_10ha_lake_junctions)
    arcpy.AddMessage("Found lakes >= 10 ha.")

    # Make points shapefile and layer at flowline vertices to act as potential flags and/or barriers.
    arcpy.AddMessage("Tracing...")
    DM.FeatureVerticesToPoints(nhdflowline, midvertices, "MID")
    DM.MakeFeatureLayer(midvertices, "midvertices_lyr")

    # Get vertices that are not coincident with 10 hectare lake junctions.
    # (select-then-switch inverts the selection to the non-coincident set)
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "NEW_SELECTION")
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "SWITCH_SELECTION")
    DM.CopyFeatures("midvertices_lyr", non10vertices)

    # Get junctions that are not coincident with 10 hectare lake junctions.
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "NEW_SELECTION")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "SWITCH_SELECTION")
    DM.CopyFeatures("junction_lyr", non10junctions)

    # Merge non10vertices with non10junctions
    DM.Merge([non10junctions, non10vertices],
             all_non_flag_points)  # inputs both point fc in_memory
    DM.MakeFeatureLayer(all_non_flag_points, "all_non_flag_points_lyr")

    # Tests the counts...for some reason I'm not getting stable behavior from the merge.
    mid_n = int(DM.GetCount(non10vertices).getOutput(0))
    jxn_n = int(DM.GetCount(non10junctions).getOutput(0))
    merge_n = int(DM.GetCount(all_non_flag_points).getOutput(0))
    if merge_n < mid_n + jxn_n:
        arcpy.AddWarning(
            "The total number of flags ({0}) is less than the sum of the input junctions ({1}) "
            "and input midpoints ({2})".format(merge_n, jxn_n, mid_n))

    # For tracing barriers, select all_non_flag_points points that intersect a 10 ha lake.
    DM.SelectLayerByLocation("all_non_flag_points_lyr", "INTERSECT",
                             csiwaterbody_10ha, XY_TOLERANCE, "NEW_SELECTION")
    DM.CopyFeatures("all_non_flag_points_lyr", barriers)

    # Trace1-Trace downstream to first barrier (junctions+midvertices in 10 ha lake) starting from flags_10ha_lake_junctions flag points.
    DM.TraceGeometricNetwork(network, "trace1", flags_10ha_lake_junctions,
                             "TRACE_DOWNSTREAM", barriers)

    # Save trace1 flowlines and junctions to layers on disk.
    DM.CopyFeatures("trace1\HYDRO_NET_Junctions",
                    trace1_junctions)  # extra for debugging
    DM.CopyFeatures("trace1\NHDFlowline", trace1_flowline)

    # Select vertice midpoints that intersect trace1 flowlines selection for new flags for trace2.
    DM.MakeFeatureLayer(non10vertices, "non10vertices_lyr")
    DM.SelectLayerByLocation("non10vertices_lyr", "INTERSECT", trace1_flowline,
                             "", "NEW_SELECTION")

    # Trace2-Trace downstream from midpoints of flowlines that intersect the selected flowlines from trace1.
    DM.TraceGeometricNetwork(network, "trace2", "non10vertices_lyr",
                             "TRACE_DOWNSTREAM")

    # Save trace1 flowlines and junctions to layers and then shapes on disk.
    DM.CopyFeatures("trace2\HYDRO_NET_Junctions", trace2_junctions)
    DM.CopyFeatures("trace2\NHDFlowline",
                    trace2_flowline)  # extra for debugging
    arcpy.AddMessage("Done tracing.")

    # Make shapefile for seepage lakes. (Ones that don't intersect flowlines)
    if exclude_intermit_flowlines:
        class_field_name = "Lake_Connectivity_Permanent"
    else:
        class_field_name = "Lake_Connectivity_Class"
    DM.AddField(temp_fc, class_field_name, "TEXT", field_length=13)
    DM.MakeFeatureLayer(temp_fc, "out_fc_lyr")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline, "",
                             "SWITCH_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""",
                      "PYTHON")

    # New type of "Isolated" classification, mostly for "permanent" but there were some oddballs in "maximum" too
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", enddangles,
                             XY_TOLERANCE, "SUBSET_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""",
                      "PYTHON")

    # Get headwater lakes.
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByAttribute(
        "out_fc_lyr", "REMOVE_FROM_SELECTION",
        '''"{}" = 'Isolated' '''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""",
                      "PYTHON")

    # Select csiwaterbody that intersect trace2junctions
    arcpy.AddMessage("Beginning connectivity attribution...")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", trace2_junctions,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'DrainageLk'""",
                      "PYTHON")

    # Get stream drainage lakes. Either unassigned so far or convert "Headwater" if a permanent stream flows into it,
    # which is detected with "non_artificial_end"
    DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                              '''"{}" IS NULL'''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""",
                      "PYTHON")
    if exclude_intermit_flowlines:
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"{}" = 'Headwater' '''.format(class_field_name))
        DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", non_artificial_end,
                                 XY_TOLERANCE, "SUBSET_SELECTION")
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""",
                          "PYTHON")

        # Prevent 'upgrades' due to very odd flow situations and artifacts of bad digitization. The effects of these
        # are varied--to avoid confusion, just keep the class  assigned with all flowlines

        # 1--Purely hypothetical, not seen in testing
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"Lake_Connectivity_Class" = 'Isolated' AND "Lake_Connectivity_Permanent" <> 'Isolated' '''
        )
        DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""",
                          "PYTHON")

        # 2--Headwater to Drainage upgrade seen in testing with odd multi-inlet flow situation
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"Lake_Connectivity_Class" = 'Headwater' AND "Lake_Connectivity_Permanent" IN ('Drainage', 'DrainageLk') '''
        )
        DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""",
                          "PYTHON")

        # 3--Drainage to DrainageLk upgrade seen in testing when intermittent stream segments were used
        # erroneously instead of artificial paths
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"Lake_Connectivity_Class" = 'Drainage' AND "Lake_Connectivity_Permanent" = 'DrainageLk' '''
        )
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""",
                          "PYTHON")
        DM.SelectLayerByAttribute("out_fc_lyr", "CLEAR_SELECTION")

        # Add change flag for users
        DM.AddField(temp_fc,
                    "Lake_Connectivity_Fluctuates",
                    "Text",
                    field_length="1")
        flag_codeblock = """def flag_calculate(arg1, arg2):
            if arg1 == arg2:
                return 'N'
            else:
                return 'Y'"""
        expression = 'flag_calculate(!Lake_Connectivity_Class!, !Lake_Connectivity_Permanent!)'
        DM.CalculateField(temp_fc, "Lake_Connectivity_Fluctuates", expression,
                          "PYTHON", flag_codeblock)

    # Project output once done with both. Switching CRS earlier causes trace problems.
    if not exclude_intermit_flowlines:
        DM.CopyFeatures(temp_fc, out_feature_class)
    else:
        DM.Project(temp_fc, out_feature_class, arcpy.SpatialReference(102039))

    # Clean up
    if not debug_mode:
        for item in this_tool_layers + this_tool_temp:
            if arcpy.Exists(item):
                DM.Delete(item)

    if not debug_mode:
        DM.Delete("trace1")
        DM.Delete("trace2")
    arcpy.AddMessage("{} classification is complete.".format(class_field_name))
예제 #23
0
def mergeDualCarriageways():
    """Collapse dual carriageways and turning circles into single, straight-line
    roadways. The collapse tools are run on each route/service-level pair
    individually and the results are appended back into one feature class,
    since per-route processing yields better output than running the tools
    over all routes at once."""

    generateMatchCode()

    # Empty container feature class (Oregon State Plane North, EPSG 2913)
    # that each route's collapsed geometry will be appended into.
    management.CreateFeatureclass(os.path.dirname(collapsed_routes),
                                  os.path.basename(collapsed_routes),
                                  'POLYLINE',
                                  distinct_routes_src,
                                  spatial_reference=arcpy.SpatialReference(2913))

    # Selections can only be applied to a layer, not the source routes.
    rte_lyr = 'distinct_transit_routes'
    management.MakeFeatureLayer(distinct_routes, rte_lyr)

    route_service_pairs = getRouteServicePairs()
    merge_shp = os.path.join(temp_shp_dir, 'temp_merge.shp')
    collapse_shp = os.path.join(temp_shp_dir, 'temp_collapse.shp')

    out_fields = ['Shape@', 'route_id', 'serv_level', 'route_type']
    with da.InsertCursor(collapsed_routes, out_fields) as writer:
        for rte, svc in route_service_pairs:
            where = '"route_id" = {0} AND "serv_level" = \'{1}\''.format(
                rte, svc)
            management.SelectLayerByAttribute(rte_lyr, 'NEW_SELECTION', where)

            # Merge dual carriageways within 100 ft of each other; features
            # with '0' in the merge field are left unmerged.
            cartography.MergeDividedRoads(rte_lyr, 'merge_id', 100, merge_shp)

            # Collapse turning circles up to 550 units across.
            cartography.CollapseRoadDetail(merge_shp, 550, collapse_shp)

            # Append this route's collapsed features to the combined output.
            with da.SearchCursor(collapse_shp, out_fields) as reader:
                for feat in reader:
                    writer.insertRow(feat)

    # With the carriageways collapsed, contiguous segments sharing common
    # attributes can be fused to reduce the data to fewer segments.
    management.Dissolve(collapsed_routes,
                        dissolved_routes,
                        ['route_id', 'serv_level', 'route_type'],
                        multi_part='SINGLE_PART',
                        unsplit_lines='UNSPLIT_LINES')
예제 #24
0
# Work in the in-memory workspace so intermediates are not written to disk.
temp = r"memory"
env.workspace = temp

#Load layers
# NOTE(review): `GIS`, `time`, `env`, `DM`, `CO`, the start timestamp `a`,
# and the output folder `script` are not defined in this snippet — they are
# assumed to come from earlier setup code; confirm.
gis = GIS("https://sfsu.maps.arcgis.com/sharing")
dsm = gis.content.get("b690f8d64d0145f1baf74d73690da154")  # presumably the DSM item (per name) — confirm
dem = gis.content.get("1adaa794cbf2477da2660f190c785fa7")  # presumably the DEM item (per name) — confirm
veg = gis.content.get("6341228ec82a4bfbaf52d977a14e99ce")  # vegetation item
b = time.perf_counter() / 60  # elapsed-time checkpoint, in minutes
print(f"Layers loaded: {round(b - a, 2)} minutes")

#Load vegetation layer
veg2 = veg.layers[0]  # first sublayer of the vegetation item
veg3 = veg2.url  # service URL for that sublayer
veg4 = r"memory\veg"  # name of the in-memory feature layer built from it
DM.MakeFeatureLayer(veg3, veg4)
c = time.perf_counter() / 60
print(f"Veg feature generated: {round(c - b, 2)} minutes")

#Select vegetation type
selection = 'Forest & Woodland'  #input habitat type here
where = "LIFEFORM = '" + selection + "'"
DM.SelectLayerByAttribute(veg4, "NEW_SELECTION", where, None)
d = time.perf_counter() / 60
print(f"Veg type selected: {round(d - c, 2)} minutes")

#Make conditional vegetation layer raster
vegCon = r"memory\veg2"
path = script + r"\veg.shp"  # shapefile written by the export below
# Export only the selected features to a shapefile, then rasterize on the
# LIFEFORM attribute with a cell size of 30.
CO.FeatureClassToShapefile(veg4, script)
CO.FeatureToRaster(path, "LIFEFORM", vegCon, 30)
예제 #25
0
def process_zone(zone_fc, output, zone_name, zone_id_field, zone_name_field,
                 other_keep_fields, clip_hu8, lagosne_name):
    """Standardize a spatial-division ("zone") polygon layer.

    Dissolves zone_fc on its identifying fields, clips it to the master
    study boundary (optionally also to the HU8 output), filters out sliver
    polygons, computes area/perimeter/multipart attributes, assigns stable
    `zoneid` values (re-using LAGOS-NE ZoneIDs where a matching legacy
    layer is named), flags land-border and coastal zones, assigns a state,
    prefixes every field name with the zone name, and writes the result to
    `output`.

    :param zone_fc: input polygon feature class of raw zones
    :param output: path for the finished feature class; eliminated slivers
        are saved alongside it as '<output>_not_selected' for inspection
    :param zone_name: short zone label (e.g. 'hu4'); used as the field-name
        prefix and in generated ids ('<zone_name>_<n>')
    :param zone_id_field: field identifying a unique zone (dissolve key)
    :param zone_name_field: field holding the zone's display name; renamed
        to 'name'
    :param other_keep_fields: comma-separated extra fields to retain
        ('' for none)
    :param clip_hu8: 'Y' to additionally clip to HU8_OUTPUT
    :param lagosne_name: LAGOS-NE layer name to borrow existing ZoneIDs
        from, or falsy to generate all-new ids

    NOTE(review): relies on module-level constants MASTER_CLIPPING_POLY,
    HU8_OUTPUT, LAGOSNE_GDB, LAND_BORDER, COASTLINE, STATE_FC and on
    arcpy.env.workspace being set by the caller — confirm.
    """
    # dissolve fields by the field that zone_id is based on (the field that identifies a unique zone)
    dissolve_fields = [
        f for f in "{}, {}, {}".format(zone_id_field, zone_name_field,
                                       other_keep_fields).split(', ')
        if f != ''
    ]
    print("Dissolving...")
    dissolve1 = DM.Dissolve(zone_fc, 'dissolve1', dissolve_fields)

    # update name field to match our standard
    DM.AlterField(dissolve1, zone_name_field, 'name')

    # original area

    DM.AddField(dissolve1, 'originalarea', 'DOUBLE')
    DM.CalculateField(dissolve1, 'originalarea', '!shape.area@hectares!',
                      'PYTHON')

    #clip
    print("Clipping...")
    clip = AN.Clip(dissolve1, MASTER_CLIPPING_POLY, 'clip')
    if clip_hu8 == 'Y':
        final_clip = AN.Clip(clip, HU8_OUTPUT, 'final_clip')
    else:
        final_clip = clip

    print("Selecting...")
    # calc new area, orig area pct, compactness
    DM.AddField(final_clip, 'area_ha', 'DOUBLE')
    DM.AddField(final_clip, 'originalarea_pct', 'DOUBLE')
    DM.AddField(final_clip, 'compactness', 'DOUBLE')
    # NOTE(review): dissolve1 has no 'originalarea_pct' field (only
    # 'originalarea', which the clip already carried over), so this join
    # looks like a no-op — was 'originalarea' intended? Confirm.
    DM.JoinField(final_clip, zone_id_field, dissolve1, zone_id_field,
                 'originalarea_pct')

    uCursor_fields = [
        'area_ha', 'originalarea_pct', 'originalarea', 'compactness',
        'SHAPE@AREA', 'SHAPE@LENGTH'
    ]
    with arcpy.da.UpdateCursor(final_clip, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, comp, shape_area, shape_length = row
            area = shape_area / 10000  # convert from m2 to hectares
            orig_area_pct = round(100 * area / orig_area, 2)
            # isoperimetric quotient 4*pi*A/P^2: 1 = circle, -> 0 = sliver
            comp = 4 * 3.14159 * shape_area / (shape_length**2)
            row = (area, orig_area_pct, orig_area, comp, shape_area,
                   shape_length)
            uCursor.updateRow(row)

    # if zones are present with <5% of original area and a compactness measure of <.2 (ranges from 0-1)
    # AND ALSO they are no bigger than 500 sq. km. (saves Chippewa County and a WWF), filter out
    # save eliminated polygons to temp database as a separate layer for inspection

    # Different processing for HU4 and HU8, so that they match the extent of HU8 more closely but still throw out tiny slivers
    # County also only eliminated if a tiny, tiny, tiny sliver (so: none should be eliminated)
    # NOTE(review): the comment above mentions HU4/HU8 but the tuple exempts
    # hu4, hu12, and county from filtering — confirm which is intended.
    if zone_name not in ('hu4', 'hu12', 'county'):
        selected = AN.Select(
            final_clip, 'selected',
            "originalarea_pct >= 5 OR compactness >= .2 OR area_ha > 50000")
        not_selected = AN.Select(
            final_clip, '{}_not_selected'.format(output),
            "originalarea_pct < 5 AND compactness < .2 AND area_ha < 50000")

    else:
        selected = final_clip
    # eliminate small slivers, re-calc area fields, add perimeter and multipart flag
    # leaves the occasional errant sliver but some areas over 25 hectares are more valid so this is
    # CONSERVATIVE
    print("Trimming...")
    trimmed = DM.EliminatePolygonPart(selected,
                                      'trimmed',
                                      'AREA',
                                      '25 Hectares',
                                      part_option='ANY')

    # gather up a few calculations into one cursor because this is taking too long over the HU12 layer
    DM.AddField(trimmed, 'perimeter_m', 'DOUBLE')
    DM.AddField(trimmed, 'multipart', 'TEXT', field_length=1)
    uCursor_fields = [
        'area_ha', 'originalarea_pct', 'originalarea', 'perimeter_m',
        'multipart', 'SHAPE@'
    ]
    with arcpy.da.UpdateCursor(trimmed, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, perim, multipart, shape = row
            area = shape.area / 10000  # convert to hectares from m2
            orig_area_pct = round(100 * area / orig_area, 2)
            perim = shape.length

            # multipart flag calc
            if shape.isMultipart:
                multipart = 'Y'
            else:
                multipart = 'N'
            row = (area, orig_area_pct, orig_area, perim, multipart, shape)
            uCursor.updateRow(row)

    # delete intermediate fields
    DM.DeleteField(trimmed, 'compactness')
    DM.DeleteField(trimmed, 'originalarea')

    print("Zone IDs....")
    # link to LAGOS-NE zone IDs
    DM.AddField(trimmed, 'zoneid', 'TEXT', field_length=40)
    trimmed_lyr = DM.MakeFeatureLayer(trimmed, 'trimmed_lyr')
    if lagosne_name:
        # join to the old master GDB path on the same master field and copy in the ids
        old_fc = os.path.join(LAGOSNE_GDB, lagosne_name)
        old_fc_lyr = DM.MakeFeatureLayer(old_fc, 'old_fc_lyr')
        if lagosne_name == 'STATE' or lagosne_name == 'COUNTY':
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr, 'FIPS')
        else:
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr,
                       zone_id_field)  # usually works because same source data

        # copy
        DM.CalculateField(trimmed_lyr, 'zoneid',
                          '!{}.ZoneID!.lower()'.format(lagosne_name), 'PYTHON')
        DM.RemoveJoin(trimmed_lyr)

    # generate new zone ids
    old_ids = [row[0] for row in arcpy.da.SearchCursor(trimmed, 'zoneid')]
    with arcpy.da.UpdateCursor(trimmed, 'zoneid') as cursor:
        counter = 1
        for row in cursor:
            if not row[
                    0]:  # if no existing ID borrowed from LAGOS-NE, assign a new one
                new_id = '{name}_{num}'.format(name=zone_name, num=counter)

                # ensures new ids don't re-use old numbers but fills in all positive numbers eventually
                while new_id in old_ids:
                    counter += 1
                    new_id = '{name}_{num}'.format(name=zone_name, num=counter)
                row[0] = new_id
                cursor.updateRow(row)
                counter += 1

    print("Edge flags...")
    # add flag fields
    DM.AddField(trimmed, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(trimmed, 'oncoast', 'TEXT', field_length=2)

    # identify border zones (Y for intersecting, N for the rest via switch)
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(trimmed_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # State?
    # Prefer the state containing the zone's center; fall back to any
    # intersecting state when no center match exists.
    DM.AddField(trimmed, "state", 'text', field_length='2')
    state_center = arcpy.SpatialJoin_analysis(
        trimmed,
        STATE_FC,
        'state_center',
        join_type='KEEP_COMMON',
        match_option='HAVE_THEIR_CENTER_IN')
    state_intersect = arcpy.SpatialJoin_analysis(trimmed,
                                                 STATE_FC,
                                                 'state_intersect',
                                                 match_option='INTERSECT')
    state_center_dict = {
        row[0]: row[1]
        for row in arcpy.da.SearchCursor(state_center, ['ZoneID', 'STUSPS'])
    }
    state_intersect_dict = {
        row[0]: row[1]
        for row in arcpy.da.SearchCursor(state_intersect, ['ZoneID', 'STUSPS'])
    }
    with arcpy.da.UpdateCursor(trimmed, ['ZoneID', 'state']) as cursor:
        for updateRow in cursor:
            keyValue = updateRow[0]
            if keyValue in state_center_dict:
                updateRow[1] = state_center_dict[keyValue]
            else:
                updateRow[1] = state_intersect_dict[keyValue]
            cursor.updateRow(updateRow)

    # glaciation status?
    # TODO as version 0.6

    # preface the names with the zones
    DM.DeleteField(trimmed, 'ORIG_FID')
    fields = [
        f.name for f in arcpy.ListFields(trimmed, '*')
        if f.type not in ('OID',
                          'Geometry') and not f.name.startswith('Shape_')
    ]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            DM.AlterField(trimmed, f, new_fname, clear_field_alias='TRUE')
        # sick of debugging the required field message-I don't want to change required fields anyway
        except:
            pass

    DM.CopyFeatures(trimmed, output)

    # cleanup
    lyr_objects = [
        lyr_object for var_name, lyr_object in locals().items()
        if var_name.endswith('lyr')
    ]
    temp_fcs = arcpy.ListFeatureClasses('*')
    for l in lyr_objects + temp_fcs:
        DM.Delete(l)
예제 #26
0
    def doFishnet(self):
        #### Initial Data Assessment ####
        printOHSSection(84428, prependNewLine=True)
        printOHSSubject(84431, addNewLine=False)

        #### Find Unique Locations ####
        msg = ARCPY.GetIDMessage(84441)
        ARCPY.SetProgressor("default", msg)
        initCount = UTILS.getCount(self.ssdo.inputFC)
        self.checkIncidents(initCount)
        collectedPointFC = UTILS.returnScratchName("Collect_InitTempFC")
        collInfo = EVENTS.collectEvents(self.ssdo, collectedPointFC)
        self.cleanUpList.append(collectedPointFC)
        collSSDO = SSDO.SSDataObject(collectedPointFC,
                                     explicitSpatialRef=self.ssdo.spatialRef,
                                     useChordal=True)
        collSSDO.obtainDataGA(collSSDO.oidName)
        #################################

        if self.boundaryFC:
            #### Assure Boundary FC Has Area and Obtain Chars ####
            self.checkBoundary()

        #### Location Outliers ####
        lo = UTILS.LocationInfo(collSSDO,
                                concept="EUCLIDEAN",
                                silentThreshold=True,
                                stdDeviations=3)
        printOHSLocationalOutliers(lo, aggType=self.aggType)

        #### Agg Header ####
        printOHSSection(84444)
        if self.boundaryFC:
            extent = self.boundExtent
            forMercExtent = self.boundExtent
            countMSGNumber = 84453

        else:
            countMSGNumber = 84452
            extent = None
            forMercExtent = collSSDO.extent

        if collSSDO.useChordal:
            extentFC_GCS = UTILS.returnScratchName("TempGCS_Extent")
            extentFC_Merc = UTILS.returnScratchName("TempMercator_Extent")
            points = NUM.array([[forMercExtent.XMin, forMercExtent.YMax],
                                [forMercExtent.XMax, forMercExtent.YMin]])
            UTILS.createPointFC(extentFC_GCS,
                                points,
                                spatialRef=collSSDO.spatialRef)
            DM.Project(extentFC_GCS, extentFC_Merc, mercatorProjection)
            d = ARCPY.Describe(extentFC_Merc)
            extent = d.extent
            fishOutputCoords = mercatorProjection
        else:
            fishOutputCoords = self.ssdo.spatialRef

        #### Fish Subject ####
        printOHSSubject(84449, addNewLine=False)
        dist = scaleDecision(lo.nonZeroAvgDist, lo.nonZeroMedDist)
        area = 0.0

        #### Construct Fishnet ####
        fish = UTILS.FishnetInfo(collSSDO, area, extent, explicitCellSize=dist)
        dist = fish.quadLength
        snap = self.ssdo.distanceInfo.linearUnitString(dist)

        #### Cell Size Answer ####
        snapStr = self.ssdo.distanceInfo.printDistance(dist)
        msg = ARCPY.GetIDMessage(84450).format(snapStr)
        printOHSAnswer(msg)
        self.fish = fish

        #### Fishnet Count Subject ####
        printOHSSubject(84451, addNewLine=False)

        #### Create Temp Fishnet Grid ####
        gridFC = UTILS.returnScratchName("Fishnet_TempFC")
        self.cleanUpList.append(gridFC)

        #### Apply Output Coords to Create Fishnet ####
        oldSpatRef = ARCPY.env.outputCoordinateSystem
        ARCPY.env.outputCoordinateSystem = fishOutputCoords

        #### Fish No Extent ####
        oldExtent = ARCPY.env.extent
        ARCPY.env.extent = ""

        #### Apply Max XY Tolerance ####
        fishWithXY = UTILS.funWithXYTolerance(DM.CreateFishnet,
                                              self.ssdo.distanceInfo)

        #### Execute Fishnet ####
        fishWithXY(gridFC, self.fish.origin, self.fish.rotate,
                   self.fish.quadLength, self.fish.quadLength,
                   self.fish.numRows, self.fish.numCols, self.fish.corner,
                   "NO_LABELS", self.fish.extent, "POLYGON")

        #### Project Back to GCS if Use Chordal ####
        if collSSDO.useChordal:
            gridFC_ProjBack = UTILS.returnScratchName("TempFC_Proj")
            DM.Project(gridFC, gridFC_ProjBack, collSSDO.spatialRef)
            UTILS.passiveDelete(gridFC)
            gridFC = gridFC_ProjBack

        #### Set Env Output Coords Back ####
        ARCPY.env.outputCoordinateSystem = oldSpatRef

        #### Create Empty Field Mappings to Ignore Atts ####
        fieldMap = ARCPY.FieldMappings()
        fieldMap.addTable(self.ssdo.inputFC)
        fieldMap.removeAll()

        #### Fishnet Count Answer ####
        printOHSAnswer(ARCPY.GetIDMessage(countMSGNumber))

        #### Create Weighted Fishnet Grid ####
        tempFC = UTILS.returnScratchName("Optimized_TempFC")
        self.cleanUpList.append(tempFC)
        joinWithXY = UTILS.funWithXYTolerance(ANA.SpatialJoin,
                                              self.ssdo.distanceInfo)
        joinWithXY(gridFC, self.ssdo.inputFC, tempFC, "JOIN_ONE_TO_ONE",
                   "KEEP_ALL", "EMPTY")

        #### Clean Up Temp FCs ####
        UTILS.passiveDelete(gridFC)

        #### Remove Locations Outside Boundary FC ####
        featureLayer = "ClippedPointFC"
        DM.MakeFeatureLayer(tempFC, featureLayer)
        if self.boundaryFC:
            msg = ARCPY.GetIDMessage(84454)
            ARCPY.SetProgressor("default", msg)
            DM.SelectLayerByLocation(featureLayer, "INTERSECT",
                                     self.boundaryFC, "#", "NEW_SELECTION")
            DM.SelectLayerByLocation(featureLayer, "INTERSECT", "#", "#",
                                     "SWITCH_SELECTION")
            DM.DeleteFeatures(featureLayer)
        else:
            if additionalZeroDistScale == "ALL":
                msg = ARCPY.GetIDMessage(84455)
                ARCPY.SetProgressor("default", msg)
                DM.SelectLayerByAttribute(featureLayer, "NEW_SELECTION",
                                          '"Join_Count" = 0')
                DM.DeleteFeatures(featureLayer)

            else:
                distance = additionalZeroDistScale * fish.quadLength
                distanceStr = self.ssdo.distanceInfo.linearUnitString(
                    distance, convert=True)
                nativeStr = self.ssdo.distanceInfo.printDistance(distance)
                msg = "Removing cells further than %s from input pointsd...."
                ARCPY.AddMessage(msg % nativeStr)
                DM.SelectLayerByLocation(featureLayer, "INTERSECT",
                                         self.ssdo.inputFC, distanceStr,
                                         "NEW_SELECTION")
                DM.SelectLayerByLocation(featureLayer, "INTERSECT", "#", "#",
                                         "SWITCH_SELECTION")
                DM.DeleteFeatures(featureLayer)

        DM.Delete(featureLayer)
        del collSSDO

        ARCPY.env.extent = oldExtent
        self.createAnalysisSSDO(tempFC, "JOIN_COUNT")