Example #1
    def createPMPfc():

        arcpy.AddMessage(
            "\nCreating feature class: 'PMP_Points' in Scratch.gdb...")
        dm.MakeFeatureLayer(
            home + r"\Input\Non_Storm_Data.gdb\Vector_Grid",
            "vgLayer")  # make a feature layer of vector grid cells
        dm.SelectLayerByLocation(
            "vgLayer", "INTERSECT", aoiBasin
        )  # select the vector grid cells that intersect the aoiBasin polygon
        dm.MakeFeatureLayer(home + r"\Input\Non_Storm_Data.gdb\Grid_Points",
                            "gpLayer")  # make a feature layer of grid points
        dm.SelectLayerByLocation(
            "gpLayer", "HAVE_THEIR_CENTER_IN", "vgLayer"
        )  # select the grid points within the vector grid selection
        con.FeatureClassToFeatureClass(
            "gpLayer", env.scratchGDB,
            "PMP_Points")  # save feature layer as "PMP_Points" feature class
        arcpy.AddMessage("(" + str(dm.GetCount("gpLayer")) +
                         " grid points will be analyzed)\n")

        # Add PMP Fields
        for dur in durList:
            arcpy.AddMessage("\t...adding field: PMP_" + str(dur))
            dm.AddField(env.scratchGDB + "\\PMP_Points", "PMP_" + str(dur),
                        "DOUBLE")

        # Add STORM fields (these string values identify the driving storm by SPAS ID number)
        for dur in durList:
            arcpy.AddMessage("\t...adding field: STORM_" + str(dur))
            dm.AddField(env.scratchGDB + "\\PMP_Points", "STORM_" + str(dur),
                        "TEXT", "", "", 16)

        return
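
These snippets rely on module aliases and script-level globals defined outside the excerpt. A minimal sketch of the assumed context for this first example (paths and values are hypothetical; later examples alias the same arcpy modules as DM, mgmt, mg, management, or arcmgt, plus AN for arcpy.analysis and da for arcpy.da):

import arcpy
from arcpy import env
from arcpy import management as dm
from arcpy import conversion as con

home = r'C:\PMP_Tool'                      # hypothetical project folder
aoiBasin = r'C:\PMP_Tool\Input\basin.shp'  # hypothetical basin polygon feature class
durList = ['06', '12', '24', '48', '72']   # hypothetical storm durations (hours)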
Example #2
def process_ws(ws_fc, zone_name):

    # generate new zone ids
    DM.AddField(ws_fc, 'zoneid', 'TEXT', field_length=10)
    DM.CalculateField(ws_fc, 'zoneid', '!lagoslakeid!', 'PYTHON')
    ws_fc_lyr = DM.MakeFeatureLayer(ws_fc)

    # multipart
    DM.AddField(ws_fc, 'ismultipart', 'TEXT', field_length=2)
    with arcpy.da.UpdateCursor(ws_fc, ['ismultipart', 'SHAPE@']) as u_cursor:
        for row in u_cursor:
            if row[1].isMultipart:
                row[0] = 'Y'
            else:
                row[0] = 'N'
            u_cursor.updateRow(row)


    print("Edge flags...")
    # add flag fields
    DM.AddField(ws_fc, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(ws_fc, 'oncoast', 'TEXT', field_length=2)

    # identify border zones
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # States (state_geo is unused; find_states uses the module-level STATES_GEO)
    # state_geo = r'D:\Continental_Limnology\Data_Working\LAGOS_US_GIS_Data_v0.6.gdb\Spatial_Classifications\state'
    find_states(ws_fc, STATES_GEO)
    # glaciation status?
    calc_glaciation(ws_fc, 'zoneid')

    # preface the names with the zones
    DM.DeleteField(ws_fc, 'ORIG_FID')
    fields = [f.name for f in arcpy.ListFields(ws_fc, '*') if f.type not in ('OID', 'Geometry') and not f.name.startswith('Shape_')]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            DM.AlterField(ws_fc, f, new_fname, clear_field_alias='TRUE')
        # skip required fields, which shouldn't be renamed anyway, instead of
        # debugging the required-field error message
        except Exception:
            pass

    # cleanup
    lyr_objects = [lyr_object for var_name, lyr_object in locals().items() if var_name.endswith('lyr')]
    for l in lyr_objects:
        DM.Delete(l)
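
A hypothetical invocation of process_ws(), assuming module-level LAND_BORDER, COASTLINE, and STATES_GEO feature classes plus the helper functions find_states() and calc_glaciation() (the latter appears in Example #12); all paths are examples only:

LAND_BORDER = r'C:\gis\boundaries.gdb\us_land_border'
COASTLINE = r'C:\gis\boundaries.gdb\us_coastline'
STATES_GEO = r'C:\gis\boundaries.gdb\state'

process_ws(r'C:\gis\lagos.gdb\watersheds', 'ws')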
Example #3
    def outputPMP(type, area, outPath):
        desc = arcpy.Describe(basin)
        basinName = desc.baseName
        pmpPoints = env.scratchGDB + "\\PMP_Points"  # Location of 'PMP_Points' feature class which will provide data for output

        outType = type[:1]
        outArea = str(int(round(area, 0))) + "sqmi"
        outFC = outType + "_" + outArea  #I don't think I need this.....
        arcpy.AddMessage("\nCopying PMP_Points feature class to " + outFC +
                         "...")  #outFC might be replaced with outpath...
        dm.Merge(
            pmpPoints, outPath
        )  # merge the scratch feature layer(s) of vector grid cells into the outputs

        arcpy.AddMessage("\nCreating Basin Summary Table...")
        tableName = type + "_PMP_Basin_Average" + "_" + outArea
        tablePath = env.scratchGDB + "\\" + tableName
        dm.CreateTable(env.scratchGDB, tableName)  # Create blank table
        cursor = arcpy.da.InsertCursor(
            tablePath,
            "*")  # Create Insert cursor and add a blank row to the table
        cursor.insertRow([0])
        del cursor

        dm.AddField(tablePath, "STORM_TYPE", "TEXT", "", "", 10,
                    "Storm Type")  # Create "Storm Type" field
        dm.CalculateField(tablePath, "STORM_TYPE", "'" + type + "'",
                          "PYTHON_9.3")  # populate storm type field

        for field in arcpy.ListFields(
                pmpPoints, "PMP_*"
        ):  # Add fields for each PMP duration and calculate the basin average
            fieldName = field.name
            fieldAve = basinAve(
                basin, fieldName
            )  # Calls the basinAve() function - returns the average (weighted or not)
            dm.AddField(tablePath, fieldName, "DOUBLE", "",
                        2)  # Add duration field
            dm.CalculateField(tablePath, fieldName, fieldAve,
                              "PYTHON_9.3")  # Assigns the basin average

        arcpy.AddMessage("\nSummary table complete.")

        basAveTables.append(tablePath)

        return
def createBoundingBoxPolygon(mxd_path, bkmk_name, out_fc):
	"""Create a polygon that from the coordinates of a bookmark bounding box"""

	geom_type = 'POLYGON'
	oregon_spn = arcpy.SpatialReference(2913)
	management.CreateFeatureclass(os.path.dirname(out_fc),
		os.path.basename(out_fc), geom_type, spatial_reference=oregon_spn)

	name_field, f_type = 'name', 'TEXT'
	management.AddField(out_fc, name_field, f_type)

	# drop default field
	drop_field = 'Id'
	management.DeleteField(out_fc, drop_field)

	i_fields = ['SHAPE@', name_field]
	i_cursor = da.InsertCursor(out_fc, i_fields)

	mxd = mapping.MapDocument(mxd_path)
	for bkmk in arcpy.mapping.ListBookmarks(mxd, bkmk_name):
		extent = bkmk.extent
		pt_array = arcpy.Array()

		pt_array.add(arcpy.Point(extent.XMin, extent.YMin))
		pt_array.add(arcpy.Point(extent.XMin, extent.YMax))
		pt_array.add(arcpy.Point(extent.XMax, extent.YMax))
		pt_array.add(arcpy.Point(extent.XMax, extent.YMin))
		# add first point again to close polygon
		pt_array.add(arcpy.Point(extent.XMin, extent.YMin))

		i_cursor.insertRow((arcpy.Polygon(pt_array), bkmk.name))

	del i_cursor
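
A hypothetical call, assuming the map document contains a bookmark named 'Downtown' (arcpy.mapping.ListBookmarks matches on the name passed in):

createBoundingBoxPolygon(r'C:\maps\city.mxd', 'Downtown',
                         r'C:\gis\scratch.gdb\downtown_bbox')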
Example #5
def create_points_feature_class(fc, sr=None):

    arcpy.env.addOutputsToMap = False

    sr = sr or arcpy.env.outputCoordinateSystem
    if sr is None:
        arcpy.AddError('No spatial reference system.')
        return None

    scratch_fc = os.path.join(arcpy.env.scratchWorkspace, os.path.basename(fc))

    mgmt.CreateFeatureclass(*os.path.split(scratch_fc),
                            'POINT',
                            spatial_reference=sr)
    mgmt.AddField(scratch_fc, 'ELEVATION', 'DOUBLE')
    mgmt.AddField(scratch_fc, 'TIME', 'TEXT', field_length=64)
    mgmt.AddField(scratch_fc, 'NAME', 'TEXT', field_length=64)
    mgmt.AddField(scratch_fc, 'DESCRIPTION', 'TEXT', field_length=64)
    mgmt.AddField(scratch_fc, 'SYMBOL', 'TEXT', field_length=64)
    mgmt.AddField(scratch_fc, 'TYPE', 'TEXT', field_length=64)
    mgmt.AddField(scratch_fc, 'SAMPLES', 'LONG')

    if fc != scratch_fc:
        mgmt.Copy(scratch_fc, fc)
        mgmt.Delete(scratch_fc)

    return fc
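
A hypothetical call; create_points_feature_class() falls back to arcpy.env.outputCoordinateSystem when no spatial reference is passed, so supply one or set the environment first:

sr = arcpy.SpatialReference(26910)  # e.g. NAD83 / UTM zone 10N
create_points_feature_class(r'C:\gis\work.gdb\gps_points', sr)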
def deduplicate(merged_file, rule_dictionary, unique_id='lagoslakeid'):
    order_fields = []
    sort_fields = []
    for i in range(1, len(rule_dictionary) + 1):  # priority order
        rule = rule_dictionary[i]
        if rule['rule'] == 'min':
            order_fields.append('{} asc'.format(rule['field']))
        elif rule['rule'] == 'max':
            order_fields.append('{} desc'.format(rule['field']))
        else:
            sort_field = '{}_SORT'.format(rule['field'])
            sort_fields.append(sort_field)
            if not arcpy.ListFields(merged_file, sort_field):
                DM.AddField(merged_file, sort_field, 'SHORT')
            # Calculate new sort field with numeric order based on custom sort order
            with arcpy.da.UpdateCursor(merged_file,
                                       [rule['field'], sort_field]) as cursor:
                for row in cursor:
                    row[1] = rule['sort'].index(row[0])
                    cursor.updateRow(row)

            order_fields.append('{} asc'.format(sort_field))
    order_by_clause = 'ORDER BY {}'.format(', '.join(order_fields))
    print(order_by_clause)

    print("Finding duplicate ids...")
    freq = arcpy.Frequency_analysis(merged_file, 'in_memory/freq', unique_id)
    dupe_ids = [
        row[0] for row in arcpy.da.SearchCursor(freq, unique_id,
                                                '''"FREQUENCY" > 1''')
    ]

    # the unique_id field type doesn't change between rows, so look it up once
    id_field_type = arcpy.ListFields(merged_file,
                                     '{}*'.format(unique_id))[0].type
    for id in dupe_ids:
        if id_field_type == 'String':
            filter = '''{} = '{}' '''.format(unique_id, id)
        else:
            filter = '''{} = {} '''.format(unique_id, id)
        with arcpy.da.UpdateCursor(
                merged_file, '*', filter,
                sql_clause=(None, order_by_clause)) as dupes_cursor:
            counter = 0
            # Deletes all but the first sorted row.
            for dupe_row in dupes_cursor:
                print(dupe_row)
                time.sleep(.1)
                if counter != 0:
                    print("DUPLICATE")
                    dupes_cursor.deleteRow()
                counter += 1
        print(' ')

    arcpy.Delete_management('in_memory/freq')
    for f in sort_fields:
        DM.DeleteField(merged_file, f)
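
The shape of rule_dictionary is implicit in the loop above: integer keys give the priority order, every rule names a field, and a rule other than 'min'/'max' must carry its own 'sort' list. A hypothetical example:

rules = {
    1: {'rule': 'custom', 'field': 'source', 'sort': ['field_survey', 'model', 'legacy']},
    2: {'rule': 'max', 'field': 'area_ha'},
    3: {'rule': 'min', 'field': 'flag_count'},
}
deduplicate(r'C:\gis\work.gdb\lakes_merged', rules)  # path is an example only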
Example #7
def educ_to_xy(layer, out_dir, scaling_factor):
    levels = ["less_than_hs", "high_school", "some_college", "bachelors", "graduate"]
    for level in levels:
        trunc_level = level[0:10]
        # Add and calculate the new field
        field_name = level + str(scaling_factor)
        mg.AddField(layer, field_name, "SHORT")
        expr = "!{}! / {}".format(trunc_level, scaling_factor)
        mg.CalculateField(layer, field_name, expr)
        # Generate the dots
        out_dots = level + str(scaling_factor)
        mg.CreateRandomPoints(out_path=out_dir, out_name=out_dots,
                              constraining_feature_class=layer,
                              number_of_points_or_field=field_name)
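
A hypothetical call that generates one random dot per 100 people in each educational-attainment class; the truncation to level[0:10] suggests shapefile-style 10-character field names in the source layer:

educ_to_xy('tracts_lyr', r'C:\gis\dots.gdb', 100)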
def createExtentFeatureClass():
    """Create a feature class to the hold the map extent geometries"""

    geom_type = 'POLYGON'
    oregon_spn = arcpy.SpatialReference(2913)

    management.CreateFeatureclass(path.dirname(pylon_extents),
                                  path.basename(pylon_extents),
                                  geom_type,
                                  spatial_reference=oregon_spn)

    f_type = 'TEXT'
    management.AddField(pylon_extents, name_field, f_type)

    drop_field = 'Id'
    management.DeleteField(pylon_extents, drop_field)
def generateMatchCode():
    """Generate a code for the collapse dual carriageway tool that will indicate if two
	segments are eligible to be snapped to each other"""

    service_dict = {'frequent': 1, 'standard': 2, 'rush-hour': 3}

    # create a copy of service_level_routes.shp so that the original is not modified
    management.CopyFeatures(serv_level_routes_src, serv_level_routes)

    merge_field, f_type = 'merge_id', 'LONG'
    management.AddField(serv_level_routes, merge_field, f_type)

    u_fields = ['serv_level', 'route_type', merge_field]
    with da.UpdateCursor(serv_level_routes, u_fields) as u_cursor:
        for service, r_type, merge in u_cursor:
            # match field must be of type int
            merge = int(str(service_dict[service]) + str(int(r_type)))
            u_cursor.updateRow((service, r_type, merge))
Example #10
def generateMatchCode():
    """Generate a code for the collapse dual carriageway tool that will indicate if two
	segments are eligible to be snapped to each other"""

    service_dict = {'frequent': 1, 'standard': 2, 'rush-hour': 3}

    # create a copy of distinct_routes.shp so that the original is not modified
    management.CopyFeatures(distinct_routes_src, distinct_routes)

    merge_field, f_type = 'merge_id', 'LONG'
    management.AddField(distinct_routes, merge_field, f_type)

    u_fields = ['route_id', 'serv_level', merge_field]
    with da.UpdateCursor(distinct_routes, u_fields) as u_cursor:
        for route, service, merge in u_cursor:
            # create a unique id based on frequency and route that is an integer
            merge = int(str(int(route)) + '000' + str(service_dict[service]))
            u_cursor.updateRow((route, service, merge))
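
A worked example of the merge_id encoding above, with hypothetical values: route 9 at service level 'frequent' (code 1) packs into a single integer.

service_dict = {'frequent': 1, 'standard': 2, 'rush-hour': 3}
route_id, serv_level = 9, 'frequent'
merge_id = int(str(route_id) + '000' + str(service_dict[serv_level]))
assert merge_id == 90001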
Example #11
def validateGeometry():
    """Check for geometry errors and multipart features that may have been introduced
	in the manual editing process of creating the offsets"""

    # Check for geometry errors, this tool doesn't flag a lot of the aspects I'm
    # interested in, thus the other steps below
    error_table = os.path.join(temp_dir, 'carto_errors.dbf')
    management.CheckGeometry(offset_routes, error_table)

    # Identify any multipart features
    multipart_dump = os.path.join(temp_dir, 'multipart_dump.shp')
    management.MultipartToSinglepart(offset_routes, multipart_dump)

    multipart_dict = {}
    dump_fields = ['OID@', 'ORIG_FID']
    with da.SearchCursor(multipart_dump, dump_fields) as s_cursor:
        for oid, orig_id in s_cursor:
            if orig_id not in multipart_dict:
                multipart_dict[orig_id] = 1
            else:
                multipart_dict[orig_id] += 1

    print "Features with the following fid's are multipart:"
    for orig_id, count in multipart_dict.iteritems():
        if count > 1:
            print orig_id

    # Find other errors like shared geometries and deadends using the merge divided
    # roads tool, I'm not actually interested in the output of this tool but rather the
    # validation output that it generates
    merge_field, field_type = 'merge_id', 'LONG'
    management.AddField(offset_routes, merge_field, field_type)

    # validation output will be logged here (note that the user name portion may vary):
    # C:\Users\humphrig\AppData\Local\ESRI\Geoprocessing
    merge_distance = 100  # feet
    validation_merge = os.path.join('in_memory', 'validation_merge')
    cartography.MergeDividedRoads(offset_routes, merge_field, merge_distance,
                                  validation_merge)

    # drop the merge field as it is no longer needed
    management.DeleteField(offset_routes, merge_field)
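
The multipart tally above can be written more compactly with collections.Counter; a behavior-equivalent sketch:

from collections import Counter

with da.SearchCursor(multipart_dump, ['ORIG_FID']) as s_cursor:
    multipart_counts = Counter(row[0] for row in s_cursor)

print("Features with the following fid's are multipart:")
for orig_id in (fid for fid, n in multipart_counts.items() if n > 1):
    print(orig_id)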
Example #12
def calc_glaciation(fc, zone_field):
    # tab area
    g_field = '{}_glaciatedlatewisc'.format(os.path.basename(fc))
    AN.TabulateIntersection(fc, zone_field, GLACIAL_EXTENT, 'in_memory/glacial_tab')
    glacial_pct = {r[0]:r[1] for r in arcpy.da.SearchCursor('in_memory/glacial_tab', [zone_field, 'PERCENTAGE'])}
    DM.AddField(fc, g_field, 'TEXT', field_length=20)
    with arcpy.da.UpdateCursor(fc, [zone_field, g_field]) as u_cursor:
        for row in u_cursor:
            zoneid, glaciation = row
            if zoneid not in glacial_pct:
                glaciation = 'Not_Glaciated'
            else:
                if glacial_pct[zoneid] >= 99.99:
                    glaciation = 'Glaciated'
                elif glacial_pct[zoneid] < 0.01:
                    glaciation = 'Not_Glaciated'
                else:
                    glaciation = 'Partially_Glaciated'
            u_cursor.updateRow((zoneid, glaciation))
    DM.Delete('in_memory/glacial_tab')
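
calc_glaciation() depends on a module-level GLACIAL_EXTENT polygon feature class along with the DM/AN aliases; a hypothetical setup and call:

GLACIAL_EXTENT = r'C:\gis\geology.gdb\late_wisconsin_extent'  # hypothetical path
calc_glaciation(r'C:\gis\lagos.gdb\hu12', 'zoneid')           # zones must carry the id field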
Example #13
def createInsetBox():
    """The bus mall inset covers a portion of the city center map so that
	needs to be reflected in the inset box, using the inflection point and the
	city center bound box create an fc that contains the inset box"""

    inflect_pt = {'x': 7649075, 'y': 686384}
    bkmk_dict = getBookmarkBbox(city_center_mxd, city_center_bkmk)

    geom_type = 'POLYGON'
    oregon_spn = arcpy.SpatialReference(2913)
    management.CreateFeatureclass(os.path.dirname(inset_box),
                                  os.path.basename(inset_box),
                                  geom_type,
                                  spatial_reference=oregon_spn)

    f_name, f_type = 'name', 'TEXT'
    management.AddField(inset_box, f_name, f_type)

    drop_field = 'Id'
    arcpy.management.DeleteField(inset_box, drop_field)

    i_fields = ['Shape@', f_name]
    i_cursor = da.InsertCursor(inset_box, i_fields)

    ap_array = arcpy.Array()
    ap_array.add(arcpy.Point(bkmk_dict['x-min'], bkmk_dict['y-min']))
    ap_array.add(arcpy.Point(bkmk_dict['x-min'], bkmk_dict['y-max']))
    ap_array.add(arcpy.Point(bkmk_dict['x-max'], bkmk_dict['y-max']))
    ap_array.add(arcpy.Point(bkmk_dict['x-max'], inflect_pt['y']))
    ap_array.add(arcpy.Point(inflect_pt['x'], inflect_pt['y']))
    ap_array.add(arcpy.Point(inflect_pt['x'], bkmk_dict['y-min']))
    # add first point again to close polygon
    ap_array.add(arcpy.Point(bkmk_dict['x-min'], bkmk_dict['y-min']))

    i_cursor.insertRow((arcpy.Polygon(ap_array), 'Portland City Center'))

    del i_cursor
Example #14
def lake_from_to(nhd_subregion_gdb, output_table):
    arcpy.env.workspace = 'in_memory'
    waterbody0 = os.path.join(nhd_subregion_gdb, 'NHDWaterbody')
    network = os.path.join(nhd_subregion_gdb, 'Hydrography', 'HYDRO_NET')
    junctions0 = os.path.join(nhd_subregion_gdb, 'HYDRO_NET_Junctions')

    # use layers for selections. We will only work with lakes over 1 hectare for this tool.
    waterbody = DM.MakeFeatureLayer(waterbody0,
                                    'waterbody',
                                    where_clause=LAGOS_LAKE_FILTER)
    num_wbs = int(arcpy.GetCount_management(waterbody).getOutput(0))
    junctions = DM.MakeFeatureLayer(junctions0, 'junctions')

    DM.SelectLayerByLocation(junctions, 'INTERSECT', waterbody, '1 Meters',
                             'NEW_SELECTION')
    junctions_1ha = DM.MakeFeatureLayer(junctions, 'junctions_1ha')

    # insert results into output table
    DM.CreateTable(os.path.dirname(output_table),
                   os.path.basename(output_table))
    DM.AddField(output_table, 'FROM_PERMANENT_ID', 'TEXT', field_length=40)
    DM.AddField(output_table, 'TO_PERMANENT_ID', 'TEXT', field_length=40)

    # create a list to hold results in memory
    results = []

    counter = 0
    progress = .01
    arcpy.AddMessage("Starting network tracing...")
    with arcpy.da.SearchCursor(waterbody, 'Permanent_Identifier') as cursor:
        for row in cursor:
            # set up a progress printer
            counter += 1
            if counter >= float(num_wbs) * progress:
                progress += .01
                arcpy.AddMessage("{}% complete...".format(
                    round(progress * 100), 1))

            # select this lake
            id = row[0]
            where_clause = """"{0}" = '{1}'""".format('Permanent_Identifier',
                                                      id)
            this_waterbody = DM.MakeFeatureLayer(waterbody, 'this_waterbody',
                                                 where_clause)

            # select junctions overlapping this lake. only the downstream one matters, rest have no effect
            DM.SelectLayerByLocation(junctions_1ha, 'INTERSECT',
                                     this_waterbody, '1 Meters')
            count_junctions = int(
                arcpy.GetCount_management(junctions_1ha).getOutput(0))
            if count_junctions == 0:
                # add a row with no "TO" lake to the results
                results.append({'FROM': id, 'TO': None})
            else:
                # copy with selection on
                this_junctions = DM.MakeFeatureLayer(junctions_1ha,
                                                     'this_junctions')
                DM.TraceGeometricNetwork(network, 'downstream', this_junctions,
                                         'TRACE_DOWNSTREAM')
                # select lakes that intersect the downstream network with a tolerance of 1 meters
                DM.SelectLayerByLocation(waterbody, 'INTERSECT',
                                         'downstream/NHDFlowline', '1 Meters',
                                         'NEW_SELECTION')
                # remove this lake
                DM.SelectLayerByAttribute(waterbody, 'REMOVE_FROM_SELECTION',
                                          where_clause)
                # get the count; if it's 0, record this lake with no "TO" lake
                count_waterbody = int(
                    arcpy.GetCount_management(waterbody).getOutput(0))
                # copy those into the table that you're storing stuff in
                if count_waterbody == 0:
                    # add a row with no "TO" lake to the results
                    results.append({'FROM': id, 'TO': None})
                else:
                    # for each ID, how am I getting those
                    to_ids = [
                        row[0] for row in arcpy.da.SearchCursor(
                            waterbody, 'Permanent_Identifier')
                    ]
                    for to_id in to_ids:
                        result = {'FROM': id, 'TO': to_id}
                        results.append(result)

                # delete all the intermediates
            DM.SelectLayerByAttribute(waterbody, 'CLEAR_SELECTION')
            for item in [this_waterbody, this_junctions, 'downstream']:
                DM.Delete(item)

    # insert the results in the table
    insert_cursor = arcpy.da.InsertCursor(
        output_table, ['FROM_PERMANENT_ID', 'TO_PERMANENT_ID'])
    for result in results:
        insert_cursor.insertRow([result['FROM'], result['TO']])

    # delete everything
    for item in [waterbody, junctions, junctions_1ha, 'in_memory']:
        DM.Delete(item)
    arcpy.AddMessage("Completed.")
Example #15
def georeference_lakes(
    lake_points_fc,
    out_fc,
    lake_id_field,
    lake_name_field,
    lake_county_field='',
    state='',
    master_gdb=r'C:\Users\smithn78\Dropbox\CL_HUB_GEO\Lake_Georeferencing\Masters_for_georef.gdb'
):
    """
    Evaluate water quality sampling point locations and either assign the point to a lake polygon or flag the
    point for manual review.
    :param lake_points_fc:
    :param out_fc:
    :param lake_id_field:
    :param lake_name_field:
    :param lake_county_field:
    :param state:
    :param master_gdb: Location of master geodatabase used for linking
    :return:
    """
    master_lakes_fc = os.path.join(master_gdb, MASTER_LAKES_FC)
    master_lakes_lines = os.path.join(master_gdb, MASTER_LAKES_LINES)
    master_streams_fc = os.path.join(master_gdb, MASTER_STREAMS_FC)
    master_xwalk = os.path.join(master_gdb, MASTER_XWALK)

    # setup
    arcpy.AddMessage("Joining...")
    state = state.upper()
    if state not in STATES:
        raise ValueError('Use the 2-letter state code abbreviation')
    arcpy.env.workspace = 'in_memory'
    out_short = os.path.splitext(os.path.basename(out_fc))[0]
    join1 = '{}_1'.format(out_short)
    join2 = '{}_2'.format(out_short)
    join3 = '{}_3'.format(out_short)
    join3_select = join3 + '_select'
    join4 = '{}_4'.format(out_short)
    join5 = '{}_5'.format(out_short)
    joinx = '{}_x'.format(out_short)

    if lake_county_field:
        county_name_matches = arcpy.ListFields(
            lake_points_fc, '{}*'.format(lake_county_field))
        if not county_name_matches:
            raise Exception(
                '{} field does not exist in dataset.'.format(lake_county_field))

    point_fields = [f.name for f in arcpy.ListFields(lake_points_fc)]

    # update the lake id to a text field if not already
    lake_id_field_type = arcpy.ListFields(lake_points_fc,
                                          lake_id_field)[0].type
    if lake_id_field_type != 'String':
        temp_id_field = '{}_t'.format(lake_id_field)
        arcpy.AddField_management(lake_points_fc, temp_id_field,
                                  'TEXT', field_length=255)
        expr = '!{}!'.format(lake_id_field)
        arcpy.CalculateField_management(lake_points_fc, temp_id_field, expr,
                                        'PYTHON')
        arcpy.DeleteField_management(lake_points_fc, lake_id_field)
        arcpy.AlterField_management(lake_points_fc,
                                    temp_id_field,
                                    new_field_name=lake_id_field)

    # Try to make some spatial connections and fulfill some logic to assign a link
    join1 = AN.SpatialJoin(lake_points_fc,
                           master_lakes_fc,
                           join1,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT')
    join2 = AN.SpatialJoin(join1,
                           master_streams_fc,
                           join2,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT')
    join3 = AN.SpatialJoin(join2,
                           master_lakes_fc,
                           join3,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT',
                           search_radius='10 meters')
    join4 = AN.SpatialJoin(join3,
                           master_lakes_fc,
                           join4,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT',
                           search_radius='100 meters')

    # setup for editing lake assignment values
    DM.AddField(join4, 'Auto_Comment', 'TEXT', field_length=100)
    DM.AddField(join4, 'Manual_Review', 'SHORT')
    DM.AddField(join4, 'Shared_Words', 'TEXT', field_length=100)
    DM.AddField(join4, 'Linked_lagoslakeid', 'LONG')
    DM.AddField(join4, 'GEO_Discovered_Name', 'TEXT', field_length=255)
    DM.AddField(join4, 'Duplicate_Candidate', 'TEXT', field_length=1)
    DM.AddField(join4, 'Is_Legacy_Link', 'TEXT', field_length=1)

    update_fields = [
        lake_id_field,
        lake_name_field,
        MASTER_LAKE_ID,
        MASTER_GNIS_NAME,  # 0m match
        'PERMANENT_IDENTIFIER_1',
        'GNIS_NAME_1',  # stream match
        MASTER_LAKE_ID + '_1',
        MASTER_GNIS_NAME + '_12',  # 10m match
        MASTER_LAKE_ID + '_12',
        MASTER_GNIS_NAME + '_12_13',  # 100m match
        'Auto_Comment',
        'Manual_Review',
        'Shared_Words',
        'Linked_lagoslakeid'
    ]

    # use a cursor to go through each point and evaluate its assignment
    cursor = arcpy.da.UpdateCursor(join4, update_fields)
    arcpy.AddMessage("Calculating link status...")
    for row in cursor:
        id, name, mid_0, mname_0, stream_id, streamname_0, mid_10, mname_10, mid_100, mname_100, comment, review, words, lagosid = row
        if mid_0 is not None:  # if the point is directly in a polygon
            if name and mname_0:
                words = lagosGIS.list_shared_words(name,
                                                   mname_0,
                                                   exclude_lake_words=False)
            comment = 'Exact location link'
            lagosid = mid_0
            review = -1
        elif mid_0 is None and mid_10 is not None:  # if the point is only within 10m of a lake
            if name and mname_10:
                words = lagosGIS.list_shared_words(name,
                                                   mname_10,
                                                   exclude_lake_words=False)
            if words:
                comment = 'Linked by common name and location'
                lagosid = mid_10
                review = -1
            else:
                comment = 'Linked by common location'
                lagosid = mid_10
                review = 1
        elif mid_0 is None and mid_10 is None:
            if stream_id is not None:  # if there is a stream match
                comment = 'Not linked because represented as river in NHD'
                review = 2
            else:
                if mid_100 is not None:  # if the point is only within 100m of lake(s)
                    if name and mname_100:
                        words = lagosGIS.list_shared_words(
                            name, mname_100, exclude_lake_words=True)
                    # TODO: Frequency check
                    if words:
                        comment = 'Linked by common name and location'
                        lagosid = mid_100
                        review = 1
                    else:
                        comment = 'Linked by common location'
                        lagosid = mid_100
                        review = 2
        cursor.updateRow(
            (id, name, mid_0, mname_0, stream_id, streamname_0, mid_10,
             mname_10, mid_100, mname_100, comment, review, words, lagosid))

    # # So I haven't been able to get the county logic to work and it hasn't been that important yet, ignore for now
    # Select down to a minimum set because we're about to join on county, which will create lots of duplicate matches
    # Then join calculated results back to full set
    # if lake_county_field:
    #     join5 = AN.Select(join4, join5, 'Manual_Review IS NULL')
    #     lakes_state = AN.Select(MASTER_LAKES_FC, 'lakes_state', "{0} = '{1}'".format(MASTER_STATE_NAME, state))
    #     lakes_state_lyr = DM.MakeFeatureLayer(lakes_state, 'lakes_state_lyr')
    #     join5_lyr = DM.MakeFeatureLayer(join5, 'join5_lyr')
    #     DM.AddJoin(join5_lyr, lake_county_field, lakes_state_lyr, MASTER_COUNTY_NAME)
    #     join5_with_county = DM.CopyFeatures(join5_lyr, 'join5_with_cty')
    #     j5 = 'DEDUPED_CA_SWAMP_data_linked_5.'
    #
    #     county_update_fields = [j5 + lake_id_field, j5 + lake_name_field, j5 + lake_county_field,
    #                             'lakes_state.' + MASTER_LAKE_ID, 'lakes_state.' + MASTER_GNIS_NAME, 'lakes_state.' + MASTER_COUNTY_NAME,
    #                             j5 + 'Auto_Comment', j5 + 'Manual_Review', j5 + 'Shared_Words',
    #                             j5 + 'Linked_lagoslakeid']
    #     with arcpy.da.UpdateCursor(join5_lyr, county_update_fields) as cursor:
    #         for row in cursor:
    #             id, name, county, mid_cty, mname_cty, mcounty, comment, review, words, lagosid = row
    #             if county is not None and mcounty is not None:
    #                 if name and mname_cty:
    #                     words = lagosGIS.list_shared_words(name, mname_cty, exclude_lake_words=True)
    #                 if words:
    #                     comment = 'PRELIMINARY: Linked by common name and location'
    #                     lagosid = mid_cty
    #                     review = 2
    #             cursor.updateRow((id, name, county, mid_cty, mname_cty, mcounty, comment, review, words, lagosid))
    #     DM.RemoveJoin(join5_lyr)
    #     join5_with_county = DM.CopyFeatures(join5_lyr, 'join5_with_county')
    #
    #     # join5 = DM.JoinField(join5, lake_county_field, lakes_state, MASTER_COUNTY_NAME,
    #                          fields = [MASTER_COUNTY_NAME, MASTER_LAKE_ID, MASTER_GNIS_NAME])
    #
    #     # This is a long way to make a join
    #     join_dict = {}
    #     with arcpy.da.SearchCursor(lakes_state, [MASTER_COUNTY_NAME, MASTER_LAKE_ID, MASTER_GNIS_NAME]) as cursor:
    #         for row in cursor:
    #             join_value, val1, val2 = row
    #             join_dict[join_value] = [val1, val2]
    #
    #     arcpy.AddField_management(join5, MASTER_LAKE_ID + 'cntyj', 'LONG')
    #     arcpy.AddField_management(join5, MASTER_GNIS_NAME + 'cntyj', 'TEXT', 255)
    #
    #     with arcpy.da.SearchCursor(join5, [lake_county_field, MASTER_LAKE_ID + 'cntyj', MASTER_GNIS_NAME + 'cntyj']) as cursor:
    #         for row in cursor:
    #             key_value = row[0]
    #             words = lagosGIS.list_shared_words()
    #             if join_dict.has_key(key_value):
    #                 row[1] = join_dict[key_value][0]
    #                 row[2] = join_dict[key_value][1]
    #             else:
    #                 row[1] = None
    #                 row[2] = None
    #             cursor.updateRow(row)
    #
    #
    #     county_update_fields = [lake_id_field, lake_name_field, lake_county_field,
    #                 MASTER_LAKE_ID + '_12_13_14', MASTER_GNIS_NAME + '_12_13',  MASTER_COUNTY_NAME + '_12_13', # county
    #                  'Auto_Comment', 'Manual_Review', 'Shared_Words',
    #                  'Linked_lagoslakeid']
    #     cursor = arcpy.da.UpdateCursor(join5, county_update_fields)
    #     for row in cursor:
    #         id, name, county, lagosid_cty, lagosname_cty, mcounty, comment, mreview, words, linked_lagosid = row
    #         if mcounty is not None:
    #             words = lagosGIS.list_shared_words()
    # else:
    #     join5 = join4
    #

    if state in LAGOSNE_STATES:
        DM.JoinField(join4, lake_id_field, master_xwalk, 'lagosne_legacyid',
                     ['lagoslakeid', 'lagos_lakename', 'lagos_state'])
        update_fields = [
            lake_id_field,
            lake_name_field,
            MASTER_LAKE_ID + '_12_13',
            'lagos_lakename',
            'lagos_state',  # crosswalk match
            'Auto_Comment',
            'Manual_Review',
            'Shared_Words',
            'Linked_lagoslakeid',
            'Is_Legacy_Link'
        ]

        with arcpy.da.UpdateCursor(join4, update_fields) as uCursor:
            for uRow in uCursor:
                id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag = uRow
                # fields are populated already from links above. Revise only if legacy links
                if mid_x is not None:
                    if state == state_x:
                        legacy_flag = 'Y'  # set to Y regardless of whether using legacy comment if state matches
                    if comment != 'Exact location link':
                        review = 1
                        if state != state_x:
                            review = 3  # downgrade if states mismatch--border lakes OK, random common IDs NOT. Check.
                        legacy_flag = 'Y'
                        comment = 'LAGOS-NE legacy link'  # only comment non-exact location matches
                        lagosid = mid_x
                        if name and mname_x:
                            words = lagosGIS.list_shared_words(
                                name,
                                mname_x)  # update words only if legacy comment

                new_row = id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag
                uCursor.updateRow(new_row)

        # # Undo the next line if you ever bring this chunk back.
    join5 = join4

    # then re-code the no matches as a 3 and copy comments to the editable field
    # compress the joined lake ids into one field
    # having two fields lets us keep track of how many of the auto matches are bad
    if arcpy.ListFields(join5, 'Comment'):
        comment_field_name = 'Comment_LAGOS'
    else:
        comment_field_name = 'Comment'

    DM.AddField(join5, comment_field_name, 'TEXT', field_length=100)
    with arcpy.da.UpdateCursor(
            join5,
            ['Manual_Review', 'Auto_Comment', comment_field_name]) as cursor:
        for flag, ac, comment in cursor:
            if flag is None:
                flag = 3
                ac = 'Not linked'
            comment = ac
            cursor.updateRow((flag, ac, comment))

    # Re-code points more than 100m into the polygon of the lake as no need to check
    DM.MakeFeatureLayer(join5, 'join5_lyr')
    DM.MakeFeatureLayer(master_lakes_lines, 'lake_lines_lyr')
    DM.SelectLayerByAttribute('join5_lyr', 'NEW_SELECTION',
                              "Auto_Comment = 'Exact location link'")
    DM.SelectLayerByLocation('join5_lyr', 'INTERSECT', 'lake_lines_lyr',
                             '100 meters', 'SUBSET_SELECTION', 'INVERT')
    DM.CalculateField('join5_lyr', 'Manual_Review', '-2', 'PYTHON')
    DM.Delete('join5_lyr')
    DM.Delete('lake_lines_lyr')

    # Then make sure to only keep the fields necessary when you write to an output
    copy_fields = point_fields + [
        'Linked_lagoslakeid', 'Auto_Comment', 'Manual_Review',
        'Is_Legacy_Link', 'Shared_Words', 'Comment', 'Duplicate_Candidate',
        'GEO_Discovered_Name'
    ]
    copy_fields.remove('Shape')
    copy_fields.remove('OBJECTID')

    lagosGIS.select_fields(join5, out_fc, copy_fields)

    DM.AssignDomainToField(out_fc, 'Comment', 'Comment')

    DM.AddField(out_fc, 'Total_points_in_lake_poly', 'Short')

    # Remove any duplicates. (These originate from the join3/join4 transition because a point can be
    # both within 10m and 100m of lakes; this code takes the closest lake as true for my current sanity.)
    # In other words, this is a hack solution.
    out_fc_fields = [
        f.name for f in arcpy.ListFields(out_fc) if f.name != 'OBJECTID'
    ]
    DM.DeleteIdentical(out_fc, out_fc_fields)

    # Get the join_count for each limno lake ID
    # De-dupe anything resulting from limno ID duplicates first before counting
    id_pairs = list(
        set(
            arcpy.da.SearchCursor(out_fc,
                                  [lake_id_field, 'Linked_lagoslakeid'])))
    # THEN pull out LAGOS id. Any duplicate now are only due to multiple distinct points within lake
    lagos_ids = [ids[1] for ids in id_pairs]
    sample_ids = [ids[0] for ids in id_pairs]
    lagos_lake_counts = Counter(lagos_ids)
    linked_multiple_lake_counts = Counter(sample_ids)

    # Get the count of points in the polygon
    with arcpy.da.UpdateCursor(
            out_fc,
        ['Linked_lagoslakeid', 'Total_points_in_lake_poly']) as cursor:
        for lagos_id, join_count in cursor:
            join_count = lagos_lake_counts[lagos_id]
            cursor.updateRow((lagos_id, join_count))

    # Mark any samples linked to more than one lake so that the analyst can select the correct lake in the
    # manual process
    with arcpy.da.UpdateCursor(
            out_fc, [lake_id_field, 'Duplicate_Candidate']) as cursor:
        for sample_id, duplicate_flag in cursor:
            duplicate_count = linked_multiple_lake_counts[sample_id]
            if duplicate_count > 1:
                duplicate_flag = "Y"
            else:
                duplicate_flag = "N"
            cursor.updateRow((sample_id, duplicate_flag))

    # clean up
    DM.AddField(out_fc, 'Note', 'TEXT', field_length=140)
    DM.Delete('in_memory')
    arcpy.AddMessage('Completed.')
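
A hypothetical invocation; the input dataset, output path, and field names describe an assumed schema, not a real dataset:

georeference_lakes(r'C:\gis\samples.gdb\wq_points',
                   r'C:\gis\out.gdb\wq_points_linked',
                   'StationID', 'StationLakeName',
                   lake_county_field='CountyName', state='MI')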
Example #16
    fields = arcpy.ListFields(in_file)

    layer = "feat_layer"
    arcmgt.MakeFeatureLayer(in_file, layer)
    desc = arcpy.Describe(layer)
    fld_names = []
    for fld in desc.fields:
        fld_names.append(fld.name)

    try:
        fields = ["PATH_FROM", "PATH_TO", "PATH_DIST"]
        #arcmgt.DeleteField(layer, fields)
        #arcmgt.DeleteField(layer, "FROM_")
        for fld in fields:
            if fld not in fld_names:
                arcmgt.AddField(table_view, fld,
                                "DOUBLE")  # SHOULD GET TYPE FROM target_fld

    except Exception as e:
        print(e)
        arcpy.AddError(str(e))
        raise

    rows = arcpy.UpdateCursor(table_view)
    last_target = None

    for row in rows:

        if last_target is None:
            last_target = row.getValue(target_fld)
            continue
Example #17
def import_gpx(gpx_file, wpt_fc, trk_fc):

    GCS_WGS_84 = arcpy.SpatialReference(4326)
    GCS_TRANSFORMS = 'WGS_1984_(ITRF08)_To_NAD_1983_2011; NAD_1927_To_NAD_1983_NADCON'

    arcpy.env.geographicTransformations = arcpy.env.geographicTransformations or GCS_TRANSFORMS
    arcpy.AddMessage('Geographic Transformations: %s' % arcpy.env.geographicTransformations)

    scratch = arcpy.env.scratchWorkspace
    arcpy.env.addOutputsToMap = False

    WPT_FIELDS = [
        ('ELEVATION', 'gpx:ele'),
        ('TIME', 'gpx:time'),
        ('NAME', 'gpx:name'),
        ('DESCRIPTION', 'gpx:desc'),
        ('SYMBOL', 'gpx:sym'),
        ('TYPE', 'gpx:type'),
        ('SAMPLES', 'gpx:extensions/wptx1:WaypointExtension/wptx1:Samples')
    ]

    ns = {
        'gpx': 'http://www.topografix.com/GPX/1/1',
        'gpxx': 'http://www.garmin.com/xmlschemas/GpxExtensions/v3',
        'wptx1': 'http://www.garmin.com/xmlschemas/WaypointExtension/v1',
        'ctx': 'http://www.garmin.com/xmlschemas/CreationTimeExtension/v1',
    }

    etree.register_namespace('', 'http://www.topografix.com/GPX/1/1')
    etree.register_namespace('gpxx', 'http://www.garmin.com/xmlschemas/GpxExtensions/v3')
    etree.register_namespace('wptx1', 'http://www.garmin.com/xmlschemas/WaypointExtension/v1')
    etree.register_namespace('ctx', 'http://www.garmin.com/xmlschemas/CreationTimeExtension/v1')

    gpx = etree.parse(gpx_file).getroot()

    sr = arcpy.env.outputCoordinateSystem

    if wpt_fc:
        create_points_feature_class(wpt_fc, sr)

        waypoints = []
        for wpt in gpx.findall('gpx:wpt', ns):
            x, y = wpt.get('lon'), wpt.get('lat')
            row = [arcpy.PointGeometry(arcpy.Point(x, y), GCS_WGS_84).projectAs(sr)]
            for field, tag in WPT_FIELDS:
                elem = wpt.find(tag, ns)

                if elem is None:
                    row.append(None)
                elif field == 'ELEVATION':
                    row.append('%0.4f' % (float(elem.text) / sr.metersPerUnit))
                elif field == 'NAME' and elem.text.isdigit():
                    row.append('%d' % int(elem.text))
                else:
                    row.append(elem.text)
            waypoints.append(row)

        if waypoints:
            fields = ['SHAPE@'] + [f[0] for f in WPT_FIELDS]
            cur = arcpy.da.InsertCursor(wpt_fc, fields)
            for row in waypoints:
                cur.insertRow(row)
            del cur

    if trk_fc:

        # idle time between trkpts to start a new track segment
        TRKSEG_IDLE_SECS = 600

        tracks = []
        track_num = 0
        for trk in gpx.findall('gpx:trk', ns):
            track_num += 1
            elem = trk.find('gpx:name', ns)
            if elem is None:
                track_name = 'track-%04d' % track_num
            else:
                track_name = elem.text

            track_pts = []
            dt_last = None
            segment_num = 0
            for trkpt in trk.findall('./gpx:trkseg/gpx:trkpt', ns):
                x, y = trkpt.get('lon'), trkpt.get('lat')
                pt = arcpy.PointGeometry(arcpy.Point(x, y), GCS_WGS_84).projectAs(sr).firstPoint

                # See if there's a track point time
                elem = trkpt.find('gpx:time', ns)
                if elem is None:
                    dt_last = None
                else:
                    dt = utils.default_tzinfo(parser.parse(elem.text), tz.UTC)
                    if dt_last and (dt - dt_last).seconds > TRKSEG_IDLE_SECS:
                        # start a new segment
                        if len(track_pts) > 1:
                            segment_num += 1
                            if segment_num > 1:
                                segment_name = '%s SEG-%04d' % (track_name, segment_num)
                            else:
                                segment_name = track_name
                            geom = arcpy.Polyline(arcpy.Array(track_pts), sr)
                            tracks.append([geom, segment_name, len(track_pts)])
                        else:
                            arcpy.AddMessage('Skipping track "%s": track_pts=%d' % (track_name, len(track_pts)))
                        track_pts = []
                    dt_last = dt

                track_pts.append(pt)

            if len(track_pts) > 1:
                segment_num += 1
                if segment_num > 1:
                    segment_name = '%s SEG-%04d' % (track_name, segment_num)
                else:
                    segment_name = track_name
                geom = arcpy.Polyline(arcpy.Array(track_pts), sr)
                tracks.append([geom, segment_name, len(track_pts)])
            else:
                arcpy.AddMessage('Skipping track "%s": track_pts=%d' % (track_name, len(track_pts)))

        if tracks:
            temp_fc = os.path.join(scratch, os.path.basename(trk_fc) + '_Temp')
            if sr is None:
                arcpy.AddError('Geoprocessing environment not set: outputCoordinateSystem')
                return None

            fc = mgmt.CreateFeatureclass(*os.path.split(temp_fc), geometry_type='POLYLINE', spatial_reference=sr)
            mgmt.AddField(fc, 'NAME', 'TEXT', field_length=64)
            mgmt.AddField(fc, 'POINTS', 'LONG')

            cur = arcpy.da.InsertCursor(fc, ('SHAPE@', 'NAME', 'POINTS'))
            for row in tracks:
                cur.insertRow(row)
            del cur

            mgmt.CopyFeatures(temp_fc, trk_fc)
            del fc
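
A hypothetical run of import_gpx(); it projects through arcpy.env.outputCoordinateSystem and writes temporary data to arcpy.env.scratchWorkspace, so both must be set first:

arcpy.env.outputCoordinateSystem = arcpy.SpatialReference(26912)  # example CRS
arcpy.env.scratchWorkspace = r'C:\gis\scratch.gdb'                # example scratch workspace
import_gpx(r'C:\data\hike.gpx',
           r'C:\gis\gps.gdb\waypoints',
           r'C:\gis\gps.gdb\tracks')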
def classify_lakes(nhd,
                   out_feature_class,
                   exclude_intermit_flowlines=False,
                   debug_mode=False):
    if debug_mode:
        arcpy.env.overwriteOutput = True
        temp_gdb = cu.create_temp_GDB('classify_lake_connectivity')
        arcpy.env.workspace = temp_gdb
        arcpy.AddMessage('Debugging workspace located at {}'.format(temp_gdb))

    else:
        arcpy.env.workspace = 'in_memory'

    if arcpy.Exists("temp_fc"):
        print("There is a problem here.")
        raise Exception

    # Tool temporary feature classes
    temp_fc = "temp_fc"
    csiwaterbody_10ha = "csiwaterbody_10ha"
    nhdflowline_filtered = "nhdflowline_filtered"
    dangles = "dangles"
    start = "start"
    end = "end"
    startdangles = "startdangles"
    enddangles = "enddangles"
    non_artificial_end = "non_artificial_end"
    flags_10ha_lake_junctions = "flags_10ha_lake_junctions"
    midvertices = "midvertices"
    non10vertices = "non10vertices"
    non10junctions = "non10junctions"
    all_non_flag_points = "all_non_flag_points"
    barriers = "barriers"
    trace1_junctions = "trace1_junctions"
    trace1_flowline = "trace1_flowline"
    trace2_junctions = "trace2junctions"
    trace2_flowline = "trace2_flowline"

    # Clean up workspace in case of bad exit from prior run in same session.
    this_tool_layers = [
        "dangles_lyr", "nhdflowline_lyr", "junction_lyr", "midvertices_lyr",
        "all_non_flag_points_lyr", "non10vertices_lyr", "out_fc_lyr", "trace1",
        "trace2"
    ]
    this_tool_temp = [
        temp_fc, csiwaterbody_10ha, nhdflowline_filtered, dangles, start, end,
        startdangles, enddangles, non_artificial_end,
        flags_10ha_lake_junctions, midvertices, non10vertices, non10junctions,
        all_non_flag_points, barriers, trace1_junctions, trace1_flowline,
        trace2_junctions, trace2_flowline
    ]
    for item in this_tool_layers + this_tool_temp:
        try:
            DM.Delete(item)
        except Exception:
            pass

    # Local variables:
    nhdflowline = os.path.join(nhd, "Hydrography", "NHDFlowline")
    nhdjunction = os.path.join(nhd, "Hydrography", "HYDRO_NET_Junctions")
    nhdwaterbody = os.path.join(nhd, "Hydrography", "NHDWaterbody")
    network = os.path.join(nhd, "Hydrography", "HYDRO_NET")

    # Get lakes, ponds and reservoirs over a hectare.
    #csi_population_filter = '''"AreaSqKm" >=0.01 AND\
    #"FCode" IN (39000,39004,39009,39010,39011,39012,43600,43613,43615,43617,43618,43619,43621)'''
    all_lakes_reservoirs_filter = '''"FType" IN (390, 436)'''

    # Can't see why we shouldn't just attribute all lakes and reservoirs
    # arcpy.Select_analysis(nhdwaterbody, "csiwaterbody", lake_population_filter)
    arcpy.AddMessage("Initializing output.")
    if exclude_intermit_flowlines:
        DM.CopyFeatures(out_feature_class, temp_fc)
        DM.Delete(out_feature_class)
    else:
        arcpy.Select_analysis(nhdwaterbody, temp_fc,
                              all_lakes_reservoirs_filter)

    # Get lakes, ponds and reservoirs over 10 hectares.
    lakes_10ha_filter = '''"AreaSqKm" >= 0.1 AND "FType" IN (390, 436)'''
    arcpy.Select_analysis(nhdwaterbody, csiwaterbody_10ha, lakes_10ha_filter)

    # Exclude intermittent flowlines, if requested
    if exclude_intermit_flowlines:
        flowline_where_clause = '''"FCode" NOT IN (46003,46007)'''
        nhdflowline = arcpy.Select_analysis(nhdflowline, nhdflowline_filtered,
                                            flowline_where_clause)

    # Make dangle points at end of nhdflowline
    DM.FeatureVerticesToPoints(nhdflowline, dangles, "DANGLE")
    DM.MakeFeatureLayer(dangles, "dangles_lyr")

    # Isolate start dangles from end dangles.
    DM.FeatureVerticesToPoints(nhdflowline, start, "START")
    DM.FeatureVerticesToPoints(nhdflowline, end, "END")

    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", start)
    DM.CopyFeatures("dangles_lyr", startdangles)
    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", end)
    DM.CopyFeatures("dangles_lyr", enddangles)

    # Special handling for lakes that have some intermittent flow in and some permanent
    if exclude_intermit_flowlines:
        DM.MakeFeatureLayer(nhdflowline, "nhdflowline_lyr")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "NEW_SELECTION",
                                  '''"WBArea_Permanent_Identifier" is null''')
        DM.FeatureVerticesToPoints("nhdflowline_lyr", non_artificial_end,
                                   "END")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "CLEAR_SELECTION")

    arcpy.AddMessage("Found source area nodes.")

    # Get junctions from lakes >= 10 hectares.
    DM.MakeFeatureLayer(nhdjunction, "junction_lyr")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT", csiwaterbody_10ha,
                             XY_TOLERANCE, "NEW_SELECTION")

    DM.CopyFeatures("junction_lyr", flags_10ha_lake_junctions)
    arcpy.AddMessage("Found lakes >= 10 ha.")

    # Make points shapefile and layer at flowline vertices to act as potential flags and/or barriers.
    arcpy.AddMessage("Tracing...")
    DM.FeatureVerticesToPoints(nhdflowline, midvertices, "MID")
    DM.MakeFeatureLayer(midvertices, "midvertices_lyr")

    # Get vertices that are not coincident with 10 hectare lake junctions.
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "NEW_SELECTION")
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "SWITCH_SELECTION")
    DM.CopyFeatures("midvertices_lyr", non10vertices)

    # Get junctions that are not coincident with 10 hectare lake junctions.
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "NEW_SELECTION")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "SWITCH_SELECTION")
    DM.CopyFeatures("junction_lyr", non10junctions)

    # Merge non10vertices with non10junctions
    DM.Merge([non10junctions, non10vertices],
             all_non_flag_points)  # inputs both point fc in_memory
    DM.MakeFeatureLayer(all_non_flag_points, "all_non_flag_points_lyr")

    # Tests the counts...for some reason I'm not getting stable behavior from the merge.
    mid_n = int(DM.GetCount(non10vertices).getOutput(0))
    jxn_n = int(DM.GetCount(non10junctions).getOutput(0))
    merge_n = int(DM.GetCount(all_non_flag_points).getOutput(0))
    if merge_n < mid_n + jxn_n:
        arcpy.AddWarning(
            "The total number of flags ({0}) is less than the sum of the input junctions ({1}) "
            "and input midpoints ({2})".format(merge_n, jxn_n, mid_n))

    # For tracing barriers, select all_non_flag_points points that intersect a 10 ha lake.
    DM.SelectLayerByLocation("all_non_flag_points_lyr", "INTERSECT",
                             csiwaterbody_10ha, XY_TOLERANCE, "NEW_SELECTION")
    DM.CopyFeatures("all_non_flag_points_lyr", barriers)

    # Trace1-Trace downstream to first barrier (junctions+midvertices in 10 ha lake) starting from flags_10ha_lake_junctions flag points.
    DM.TraceGeometricNetwork(network, "trace1", flags_10ha_lake_junctions,
                             "TRACE_DOWNSTREAM", barriers)

    # Save trace1 flowlines and junctions to layers on disk.
    DM.CopyFeatures("trace1\HYDRO_NET_Junctions",
                    trace1_junctions)  # extra for debugging
    DM.CopyFeatures("trace1\NHDFlowline", trace1_flowline)

    # Select vertice midpoints that intersect trace1 flowlines selection for new flags for trace2.
    DM.MakeFeatureLayer(non10vertices, "non10vertices_lyr")
    DM.SelectLayerByLocation("non10vertices_lyr", "INTERSECT", trace1_flowline,
                             "", "NEW_SELECTION")

    # Trace2-Trace downstream from midpoints of flowlines that intersect the selected flowlines from trace1.
    DM.TraceGeometricNetwork(network, "trace2", "non10vertices_lyr",
                             "TRACE_DOWNSTREAM")

    # Save trace1 flowlines and junctions to layers and then shapes on disk.
    DM.CopyFeatures("trace2\HYDRO_NET_Junctions", trace2_junctions)
    DM.CopyFeatures("trace2\NHDFlowline",
                    trace2_flowline)  # extra for debugging
    arcpy.AddMessage("Done tracing.")

    # Make shapefile for seepage lakes. (Ones that don't intersect flowlines)
    if exclude_intermit_flowlines:
        class_field_name = "Lake_Connectivity_Permanent"
    else:
        class_field_name = "Lake_Connectivity_Class"
    DM.AddField(temp_fc, class_field_name, "TEXT", field_length=13)
    DM.MakeFeatureLayer(temp_fc, "out_fc_lyr")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline, "",
                             "SWITCH_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""",
                      "PYTHON")

    # New type of "Isolated" classification, mostly for "permanent" but there were some oddballs in "maximum" too
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", enddangles,
                             XY_TOLERANCE, "SUBSET_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""",
                      "PYTHON")

    # Get headwater lakes.
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByAttribute(
        "out_fc_lyr", "REMOVE_FROM_SELECTION",
        '''"{}" = 'Isolated' '''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""",
                      "PYTHON")

    # Select csiwaterbody that intersect trace2junctions
    arcpy.AddMessage("Beginning connectivity attribution...")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", trace2_junctions,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'DrainageLk'""",
                      "PYTHON")

    # Get stream drainage lakes. Either unassigned so far or convert "Headwater" if a permanent stream flows into it,
    # which is detected with "non_artificial_end"
    DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                              '''"{}" IS NULL'''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""",
                      "PYTHON")
    if exclude_intermit_flowlines:
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"{}" = 'Headwater' '''.format(class_field_name))
        DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", non_artificial_end,
                                 XY_TOLERANCE, "SUBSET_SELECTION")
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""",
                          "PYTHON")

        # Prevent 'upgrades' caused by very odd flow situations and artifacts of bad digitization. Their
        # effects vary; to avoid confusion, just keep the class assigned when all flowlines are used.

        # 1--Purely hypothetical, not seen in testing
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"Lake_Connectivity_Class" = 'Isolated' AND "Lake_Connectivity_Permanent" <> 'Isolated' '''
        )
        DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""",
                          "PYTHON")

        # 2--Headwater to Drainage upgrade seen in testing with odd multi-inlet flow situation
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"Lake_Connectivity_Class" = 'Headwater' AND "Lake_Connectivity_Permanent" IN ('Drainage', 'DrainageLk') '''
        )
        DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""",
                          "PYTHON")

        # 3--Drainage to DrainageLk upgrade seen in testing when intermittent stream segments were used
        # erroneously instead of artificial paths
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"Lake_Connectivity_Class" = 'Drainage' AND "Lake_Connectivity_Permanent" = 'DrainageLk' '''
        )
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""",
                          "PYTHON")
        DM.SelectLayerByAttribute("out_fc_lyr", "CLEAR_SELECTION")

        # Add change flag for users
        DM.AddField(temp_fc,
                    "Lake_Connectivity_Fluctuates",
                    "TEXT",
                    field_length=1)
        flag_codeblock = """def flag_calculate(arg1, arg2):
            if arg1 == arg2:
                return 'N'
            else:
                return 'Y'"""
        expression = 'flag_calculate(!Lake_Connectivity_Class!, !Lake_Connectivity_Permanent!)'
        DM.CalculateField(temp_fc, "Lake_Connectivity_Fluctuates", expression,
                          "PYTHON", flag_codeblock)

    # Project output once done with both. Switching CRS earlier causes trace problems.
    if not exclude_intermit_flowlines:
        DM.CopyFeatures(temp_fc, out_feature_class)
    else:
        DM.Project(temp_fc, out_feature_class, arcpy.SpatialReference(102039))

    # Clean up
    if not debug_mode:
        for item in this_tool_layers + this_tool_temp:
            if arcpy.Exists(item):
                DM.Delete(item)
        DM.Delete("trace1")
        DM.Delete("trace2")
    arcpy.AddMessage("{} classification is complete.".format(class_field_name))
Ejemplo n.º 19
0
sciNames = {}
for i in lst:
    sciNames[i['code']] = i['sciname']
commNames = {}
for i in lst:
    commNames[i['code']] = i['commname']
habitat_Use = {}
for i in lst:
    habitat_Use[i['code']] = i['Loc_Use']
# cycle through each zone raster
for i in range(len(polZ)):
    print("working on " + polZ[i])
    # don't assume i is the class level -- extract class here
    classLevel = polZ[i][-1:]
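    # e.g., a zone name ending in "C3" (hypothetical) yields classLevel "3"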
    curZo = "zon_Joined_final_C" + classLevel
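    # add each summary text field below only if it doesn't already exist on the zone table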
    fldCodeList = [f.name for f in arcpy.ListFields(curZo, "codeList", "Text")]
    if not fldCodeList:
        man.AddField(curZo, "codeList", "TEXT", "", "", "500")
    fldSciNmList = [
        f.name for f in arcpy.ListFields(curZo, "SciNames", "Text")
    ]
    if not fldSciNmList:
        man.AddField(curZo, "SciNames", "TEXT", "", "", "550")
    fldCmNmList = [
        f.name for f in arcpy.ListFields(curZo, "CommNames", "Text")
    ]
    if not fldCmNmList:
        man.AddField(curZo, "CommNames", "TEXT", "", "", "500")
    fldHabUseList = [
        f.name for f in arcpy.ListFields(curZo, "HabitatUse", "Text")
    ]
    if not fldHabUseList:
        man.AddField(curZo, "HabitatUse", "TEXT", "", "", "500")
Ejemplo n.º 20
0
start = 0
#####

# loop through all binary (0/1) grids, build the hypergrid
# with info stored in a single text column
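# e.g., combining three 0/1 grids cell-by-cell yields spp0 strings like "101":
# present in the 1st and 3rd grids, absent in the 2nd (hypothetical values)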
for i in range(start, len(codeL)):
    elem = codeL[i]
    rasName = elem + "_c.tif"
    if rasName in rasL:
        if i == 0:
            inRas = inPath + "/" + rasName
            curHyp = wrk + "/hyp" + str(i)
            print("working on " + rasName)
            man.CopyRaster(inRas, curHyp)
            man.BuildRasterAttributeTable(curHyp)
            man.AddField(curHyp, "spp0", "TEXT", "", "", 251)
            man.AddField(curHyp, "temp", "SHORT", 1)
            expr = "str( !Value! )"
            man.CalculateField(curHyp, "spp0", expr, "PYTHON")
        else:
            iminus = i - 1
            prevHyp = wrk + "/hyp" + str(iminus)
            print("working on " + elem + ", " + str(i) + " of " + str(listLen))
            curHyp = Combine([prevHyp, rasName])
            curHyp.save(wrk + "/hyp" + str(i))
            man.AddField(curHyp, "spp0", "TEXT", "", "", 251)
            jval = "hyp" + str(iminus)
            man.JoinField(curHyp, jval, prevHyp, "VALUE", ["spp0"])
            rasNoDot = rasName[0:rasName.find(".")]
            newCol = rasNoDot[0:11].upper()
            expr = "str(!spp0_1!) + str(!" + newCol + "!)"
Ejemplo n.º 21
0
def flatten_overlaps(zone_fc,
                     zone_field,
                     output_fc,
                     output_table,
                     cluster_tolerance=' 3 Meters'):
    orig_env = arcpy.env.workspace
    arcpy.env.workspace = 'in_memory'

    objectid = [f.name for f in arcpy.ListFields(zone_fc)
                if f.type == 'OID'][0]
    zone_type = [f.type for f in arcpy.ListFields(zone_fc, zone_field)][0]
    fid1 = 'FID_{}'.format(os.path.basename(zone_fc))
    flat_zoneid = 'flat{}'.format(zone_field)
    flat_zoneid_prefix = 'flat{}_'.format(zone_field.replace('_zoneid', ''))

    # Union with FID_Only (A)
    arcpy.AddMessage("Splitting overlaps in polygons...")
    zoneid_dict = {
        r[0]: r[1]
        for r in arcpy.da.SearchCursor(zone_fc, [objectid, zone_field])
    }
    self_union = AN.Union([zone_fc],
                          'self_union',
                          'ONLY_FID',
                          cluster_tolerance=cluster_tolerance)

    # #If you don't run this section, Find Identical fails with error 999999. Seems to have to do with small slivers
    # #having 3 vertices and/or only circular arcs in the geometry.
    arcpy.AddMessage("Repairing self-union geometries...")
    # DM.AddGeometryAttributes(self_union, 'POINT_COUNT; AREA')
    # union_fix = DM.MakeFeatureLayer(self_union, 'union_fix', where_clause='PNT_COUNT <= 10 OR POLY_AREA < 5000')
    # arcpy.Densify_edit(union_fix, 'DISTANCE', distance = '1 Meters', max_deviation='1 Meters')  # selection ON, edits self_union disk
    DM.RepairGeometry(
        self_union, 'DELETE_NULL'
    )  # eliminate empty geoms. selection ON, edits self_union disk
    # for field in ['PNT_COUNT', 'POLY_AREA']:
    #     DM.DeleteField(self_union, field)

    # Find Identical by Shape (B)
    if arcpy.Exists('identical_shapes'):
        DM.Delete(
            'identical_shapes'
        )  # causes failure in FindIdentical even when overwrite is allowed
    identical_shapes = DM.FindIdentical(self_union, 'identical_shapes',
                                        'Shape')
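    # FindIdentical writes one row per input feature (IN_FID); features with
    # identical shapes share a FEAT_SEQ value, which becomes the flat zone id below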

    # Join A to B and calc flat[zone]_zoneid = FEAT_SEQ (C)
    DM.AddField(self_union, flat_zoneid, 'TEXT', field_length=20)
    union_oid = [
        f.name for f in arcpy.ListFields(self_union) if f.type == 'OID'
    ][0]
    identical_shapes_dict = {
        r[0]: r[1]
        for r in arcpy.da.SearchCursor(identical_shapes,
                                       ['IN_FID', 'FEAT_SEQ'])
    }
    with arcpy.da.UpdateCursor(self_union,
                               [union_oid, flat_zoneid]) as u_cursor:
        for row in u_cursor:
            row[1] = '{}{}'.format(flat_zoneid_prefix,
                                   identical_shapes_dict[row[0]])
            u_cursor.updateRow(row)

    # Add the original zone ids and save to table (E)
    arcpy.AddMessage("Assigning temporary IDs to split polygons...")
    unflat_table = DM.CopyRows(self_union, 'unflat_table')
    DM.AddField(unflat_table, zone_field,
                zone_type)  # default text length of 50 is fine if needed
    with arcpy.da.UpdateCursor(unflat_table, [fid1, zone_field]) as u_cursor:
        for row in u_cursor:
            row[1] = zoneid_dict[row[0]]  # assign zone id
            u_cursor.updateRow(row)

    # Delete Identical (C) (save as flat[zone])
    with arcpy.da.UpdateCursor(self_union, 'OID@') as cursor:
        visited = set()
        for row in cursor:
            feat_seq = identical_shapes_dict[row[0]]
            if feat_seq in visited:
                cursor.deleteRow()
            visited.add(feat_seq)

    DM.DeleteField(self_union, fid1)
    DM.DeleteField(unflat_table, fid1)

    # save outputs
    output_fc = DM.CopyFeatures(self_union, output_fc)
    output_table = DM.CopyRows(unflat_table, output_table)

    # cleanup
    for item in [self_union, identical_shapes, unflat_table]:
        DM.Delete(item)
    arcpy.env.workspace = orig_env

    return output_fc
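
A minimal invocation sketch for flatten_overlaps. The geodatabase path, layer, and
field names below are illustrative assumptions, not values from the original source:

import arcpy

arcpy.env.overwriteOutput = True

# hypothetical input: overlapping polygon zones keyed by a text zone id field
zone_fc = r'C:\gis\lagos.gdb\hu12'  # assumed path
flat_fc = flatten_overlaps(
    zone_fc,
    'hu12_zoneid',                          # assumed zone id field
    r'C:\gis\lagos.gdb\hu12_flat',          # output: non-overlapping polygons
    r'C:\gis\lagos.gdb\hu12_unflat_table')  # output: flat-to-original id crosswalk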
Ejemplo n.º 22
0
    def unflatten(intermediate_table):
        flat_zoneid = zone_field
        unflat_zoneid = zone_field.replace('flat', '')
        zone_type = [f.type for f in arcpy.ListFields(zone_fc, flat_zoneid)][0]
        # Set up the output table (can't do this until the prior tool is run)
        # if os.path.dirname(out_table):
        #     out_path = os.path.dirname(out_table)
        # else:
        #     out_path = orig_env

        unflat_result = DM.CreateTable('in_memory',
                                       os.path.basename(out_table))

        # get the fields to add to the table
        editable_fields = [
            f for f in arcpy.ListFields(intermediate_table)
            if f.editable and f.name.lower() != flat_zoneid.lower()
        ]

        # populate the new table schema
        DM.AddField(unflat_result, unflat_zoneid, zone_type)
        for f in editable_fields:
            DM.AddField(unflat_result, f.name, f.type, field_length=f.length)

        # map original zone ids to new zone ids
        original_flat = defaultdict(list)
        with arcpy.da.SearchCursor(unflat_table,
                                   [unflat_zoneid, flat_zoneid]) as cursor:
            for row in cursor:
                if row[1] not in original_flat[row[0]]:
                    original_flat[row[0]].append(row[1])

        # Use CELL_COUNT as weight for means to calculate final values for each zone.
        fixed_fields = [
            unflat_zoneid, 'ORIGINAL_COUNT', 'CELL_COUNT', 'datacoveragepct'
        ]
        other_field_names = [
            f.name for f in editable_fields if f.name not in fixed_fields
        ]
        i_cursor = arcpy.da.InsertCursor(
            unflat_result,
            fixed_fields + other_field_names)  # open output table cursor
        flat_stats = {
            r[0]: r[1:]
            for r in arcpy.da.SearchCursor(intermediate_table, [
                flat_zoneid, 'ORIGINAL_COUNT', 'CELL_COUNT', 'datacoveragepct'
            ] + other_field_names)
        }

        count_diff = 0
        for zid, unflat_ids in original_flat.items():
            valid_unflat_ids = [id for id in unflat_ids if id in flat_stats
                                ]  # skip flatpolys not rasterized
            area_vec = [flat_stats[id][0] for id in valid_unflat_ids
                        ]  # ORIGINAL_COUNT specified in 0 index earlier
            cell_vec = [flat_stats[id][1] for id in valid_unflat_ids]
            coverage_vec = [flat_stats[id][2] for id in valid_unflat_ids
                            ]  # datacoveragepct special handling
            stat_vectors_by_id = [
                flat_stats[id][3:] for id in valid_unflat_ids
            ]  # "the rest", list of lists

            # calc the new summarized values
            original_count = sum(
                filter(None, area_vec)
            )  # None area is functionally equivalent to 0, all Nones = 0 too
            cell_count = sum(filter(None, cell_vec))
            if cell_count > 0:
                weighted_coverage = sum(
                    (a or 0) * (b or 0)  # treat None as 0 so missing values can't raise a TypeError
                    for a, b in zip(area_vec, coverage_vec)) / original_count

                # this calculation accounts for fractional missing values, both kinds (whole zone is no data, or zone
                # was missing some data and had data coverage % < 100). This is done by converting None to 0
                # and by using the cell_count (count of cells with data present)
                # instead of the full zone original_count. You have to do both or the mean will be distorted.
                # hand-verification that this works as intended using test GIS data was completed 2019-11-01 by NJS
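                # worked example (hypothetical numbers): cell_vec = [4, 6] and one stat
                # field with values [2.0, None] give crossprods [[8.0], [0.0]], so the
                # weighted mean = (8.0 + 0.0) / 10 = 0.8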
                crossprods = []
                for i in range(0, len(valid_unflat_ids)):
                    crossprods.append([
                        cell_vec[i] * float(s or 0)
                        for s in stat_vectors_by_id[i]
                    ])

                stat_sums = list(zip(*crossprods))  # transpose: one tuple per stat field, across flat ids
                weighted_stat_means = []
                for i in range(len(other_field_names)):
                    weighted_stat_means.append(
                        sum(stat_sums[i]) / cell_count)
            else:
                weighted_coverage = 0
                weighted_stat_means = [None] * len(other_field_names)
                count_diff += 1

            new_row = [zid, original_count, cell_count, weighted_coverage
                       ] + weighted_stat_means
            i_cursor.insertRow(new_row)
        del i_cursor

        DM.Delete(intermediate_table)

        return [unflat_result, count_diff]
Ejemplo n.º 23
0
    def basinAve(aoiBasin, pmpField):
        pmpPoints = env.scratchGDB + "\\PMP_Points"  # Path of 'PMP_Points' scratch feature class
        if weightedAve:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField +
                             "(weighted)...")
            vectorGridClip = env.scratchGDB + "\\VectorGridClip"  # Path of 'VectorGridClip' scratch feature class
            sumstats = env.scratchGDB + "\\SummaryStats"

            dm.MakeFeatureLayer(
                home + "\\Input\Non_Storm_Data.gdb\\Vector_Grid",
                "vgLayer")  # make a feature layer of vector grid cells
            dm.SelectLayerByLocation(
                "vgLayer", "INTERSECT", aoiBasin
            )  # select the vector grid cells that intersect the aoiBasin polygon

            an.Clip("vgLayer", aoiBasin,
                    vectorGridClip)  # clips aoi vector grid to basin
            dm.AddField(
                pmpPoints, "WEIGHT", "DOUBLE"
            )  # adds 'WEIGHT' field to PMP_Points scratch feature class
            dm.MakeFeatureLayer(
                vectorGridClip, "vgClipLayer"
            )  # make a feature layer of basin clipped vector grid cells
            dm.MakeFeatureLayer(
                pmpPoints, "pmpPointsLayer"
            )  # make a feature layer of PMP_Points feature class

            dm.AddJoin("pmpPointsLayer", "ID", "vgClipLayer",
                       "ID")  # joins PMP_Points and vectorGridBasin tables
            dm.CalculateField(
                "pmpPointsLayer", "WEIGHT", "!vectorGridClip.Shape_Area!",
                "PYTHON_9.3"
            )  # Calculates basin area proportion to use as weight for each grid cell.
            dm.RemoveJoin("pmpPointsLayer", "vectorGridClip")

            an.Statistics(pmpPoints, sumstats, [["WEIGHT", "SUM"]], "")
            stats = arcpy.SearchCursor(sumstats)
            pmpWgtAve = pmpField + "_WgtAve"

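            # each point contributes (WEIGHT / SUM_WEIGHT) * its PMP value; summing
            # these contributions over all points yields the basin weighted average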
            for row in stats:
                calc = row.getValue("SUM_WEIGHT")
                express = "(!WEIGHT!/{})* !{}!".format(calc, pmpField)
                dm.AddField(pmpPoints, pmpWgtAve, "DOUBLE", 2)
                dm.CalculateField(pmpPoints, pmpWgtAve, express, "PYTHON_9.3")
            del stats, row

            an.Statistics(pmpPoints, sumstats, [[pmpWgtAve, "SUM"]], "")
            sumwgtave = "SUM_" + pmpWgtAve
            with arcpy.da.SearchCursor(sumstats, sumwgtave) as stats:
                for row in stats:
                    wgtAve = row[0]
                    return round(wgtAve, 2)

##            na = arcpy.da.TableToNumPyArray(pmpPoints,(pmpField, 'WEIGHT'))                                 # Assign pmpPoints values and weights to Numpy array (na)
##            wgtAve = numpy.average(na[pmpField], weights=na['WEIGHT'])                                         # Calculate weighted average with Numpy average
##            del na
##            return round(wgtAve, 2)

        else:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField +
                             "(not weighted)...")
            sumstats = env.scratchGDB + "\\SummaryStats"
            an.Statistics(pmpPoints, sumstats, [[pmpField, "MEAN"]], "")
            mean = "MEAN_" + pmpField
            with arcpy.da.SearchCursor(sumstats, mean) as stats:
                for row in stats:
                    fieldAve = row[0]
                    return round(fieldAve, 2)
Ejemplo n.º 24
0
    def stats_area_table(zone_fc=zone_fc,
                         zone_field=zone_field,
                         in_value_raster=in_value_raster,
                         out_table=out_table,
                         is_thematic=is_thematic):
        def refine_zonal_output(t):
            """Makes a nicer output for this tool. Rename some fields, drop unwanted
                ones, calculate percentages using raster AREA before deleting that
                field."""
            if is_thematic:
                value_fields = arcpy.ListFields(t, "VALUE*")
                pct_fields = [
                    '{}_pct'.format(f.name) for f in value_fields
                ]  # VALUE_41_pct, etc. Field can't start with number.

                # add all the new fields needed
                for f, pct_field in zip(value_fields, pct_fields):
                    arcpy.AddField_management(t, pct_field, f.type)

                # calculate the percents
                cursor_fields = ['AREA'] + [f.name
                                            for f in value_fields] + pct_fields
                uCursor = arcpy.da.UpdateCursor(t, cursor_fields)
                for uRow in uCursor:
                    # unpack area plus the value-field and pct-field slices, no matter how many fields there are
                    vf_i_end = len(value_fields) + 1
                    pf_i_end = vf_i_end + len(pct_fields)

                    # pct_values are all null at this point, but unpack them for clarity
                    area, value_values, pct_values = uRow[0], uRow[
                        1:vf_i_end], uRow[vf_i_end:pf_i_end]
                    new_pct_values = [100 * vv / area for vv in value_values]
                    new_row = [area] + value_values + new_pct_values
                    uCursor.updateRow(new_row)

                for vf in value_fields:
                    arcpy.DeleteField_management(t, vf.name)

            arcpy.AlterField_management(t, 'COUNT', 'CELL_COUNT')
            drop_fields = ['ZONE_CODE', 'COUNT', 'AREA']
            if not debug_mode:
                for df in drop_fields:
                    try:
                        arcpy.DeleteField_management(t, df)
                    except:
                        continue

        # Set up environments for alignment between zone raster and theme raster
        if isinstance(zone_fc, arcpy.Result):
            zone_fc = zone_fc.getOutput(0)
        this_files_dir = os.path.dirname(os.path.abspath(__file__))
        os.chdir(this_files_dir)
        common_grid = os.path.abspath('../common_grid.tif')
        env.snapRaster = common_grid
        env.cellSize = common_grid
        env.extent = zone_fc

        zone_desc = arcpy.Describe(zone_fc)
        zone_raster = 'convertraster'
        if zone_desc.dataType not in ['RasterDataset', 'RasterLayer']:
            zone_raster = arcpy.PolygonToRaster_conversion(
                zone_fc,
                zone_field,
                zone_raster,
                'CELL_CENTER',
                cellsize=env.cellSize)
            print('cell size is {}'.format(env.cellSize))
            zone_size = int(env.cellSize)
        else:
            zone_raster = zone_fc
            zone_size = min(
                arcpy.Describe(zone_raster).meanCellHeight,
                arcpy.Describe(zone_raster).meanCellWidth)
            raster_size = min(
                arcpy.Describe(in_value_raster).meanCellHeight,
                arcpy.Describe(in_value_raster).meanCellWidth)
            env.cellSize = min([zone_size, raster_size])
            print('cell size is {}'.format(env.cellSize))

        # I tested and there is no need to resample the raster being summarized. It will be resampled correctly
        # internally in the following tool given that the necessary environments are set above (cell size, snap).
        # # in_value_raster = arcpy.Resample_management(in_value_raster, 'in_value_raster_resampled', CELL_SIZE)
        if not is_thematic:
            arcpy.AddMessage("Calculating Zonal Statistics...")
            temp_entire_table = arcpy.sa.ZonalStatisticsAsTable(
                zone_raster, zone_field, in_value_raster, 'temp_zonal_table',
                'DATA', 'MEAN')

        if is_thematic:
            # for some reason env.cellSize doesn't work
            # calculate/doit
            arcpy.AddMessage("Tabulating areas...")
            temp_entire_table = arcpy.sa.TabulateArea(
                zone_raster,
                zone_field,
                in_value_raster,
                'Value',
                'temp_area_table',
                processing_cell_size=env.cellSize)
            # TabulateArea capitalizes the zone for some annoying reason and ArcGIS is case-insensitive to field names
            # so we have this work-around:
            zone_field_t = '{}_t'.format(zone_field)
            DM.AddField(temp_entire_table,
                        zone_field_t,
                        'TEXT',
                        field_length=20)
            expr = '!{}!'.format(zone_field.upper())
            DM.CalculateField(temp_entire_table, zone_field_t, expr, 'PYTHON')
            DM.DeleteField(temp_entire_table, zone_field.upper())
            DM.AlterField(temp_entire_table,
                          zone_field_t,
                          zone_field,
                          clear_field_alias=True)

            # replaces join to Zonal Stats in previous versions of tool
            # no joining, just calculate the area/count from what's produced by TabulateArea
            arcpy.AddField_management(temp_entire_table, 'AREA', 'DOUBLE')
            arcpy.AddField_management(temp_entire_table, 'COUNT', 'DOUBLE')

            cursor_fields = ['AREA', 'COUNT']
            value_fields = [
                f.name for f in arcpy.ListFields(temp_entire_table, 'VALUE*')
            ]
            cursor_fields.extend(value_fields)
            with arcpy.da.UpdateCursor(temp_entire_table,
                                       cursor_fields) as uCursor:
                for uRow in uCursor:
                    area, count, value_fields = uRow[0], uRow[1], uRow[2:]
                    area = sum(value_fields)
                    count = round(
                        area / (int(env.cellSize) * int(env.cellSize)), 0)
                    new_row = [area, count] + value_fields
                    uCursor.updateRow(new_row)

        arcpy.AddMessage("Refining output table...")

        arcpy.AddField_management(temp_entire_table, 'datacoveragepct',
                                  'DOUBLE')
        arcpy.AddField_management(temp_entire_table, 'ORIGINAL_COUNT', 'LONG')

        # calculate datacoveragepct by comparing to original areas in zone raster
        # alternative to using JoinField, which is prohibitively slow if zones exceed hu12 count
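        # e.g. (hypothetical): a zone rasterized to 100 cells originally that yields
        # 90 summarized cells at the same cell size has datacoveragepct = 100 * 90/100 = 90.0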
        zone_raster_dict = {
            row[0]: row[1]
            for row in arcpy.da.SearchCursor(zone_raster,
                                             [zone_field, 'Count'])
        }
        temp_entire_table_dict = {
            row[0]: row[1]
            for row in arcpy.da.SearchCursor(temp_entire_table,
                                             [zone_field, 'COUNT'])
        }

        sum_cell_area = float(env.cellSize) * float(env.cellSize)
        orig_cell_area = zone_size * zone_size

        with arcpy.da.UpdateCursor(
                temp_entire_table,
            [zone_field, 'datacoveragepct', 'ORIGINAL_COUNT']) as cursor:
            for uRow in cursor:
                key_value, data_pct, count_orig = uRow
                count_orig = zone_raster_dict[key_value]
                if key_value in temp_entire_table_dict:
                    count_summarized = temp_entire_table_dict[key_value]
                    data_pct = 100 * float((count_summarized * sum_cell_area) /
                                           (count_orig * orig_cell_area))
                else:
                    data_pct = None
                cursor.updateRow((key_value, data_pct, count_orig))

        # Refine the output
        refine_zonal_output(temp_entire_table)

        # in order to add vector capabilities back, need to do something with this
        # right now we just can't fill in polygon zones that didn't convert to raster in our system
        stats_result = cu.one_in_one_out(temp_entire_table, zone_fc,
                                         zone_field, out_table)

        # Convert "datacoveragepct" and "ORIGINAL_COUNT" values to 0 for zones with no metrics calculated
        with arcpy.da.UpdateCursor(
                out_table,
            [zone_field, 'datacoveragepct', 'ORIGINAL_COUNT', 'CELL_COUNT'
             ]) as u_cursor:
            for row in u_cursor:
                # data_coverage pct to 0
                if row[1] is None:
                    row[1] = 0
                # original count filled in if a) zone outside raster bounds or b) zone too small to be rasterized
                if row[2] is None:
                    if row[0] in zone_raster_dict:
                        row[2] = zone_raster_dict[row[0]]
                    else:
                        row[2] = 0
                # cell count set to 0
                if row[3] is None:
                    row[3] = 0
                u_cursor.updateRow(row)

        # count whether all zones got an output record or not
        out_count = int(
            arcpy.GetCount_management(temp_entire_table).getOutput(0))
        in_count = int(arcpy.GetCount_management(zone_fc).getOutput(0))
        count_diff = in_count - out_count

        # cleanup
        if not debug_mode:
            for item in [
                    'temp_zonal_table', temp_entire_table, 'convertraster'
            ]:  # don't add zone_raster, orig
                arcpy.Delete_management(item)
        arcpy.ResetEnvironments()
        env.workspace = orig_env  # hope this prevents problems using list of FCs from workspace as batch
        arcpy.CheckInExtension("Spatial")

        return [stats_result, count_diff]
Ejemplo n.º 25
0
def fast_join(fc_target, fc_target_keyfield, fc_join, fc_join_keyfield,
              fields_to_join):

    start_time = perf_counter()

    # make field dict for join fc fields {fname: [dtype, len]}
    jfields_names = [f.name for f in ListFields(fc_join)]
    jfields_dtypes = [f.type for f in ListFields(fc_join)]
    jfields_len = [f.length for f in ListFields(fc_join)]
    dts_lens = [[type, len] for type, len in zip(jfields_dtypes, jfields_len)]
    jfields_dict = dict(zip(jfields_names, dts_lens))

    # field names in the target fc
    target_start_fields = [f.name for f in ListFields(fc_target)]

    # as needed, add field(s) to target FC if it doesn't already exist.
    print(f"Adding fields {fields_to_join} to target table {fc_target}...")
    for jfield in fields_to_join:
        if jfield not in target_start_fields:
            ftype = jfields_dict[jfield][0]
            flen = jfields_dict[jfield][1]

            management.AddField(in_table=fc_target,
                                field_name=jfield,
                                field_type=ftype,
                                field_length=flen)
        else:
            print(
                f"\t{jfield} already in {fc_target}'s fields. Will be OVERWRITTEN with joined data..."
            )

    # the key field goes first in each cursor's field list, so row[0] is the key
    join_cur_fields = [fc_join_keyfield] + fields_to_join
    target_cur_fields = [fc_target_keyfield] + fields_to_join

    join_dict = {}
    print("reading data from join table...")
    with SearchCursor(fc_join, join_cur_fields) as scur:
        for row in scur:
            join_dict[row[0]] = list(row[1:])

    print("writing join data to target table...")
    with UpdateCursor(fc_target, target_cur_fields) as ucur:
        for row in ucur:
            jkey = row[0]

            # if a join id value is in the target table but not the join table,
            # skip the row; the joined columns stay null in these cases.
            if jkey not in join_dict:
                continue

            ucur.updateRow([jkey] + join_dict[jkey])

    elapsed_sec = round(perf_counter() - start_time, 1)

    print(f"Successfully joined fields {fields_to_join} from {fc_join} onto {fc_target}" \
        f" in {elapsed_sec} seconds!")
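
A small usage sketch, assuming the module-level imports this snippet relies on
(perf_counter from time; ListFields and management from arcpy; SearchCursor and
UpdateCursor from arcpy.da) plus hypothetical feature classes sharing a key field:

fast_join(fc_target=r'C:\gis\parcels.gdb\parcels',        # assumed path
          fc_target_keyfield='parcel_id',                 # hypothetical key field
          fc_join=r'C:\gis\parcels.gdb\parcel_values',    # assumed path
          fc_join_keyfield='parcel_id',
          fields_to_join=['land_value', 'tax_rate'])      # hypothetical fields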
Ejemplo n.º 26
0
def process_zone(zone_fc, output, zone_name, zone_id_field, zone_name_field,
                 other_keep_fields, clip_hu8, lagosne_name):
    # dissolve fields by the field that zone_id is based on (the field that identifies a unique zone)
    dissolve_fields = [
        f for f in "{}, {}, {}".format(zone_id_field, zone_name_field,
                                       other_keep_fields).split(', ')
        if f != ''
    ]
    print("Dissolving...")
    dissolve1 = DM.Dissolve(zone_fc, 'dissolve1', dissolve_fields)

    # update name field to match our standard
    DM.AlterField(dissolve1, zone_name_field, 'name')

    # original area

    DM.AddField(dissolve1, 'originalarea', 'DOUBLE')
    DM.CalculateField(dissolve1, 'originalarea', '!shape.area@hectares!',
                      'PYTHON')

    #clip
    print("Clipping...")
    clip = AN.Clip(dissolve1, MASTER_CLIPPING_POLY, 'clip')
    if clip_hu8 == 'Y':
        final_clip = AN.Clip(clip, HU8_OUTPUT, 'final_clip')
    else:
        final_clip = clip

    print("Selecting...")
    # calc new area, orig area pct, compactness
    DM.AddField(final_clip, 'area_ha', 'DOUBLE')
    DM.AddField(final_clip, 'originalarea_pct', 'DOUBLE')
    DM.AddField(final_clip, 'compactness', 'DOUBLE')
    DM.JoinField(final_clip, zone_id_field, dissolve1, zone_id_field,
                 'originalarea_pct')

    uCursor_fields = [
        'area_ha', 'originalarea_pct', 'originalarea', 'compactness',
        'SHAPE@AREA', 'SHAPE@LENGTH'
    ]
    with arcpy.da.UpdateCursor(final_clip, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, comp, shape_area, shape_length = row
            area = shape_area / 10000  # convert from m2 to hectares
            orig_area_pct = round(100 * area / orig_area, 2)
            comp = 4 * 3.14159 * shape_area / (shape_length**2)
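            # isoperimetric quotient: 1.0 for a circle, approaching 0 for thin slivers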
            row = (area, orig_area_pct, orig_area, comp, shape_area,
                   shape_length)
            uCursor.updateRow(row)

    # If zones have <5% of their original area AND compactness <.2 (compactness ranges from 0-1)
    # AND ALSO are no bigger than 500 sq. km (saves Chippewa County and a WWF zone), filter them out.
    # Save the eliminated polygons to the temp database as a separate layer for inspection.

    # HU4 and HU12 are processed differently so they match the extent of HU8 more closely but still throw out tiny slivers;
    # counties are only eliminated if a tiny, tiny, tiny sliver (so: none should be eliminated)
    if zone_name not in ('hu4', 'hu12', 'county'):
        selected = AN.Select(
            final_clip, 'selected',
            "originalarea_pct >= 5 OR compactness >= .2 OR area_ha > 50000")
        not_selected = AN.Select(
            final_clip, '{}_not_selected'.format(output),
            "originalarea_pct < 5 AND compactness < .2 AND area_ha < 50000")

    else:
        selected = final_clip
    # eliminate small slivers, re-calc area fields, add perimeter and multipart flag.
    # This leaves the occasional errant sliver, but parts over 25 hectares are more often valid,
    # so the threshold is CONSERVATIVE
    print("Trimming...")
    trimmed = DM.EliminatePolygonPart(selected,
                                      'trimmed',
                                      'AREA',
                                      '25 Hectares',
                                      part_option='ANY')

    # gather up a few calculations into one cursor because this is taking too long over the HU12 layer
    DM.AddField(trimmed, 'perimeter_m', 'DOUBLE')
    DM.AddField(trimmed, 'multipart', 'TEXT', field_length=1)
    uCursor_fields = [
        'area_ha', 'originalarea_pct', 'originalarea', 'perimeter_m',
        'multipart', 'SHAPE@'
    ]
    with arcpy.da.UpdateCursor(trimmed, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, perim, multipart, shape = row
            area = shape.area / 10000  # convert to hectares from m2
            orig_area_pct = round(100 * area / orig_area, 2)
            perim = shape.length

            # multipart flag calc
            if shape.isMultipart:
                multipart = 'Y'
            else:
                multipart = 'N'
            row = (area, orig_area_pct, orig_area, perim, multipart, shape)
            uCursor.updateRow(row)

    # delete intermediate fields
    DM.DeleteField(trimmed, 'compactness')
    DM.DeleteField(trimmed, 'originalarea')

    print("Zone IDs....")
    # link to LAGOS-NE zone IDs
    DM.AddField(trimmed, 'zoneid', 'TEXT', field_length=40)
    trimmed_lyr = DM.MakeFeatureLayer(trimmed, 'trimmed_lyr')
    if lagosne_name:
        # join to the old master GDB path on the same master field and copy in the ids
        old_fc = os.path.join(LAGOSNE_GDB, lagosne_name)
        old_fc_lyr = DM.MakeFeatureLayer(old_fc, 'old_fc_lyr')
        if lagosne_name in ('STATE', 'COUNTY'):
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr, 'FIPS')
        else:
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr,
                       zone_id_field)  # usually works because same source data

        # copy
        DM.CalculateField(trimmed_lyr, 'zoneid',
                          '!{}.ZoneID!.lower()'.format(lagosne_name), 'PYTHON')
        DM.RemoveJoin(trimmed_lyr)

    # generate new zone ids
    old_ids = [row[0] for row in arcpy.da.SearchCursor(trimmed, 'zoneid')]
    with arcpy.da.UpdateCursor(trimmed, 'zoneid') as cursor:
        counter = 1
        for row in cursor:
            if not row[0]:  # no existing ID borrowed from LAGOS-NE, assign a new one
                new_id = '{name}_{num}'.format(name=zone_name, num=counter)

                # ensures new ids don't re-use old numbers but fills in all positive numbers eventually
                while new_id in old_ids:
                    counter += 1
                    new_id = '{name}_{num}'.format(name=zone_name, num=counter)
                row[0] = new_id
                cursor.updateRow(row)
                counter += 1

    print("Edge flags...")
    # add flag fields
    DM.AddField(trimmed, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(trimmed, 'oncoast', 'TEXT', field_length=2)

    # identify border zones
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(trimmed_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # State?
    DM.AddField(trimmed, 'state', 'TEXT', field_length=2)
    state_center = arcpy.SpatialJoin_analysis(
        trimmed,
        STATE_FC,
        'state_center',
        join_type='KEEP_COMMON',
        match_option='HAVE_THEIR_CENTER_IN')
    state_intersect = arcpy.SpatialJoin_analysis(trimmed,
                                                 STATE_FC,
                                                 'state_intersect',
                                                 match_option='INTERSECT')
    state_center_dict = {
        row[0]: row[1]
        for row in arcpy.da.SearchCursor(state_center, ['ZoneID', 'STUSPS'])
    }
    state_intersect_dict = {
        row[0]: row[1]
        for row in arcpy.da.SearchCursor(state_intersect, ['ZoneID', 'STUSPS'])
    }
    with arcpy.da.UpdateCursor(trimmed, ['ZoneID', 'state']) as cursor:
        for updateRow in cursor:
            keyValue = updateRow[0]
            if keyValue in state_center_dict:
                updateRow[1] = state_center_dict[keyValue]
            else:
                updateRow[1] = state_intersect_dict[keyValue]
            cursor.updateRow(updateRow)

    # glaciation status?
    # TODO as version 0.6

    # preface the names with the zones
    DM.DeleteField(trimmed, 'ORIG_FID')
    fields = [
        f.name for f in arcpy.ListFields(trimmed, '*')
        if f.type not in ('OID',
                          'Geometry') and not f.name.startswith('Shape_')
    ]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            DM.AlterField(trimmed, f, new_fname, clear_field_alias='TRUE')
        # sick of debugging the required field message-I don't want to change required fields anyway
        except:
            pass

    DM.CopyFeatures(trimmed, output)

    # cleanup
    lyr_objects = [
        lyr_object for var_name, lyr_object in locals().items()
        if var_name.endswith('lyr')
    ]
    temp_fcs = arcpy.ListFeatureClasses('*')
    for l in lyr_objects + temp_fcs:
        DM.Delete(l)
Ejemplo n.º 27
0
#     # make a dict from combining the INTERSECT and CLOSEST results
#     zone_dict = {r[0]:r[1] for r in arcpy.da.SearchCursor(join_fc, ['lagoslakeid', zoneid1])}
#     for k, v in zone_dict.items():
#         if not v:
#             zone_dict[k] = update_vals[k]
#
#     with arcpy.da.UpdateCursor(lakes_fc, ['lagoslakeid', zoneid]) as u_cursor:
#         for row in u_cursor:
#             row[1] = zone_dict[row[0]]
#             u_cursor.updateRow(row)
#
#     DM.Delete('in_memory')

# update the main lakes layer
lakes_poly = r'D:\Continental_Limnology\Data_Working\LAGOS_US_GIS_Data_v0.6.gdb\Lakes\LAGOS_US_All_Lakes_1ha'
lakes_point = r'D:\Continental_Limnology\Data_Working\LAGOS_US_GIS_Data_v0.6.gdb\Lakes\LAGOS_US_All_Lakes_1ha_points'

#zones = ['hu12', 'hu8', 'hu4', 'county', 'state', 'epanutr4', 'wwf', 'mlra', 'bailey', 'neon']
zones = ['omernik3', 'epanutr']
zoneids = ['{}_zoneid'.format(z) for z in zones]

point_dict = {r[0]:r[1:] for r in arcpy.da.SearchCursor(lakes_point, ['lagoslakeid'] + zoneids)}
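# point_dict maps lagoslakeid -> a tuple of zone ids, one per zone scheme,
# e.g. {42: ('omernik3_7', 'epanutr_12')} (hypothetical values)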

for z in zoneids:
    if not arcpy.ListFields(lakes_poly, z):
        DM.AddField(lakes_poly, z, 'TEXT', field_length=20)
with arcpy.da.UpdateCursor(lakes_poly, ['lagoslakeid'] + zoneids) as u_cursor:
    for row in u_cursor:
        row[1:] = point_dict[row[0]]
        u_cursor.updateRow(row)
Ejemplo n.º 28
0
# cycle through each zone raster
for i in range(len(hypZ)):
    print("working on " + hypZ[i])
    # don't assume i is the class level -- extract class here
    classLevel = hypZ[i][-1:]
    curZo = wrk + "/zon_C" + classLevel
    # cycle through each edm
    for j in range(len(rasL)):
        if j == 0:
            inRas = inPath + "/" + rasL[j] + "_c.tif"
            curZoT_out = wrk + "/zonTab_C" + str(i) + "_" + str(j)
            print(".. zoning " + rasL[j])
            curZoT = ZonalStatisticsAsTable(hypZ[i], "Value", inRas,
                                            curZoT_out, "DATA", "MAXIMUM")
            man.CopyRaster(hypZ[i], curZo)
            man.AddField(curZo, "spp0", "TEXT", "", "", 251)
            man.JoinField(curZo, "Value", curZoT, "VALUE", ["MAX"])
            expr = "str( !MAX! )"
            man.CalculateField(curZo, "spp0", expr, "PYTHON")
            man.DeleteField(curZo, "MAX")
            man.Delete(curZoT_out)
        else:
            #jminus = j-1
            inRas = inPath + "/" + rasL[j] + "_c.tif"
            print(".. zoning " + rasL[j])
            curZoT_out = wrk + "/zonTab_C" + str(i) + "_" + str(j)
            curZoT = ZonalStatisticsAsTable(hypZ[i], "Value", inRas,
                                            curZoT_out, "DATA", "MAXIMUM")
            man.JoinField(curZo, "Value", curZoT, "VALUE", ["MAX"])
            expr = "str(!spp0!) + str(!MAX!)"
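            # appends this raster's 0/1 MAX onto the running presence string, e.g. "10" + "1" -> "101"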
            man.CalculateField(curZo, "spp0", expr, "PYTHON")