Esempio n. 1
0
def find_states(fc, state_fc):
    """Populate a '{fc}_states' field with the space-delimited abbreviations
    of every state polygon intersecting each feature.

    fc: feature class to annotate; replaced in place by the join result.
    state_fc: states polygon feature class; must have a text field 'states'
        (length 255) holding state abbreviations.
    """
    states_field = '{}_states'.format(os.path.basename(fc))
    # Drop a stale result field left over from a previous run.
    if arcpy.ListFields(fc, states_field):
        DM.DeleteField(fc, states_field)

    # reverse buffer the states slightly to avoid "D", "I", "J"  situations in "INTERSECT" illustration
    # from graphic examples of ArcGIS join types "Select polygon using polygon" section in Help


    # make a field mapping that gathers all the intersecting states into one new value
    # BUG FIX: original used the Python 2-only '<>' operator; '!=' is the
    # equivalent that is valid in both Python 2 and 3.
    field_list = [f.name for f in arcpy.ListFields(fc)
                  if f.type != 'OID' and f.type != 'Geometry']
    field_mapping = arcpy.FieldMappings()
    for f in field_list:
        fmap = arcpy.FieldMap()  # renamed from 'map' to avoid shadowing the builtin
        fmap.addInputField(fc, f)
        field_mapping.addFieldMap(fmap)
    map_states = arcpy.FieldMap()
    map_states.addInputField(state_fc, 'states')
    map_states.mergeRule = 'Join'       # concatenate all intersecting values
    map_states.joinDelimiter = ' '
    field_mapping.addFieldMap(map_states)

    # perform join and use output to replace original fc
    spjoin = AN.SpatialJoin(fc, state_fc, 'in_memory/spjoin_intersect', 'JOIN_ONE_TO_ONE',
                            field_mapping=field_mapping, match_option='INTERSECT')
    DM.AlterField(spjoin, 'states', new_field_name=states_field, clear_field_alias=True)
    DM.Delete(fc)
    DM.CopyFeatures(spjoin, fc)
    DM.Delete(spjoin)
def drought_analysis(date_string):
    """Download the USDM drought-monitor shapefile for *date_string*
    (e.g. '20140311'), join it to winery points, publish the result to AGOL
    as a feature service, enrich it, and clean up local intermediates.

    Side effects: downloads a zip to C:\\Temp, extracts to working_dir,
    writes layer files, publishes to ArcGIS Online.
    """
    ARCPY.env.overwriteOutput = True
    working_dir = r"C:\Data\git\devsummit-14-python"
    zip_name = "USDM_" + date_string + "_M.zip"
    url = "http://droughtmonitor.unl.edu/data/shapefiles_m/" + zip_name
    mxd_path = OS.path.join(working_dir, "MapTemplate.mxd")
    lyr_template = OS.path.join(working_dir, "CurrentDroughtConditions.lyr")
    zip_name = OS.path.basename(url)

    # Download and extract the drought shapefile.
    drought_zip_file = URLLIB.URLopener()
    dzf = drought_zip_file.retrieve(url, OS.path.join(r"C:\Temp", zip_name))
    zf = ZIPFILE.ZipFile(dzf[0], "r")
    shp_name = [n for n in zf.namelist() if n.endswith('.shp')][0]
    zf.extractall(working_dir)

    drought = OS.path.splitext(shp_name)[0]
    DM.MakeFeatureLayer(OS.path.join(working_dir, shp_name), drought)

    #### Add Winery Data ####
    beerWinePath = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                "BeerWine")
    intermediate_output = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                       "BeerWineDrought")
    wine = "BeerWine"
    wine_drought = "Wine_Drought"
    DM.MakeFeatureLayer(beerWinePath, wine)
    DM.SelectLayerByAttribute(wine, "NEW_SELECTION", "Type = 'Winery'")
    ANALYSIS.SpatialJoin(drought, wine, intermediate_output, "JOIN_ONE_TO_ONE",
                         "KEEP_ALL")
    # Best-effort: NAME may not exist on the join output.
    try:
        DM.DeleteField(intermediate_output, "NAME")
    except Exception:
        pass
    final_wine_drought = "Wine_Drought_Summary"
    DM.MakeFeatureLayer(intermediate_output, final_wine_drought)

    lf = DM.SaveToLayerFile(
        final_wine_drought,
        OS.path.join(working_dir, '{}.lyr'.format(final_wine_drought)))
    DM.ApplySymbologyFromLayer(lf, lyr_template)

    pw = "PASSWORDHERE"  # GETPASS.getpass("Enter AGOL password:")
    # BUG FIX: service_name was garbled onto the password line in the
    # original source, leaving it undefined (NameError below).
    service_name = "Drought_Wine_Service"

    agol = AGOLHandler("USERNAMEHERE", pw, service_name)

    publish_service(agol, service_name, mxd_path, lf[0])
    TIME.sleep(5)
    fs_url = agol.findItemURL('Feature Service')
    TIME.sleep(35)
    gp_url, jsondata = enrich(agol, fs_url + '/0',
                              '{}_Enriched'.format(service_name), agol.token)
    check_job_status(gp_url, jsondata, agol.token)

    # Remove local intermediates.
    DM.Delete(OS.path.join(working_dir, shp_name))
    DM.Delete(OS.path.join(working_dir, lf[0]))
Esempio n. 3
0
def create_points_feature_class(fc, sr=None):
    """Create an empty GPX-style point feature class at *fc*.

    The class is first built in the scratch workspace with the standard
    ELEVATION/TIME/NAME/DESCRIPTION/SYMBOL/TYPE/SAMPLES schema, then copied
    to *fc* when the two paths differ. Returns *fc*, or None when no
    spatial reference is available (an arcpy error is reported).
    """
    arcpy.env.addOutputsToMap = False

    # Fall back to the environment's output coordinate system.
    sr = sr or arcpy.env.outputCoordinateSystem
    if sr is None:
        arcpy.AddError('No spatial reference system.')
        return None

    scratch_fc = os.path.join(arcpy.env.scratchWorkspace, os.path.basename(fc))

    out_path, out_name = os.path.split(scratch_fc)
    mgmt.CreateFeatureclass(out_path, out_name, 'POINT', spatial_reference=sr)

    # (field name, field type, text length or None)
    field_specs = [
        ('ELEVATION', 'DOUBLE', None),
        ('TIME', 'TEXT', 64),
        ('NAME', 'TEXT', 64),
        ('DESCRIPTION', 'TEXT', 64),
        ('SYMBOL', 'TEXT', 64),
        ('TYPE', 'TEXT', 64),
        ('SAMPLES', 'LONG', None),
    ]
    for fname, ftype, flen in field_specs:
        if flen is None:
            mgmt.AddField(scratch_fc, fname, ftype)
        else:
            mgmt.AddField(scratch_fc, fname, ftype, field_length=flen)

    # Move the result into place and drop the scratch copy.
    if fc != scratch_fc:
        mgmt.Copy(scratch_fc, fc)
        mgmt.Delete(scratch_fc)

    return fc
Esempio n. 4
0
def process_ws(ws_fc, zone_name):
    """Post-process a watershed zone feature class in place: assign zone
    ids, flag multipart/land-border/coastal zones, add state and glaciation
    attributes, then prefix all field names with *zone_name*.

    Relies on module-level constants LAND_BORDER, COASTLINE, STATES_GEO and
    the sibling helpers find_states() and calc_glaciation().
    """
    # generate new zone ids
    DM.AddField(ws_fc, 'zoneid', 'TEXT', field_length=10)
    DM.CalculateField(ws_fc, 'zoneid', '!lagoslakeid!', 'PYTHON')
    ws_fc_lyr = DM.MakeFeatureLayer(ws_fc)

    # multipart flag: 'Y'/'N' per feature
    DM.AddField(ws_fc, 'ismultipart', 'TEXT', field_length=2)
    with arcpy.da.UpdateCursor(ws_fc, ['ismultipart', 'SHAPE@']) as u_cursor:
        for row in u_cursor:
            row[0] = 'Y' if row[1].isMultipart else 'N'
            u_cursor.updateRow(row)

    print("Edge flags...")
    # add flag fields
    DM.AddField(ws_fc, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(ws_fc, 'oncoast', 'TEXT', field_length=2)

    # identify border zones: mark intersecting 'Y', invert selection, mark 'N'
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones (same select/invert pattern)
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # States (removed an unused hard-coded 'state_geo' path; the module
    # constant STATES_GEO is what is actually used)
    find_states(ws_fc, STATES_GEO)
    # glaciation status
    calc_glaciation(ws_fc, 'zoneid')

    # preface the names with the zones
    DM.DeleteField(ws_fc, 'ORIG_FID')
    fields = [f.name for f in arcpy.ListFields(ws_fc, '*')
              if f.type not in ('OID', 'Geometry')
              and not f.name.startswith('Shape_')]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            DM.AlterField(ws_fc, f, new_fname, clear_field_alias='TRUE')
        # required fields (e.g. Shape) cannot be renamed; skip them quietly
        except Exception:
            pass

    # cleanup: delete every local layer object (variable names ending 'lyr')
    lyr_objects = [lyr_object for var_name, lyr_object in locals().items()
                   if var_name.endswith('lyr')]
    for l in lyr_objects:
        DM.Delete(l)
Esempio n. 5
0
def cleanupNetLayer(netLayer):
    """Tries to delete Network OD Cost Layers.

    Deletion is deliberately best-effort: failures are ignored. The handler
    is narrowed to Exception so a bare 'except' no longer swallows
    KeyboardInterrupt/SystemExit.

    INPUTS:
    netLayer (str): network OD cost layer
    """
    try:
        DM.Delete(netLayer)
    except Exception:
        pass
def get_percentile(in_file, percentile=0.5, multiplier=100, skip_value=None):
    """Return the raster cell value at *percentile* of the value distribution.

    The raster is scaled by *multiplier* and truncated to integers so a
    raster attribute table (Value/Count histogram) can be built; the result
    is divided back by *multiplier* before returning. Rows whose scaled
    Value equals *skip_value* are excluded from both passes.
    """
    int_rast = Int(Times(in_file, multiplier))

    #  no idea why this is needed, but something needs to tickle int_rast
    print(int_rast)

    arcmgt.BuildRasterAttributeTable(int_rast)

    table_view = "table_view%d" % int(random.random() * 1000)
    arcmgt.MakeTableView(int_rast, table_view)

    # First pass: total count of non-skipped cells.
    cum_sum = 0
    for row in arcpy.SearchCursor(table_view):
        val = row.getValue("Value")
        count = row.getValue("Count")
        if val == skip_value:
            continue
        cum_sum += count

    target = cum_sum * percentile

    # Second pass: walk the histogram in value order until the cumulative
    # count reaches the target; that Value is the percentile.
    cum_sum = 0
    val = 0
    for row2 in arcpy.SearchCursor(table_view):
        val = row2.getValue("Value")
        count = row2.getValue("Count")
        if val == skip_value:
            continue
        cum_sum += count
        if cum_sum >= target:
            break

    # Undo the integer scaling.
    val = float(val) / multiplier

    arcmgt.Delete(table_view)

    return val
Esempio n. 7
0
def setupOptHotSpot():
    """Retrieves the parameters from the User Interface and executes the
    appropriate commands."""

    #### Input Parameters ####
    inputFC = ARCPY.GetParameterAsText(0)
    outputFC = ARCPY.GetParameterAsText(1)
    varName = UTILS.getTextParameter(2, fieldName=True)
    aggMethod = UTILS.getTextParameter(3)
    # Map the UI aggregation label to its numeric code via the module-level
    # 'aggTypes' dict; default to 1 when no method was chosen.
    if aggMethod:
        aggType = aggTypes[aggMethod.upper()]
    else:
        aggType = 1
    boundaryFC = UTILS.getTextParameter(4)
    polygonFC = UTILS.getTextParameter(5)
    outputRaster = UTILS.getTextParameter(6)

    # Wrap the tools so they run without the current environment extent.
    makeFeatureLayerNoExtent = UTILS.clearExtent(DM.MakeFeatureLayer)
    selectLocationNoExtent = UTILS.clearExtent(DM.SelectLayerByLocation)
    featureLayer = "InputOHSA_FC"
    makeFeatureLayerNoExtent(inputFC, featureLayer)
    # boundaryFC and polygonFC are mutually exclusive: whichever branch
    # runs first clears the other parameter.
    if boundaryFC:
        selectLocationNoExtent(featureLayer, "INTERSECT", boundaryFC, "#",
                               "NEW_SELECTION")
        polygonFC = None

    if polygonFC:
        selectLocationNoExtent(featureLayer, "INTERSECT", polygonFC, "#",
                               "NEW_SELECTION")
        boundaryFC = None

    #### Create SSDO ####
    ssdo = SSDO.SSDataObject(featureLayer,
                             templateFC=outputFC,
                             useChordal=True)

    # Constructing OptHotSpots runs the analysis; the instance itself is
    # not used further here.
    hs = OptHotSpots(ssdo,
                     outputFC,
                     varName=varName,
                     aggType=aggType,
                     polygonFC=polygonFC,
                     boundaryFC=boundaryFC,
                     outputRaster=outputRaster)

    DM.Delete(featureLayer)
Esempio n. 8
0
def calc_glaciation(fc, zone_field):
    """Add a '{fc}_glaciatedlatewisc' text field classifying each zone as
    'Glaciated', 'Not_Glaciated', or 'Partially_Glaciated' from the percent
    of its area overlapping the module-level GLACIAL_EXTENT layer.
    """
    glacial_field = '{}_glaciatedlatewisc'.format(os.path.basename(fc))

    # Tabulate percent overlap of each zone with the glacial extent.
    AN.TabulateIntersection(fc, zone_field, GLACIAL_EXTENT,
                            'in_memory/glacial_tab')
    pct_by_zone = dict(
        arcpy.da.SearchCursor('in_memory/glacial_tab',
                              [zone_field, 'PERCENTAGE']))

    DM.AddField(fc, glacial_field, 'TEXT', field_length=20)
    with arcpy.da.UpdateCursor(fc, [zone_field, glacial_field]) as cursor:
        for zoneid, _ in cursor:
            pct = pct_by_zone.get(zoneid)
            # Missing zones never intersected the glacial extent at all.
            if pct is None or pct < 0.01:
                label = 'Not_Glaciated'
            elif pct >= 99.99:
                label = 'Glaciated'
            else:
                label = 'Partially_Glaciated'
            cursor.updateRow((zoneid, label))
    DM.Delete('in_memory/glacial_tab')
Esempio n. 9
0
# NOTE(review): this fragment is the tail of a larger script — myWSTable,
# script, path, and the timing marks a/j are defined earlier; confirm
# against the full file.
outWeighted = WeightedSum(myWSTable)
k = time.perf_counter() / 60  # timing mark, in minutes
print(f"Weighted sum complete: {round(k - j, 2)} minutes")
# Save the output
output = script + r"/modeloutput"
outWeighted.save(output)

#Make contour around high habitat probability model regions with highest kernel density
points = script + r"\points.shp"
points2 = script + r"\points2.shp"
CO.RasterToPoint(output, points, "Value")
# Keep only high-probability points (grid_code > 0.9) for the density step.
AN.Select(points, points2, "grid_code > 0.9")
kernel = KernelDensity(points2, "NONE", 100, None, "SQUARE_KILOMETERS",
                       "DENSITIES", "PLANAR")
contour = script + r"\contour.shp"
DDD.Contour(kernel, contour, 1000, 20, 1, "CONTOUR", None)
l = time.perf_counter() / 60
print(f"Contour complete: {round(l - k, 2)} minutes")

# Clean things up a bit: remove intermediates that exist on disk.
if arcpy.Exists(path):
    DM.Delete(path)
if arcpy.Exists(points):
    DM.Delete(points)
if arcpy.Exists(points2):
    DM.Delete(points2)

#All done
print(f"Model complete, total time elapsed: {round(l - a, 2)} minutes")
print("Files located at: " + script)
Esempio n. 10
0
# For each raster class: extract it, polygonize, buffer, explode to
# singleparts, drop small slivers, and tag with a Class field.
# NOTE(review): 'classes', 'outReclass', and the 'man' alias come from
# earlier in the script; the loop body appears truncated after L375.
for i in range(len(classes)):
    cl_str = str(classes[i])
    cl_int = classes[i]
    print("extracting class " + cl_str)
    selStr = '"Value" = ' + cl_str
    outR = ExtractByAttributes(outReclass, selStr)
    outName = "hyp_Class_" + cl_str
    outR.save(outName)
    print(" .. to poly")
    outPoly = "hyp_pol_Class_" + cl_str
    arcpy.RasterToPolygon_conversion(outR, outPoly, "NO_SIMPLIFY", "VALUE")
    print(" .. buffering")
    outBuff = "hyp_pol_buff_Class_" + cl_str
    arcpy.Buffer_analysis(outPoly, outBuff, 32, "FULL", "ROUND", "ALL")
    man.Delete(outPoly)
    print(" .. removing singletons")
    outMultiPart = "hyp_pol_buff_m_Class_" + cl_str
    arcpy.MultipartToSinglepart_management(outBuff, outMultiPart)
    man.Delete(outBuff)
    tmpSelLyr = "tmpSelectSet"
    # Select and delete polygons smaller than 7900 sq units (slivers).
    expr = '"Shape_Area" < 7900'
    arcpy.MakeFeatureLayer_management(outMultiPart, tmpSelLyr)
    arcpy.SelectLayerByAttribute_management(tmpSelLyr, "NEW_SELECTION", expr)
    if int(arcpy.GetCount_management(tmpSelLyr).getOutput(0)) > 0:
        arcpy.DeleteFeatures_management(tmpSelLyr)
    #add a field to the attribute table
    fldName = "Class"
    fldVal = cl_int
    arcpy.AddField_management(outMultiPart, fldName, "SHORT")
    #expr = '"' + cl_int + '"'
Esempio n. 11
0
            # NOTE(review): fragment starts mid-loop; the enclosing 'for'
            # and the first branch of this if/else are not visible here.
            man.CalculateField(curHyp, "spp0", expr, "PYTHON")
        else:
            # Combine the previous cumulative raster with the current one
            # and append this raster's digit to the 'spp0' code string.
            iminus = i - 1
            prevHyp = wrk + "/hyp" + str(iminus)
            print("working on " + elem + ", " + str(i) + " of " + str(listLen))
            curHyp = Combine([prevHyp, rasName])
            curHyp.save(wrk + "/hyp" + str(i))
            man.AddField(curHyp, "spp0", "TEXT", "", "", 251)
            jval = "hyp" + str(iminus)
            man.JoinField(curHyp, jval, prevHyp, "VALUE", ["spp0"])
            # Field names are truncated to 11 chars by the join.
            rasNoDot = rasName[0:rasName.find(".")]
            newCol = rasNoDot[0:11].upper()
            expr = "str(!spp0_1!) + str(!" + newCol + "!)"
            man.CalculateField(curHyp, "spp0", expr, "PYTHON")
            #clean up
            man.Delete(prevHyp)

# clean up a little more
man.DeleteField(curHyp, [jval.upper(), newCol, "spp0_1"])

#needed to continue below if you comment out the previous loop for any reason
#curHyp = wrk + "/hyp" + str(len(codeL)-1)

# expand information out to one col for each spp.
print("adding columns...")
for i in range(len(codeL)):
    newCol = codeL[i].upper()
    print("..." + newCol)
    man.AddField(curHyp, newCol, "SHORT")
    #expr="str(!supp0!)[i:i+1]"
    # One character of the packed 'spp0' string per species column.
    expr = "str(!spp0!)[" + str(i) + ":" + str(i + 1) + "]"
def flatten_overlaps(zone_fc,
                     zone_field,
                     output_fc,
                     output_table,
                     cluster_tolerance=' 3 Meters'):
    """Split overlapping polygons into non-overlapping ("flat") polygons.

    Self-unions zone_fc so every overlap becomes its own polygon, groups
    geometrically identical pieces with FindIdentical, assigns each group a
    new flat zone id, then writes:
      * output_fc    -- the flat polygons (one per identical-shape group)
      * output_table -- rows mapping each flat id back to original zone ids

    Runs in the 'in_memory' workspace and restores the caller's workspace.
    Returns output_fc.
    """
    orig_env = arcpy.env.workspace
    arcpy.env.workspace = 'in_memory'

    objectid = [f.name for f in arcpy.ListFields(zone_fc)
                if f.type == 'OID'][0]
    zone_type = [f.type for f in arcpy.ListFields(zone_fc, zone_field)][0]
    fid1 = 'FID_{}'.format(os.path.basename(zone_fc))
    flat_zoneid = 'flat{}'.format(zone_field)
    flat_zoneid_prefix = 'flat{}_'.format(zone_field.replace('_zoneid', ''))

    # Union with FID_Only (A)
    arcpy.AddMessage("Splitting overlaps in polygons...")
    zoneid_dict = {
        r[0]: r[1]
        for r in arcpy.da.SearchCursor(zone_fc, [objectid, zone_field])
    }
    self_union = AN.Union([zone_fc],
                          'self_union',
                          'ONLY_FID',
                          cluster_tolerance=cluster_tolerance)

    # #If you don't run this section, Find Identical fails with error 999999. Seems to have to do with small slivers
    # #having 3 vertices and/or only circular arcs in the geometry.
    arcpy.AddMessage("Repairing self-union geometries...")
    # DM.AddGeometryAttributes(self_union, 'POINT_COUNT; AREA')
    # union_fix = DM.MakeFeatureLayer(self_union, 'union_fix', where_clause='PNT_COUNT <= 10 OR POLY_AREA < 5000')
    # arcpy.Densify_edit(union_fix, 'DISTANCE', distance = '1 Meters', max_deviation='1 Meters')  # selection ON, edits self_union disk
    DM.RepairGeometry(
        self_union, 'DELETE_NULL'
    )  # eliminate empty geoms. selection ON, edits self_union disk
    # for field in ['PNT_COUNT', 'POLY_AREA']:
    #     DM.DeleteField(self_union, field)

    # Find Identical by Shape (B)
    if arcpy.Exists('identical_shapes'):
        DM.Delete(
            'identical_shapes'
        )  # causes failure in FindIdentical even when overwrite is allowed
    identical_shapes = DM.FindIdentical(self_union, 'identical_shapes',
                                        'Shape')

    # Join A to B and calc flat[zone]_zoneid = FEAT_SEQ (C)
    DM.AddField(self_union, flat_zoneid, 'TEXT', field_length=20)
    union_oid = [
        f.name for f in arcpy.ListFields(self_union) if f.type == 'OID'
    ][0]
    identical_shapes_dict = {
        r[0]: r[1]
        for r in arcpy.da.SearchCursor(identical_shapes,
                                       ['IN_FID', 'FEAT_SEQ'])
    }
    with arcpy.da.UpdateCursor(self_union,
                               [union_oid, flat_zoneid]) as u_cursor:
        for row in u_cursor:
            row[1] = '{}{}'.format(flat_zoneid_prefix,
                                   identical_shapes_dict[row[0]])
            u_cursor.updateRow(row)

    # Add the original zone ids and save to table (E)
    arcpy.AddMessage("Assigning temporary IDs to split polygons...")
    unflat_table = DM.CopyRows(self_union, 'unflat_table')
    DM.AddField(unflat_table, zone_field,
                zone_type)  # default text length of 50 is fine if needed
    with arcpy.da.UpdateCursor(unflat_table, [fid1, zone_field]) as u_cursor:
        for row in u_cursor:
            row[1] = zoneid_dict[row[0]]  # assign zone id
            u_cursor.updateRow(row)

    # Delete Identical (C) (save as flat[zone]): keep the first polygon of
    # each identical-shape group, drop the duplicates.
    with arcpy.da.UpdateCursor(self_union, 'OID@') as cursor:
        # PERF FIX: use a set -- the original list membership test made
        # this loop O(n^2) over the union's row count.
        visited = set()
        for row in cursor:
            feat_seq = identical_shapes_dict[row[0]]
            if feat_seq in visited:
                cursor.deleteRow()
            visited.add(feat_seq)

    DM.DeleteField(self_union, fid1)
    DM.DeleteField(unflat_table, fid1)

    # save outputs
    output_fc = DM.CopyFeatures(self_union, output_fc)
    output_table = DM.CopyRows(unflat_table, output_table)

    # cleanup
    for item in [self_union, identical_shapes, unflat_table]:
        DM.Delete(item)
    arcpy.env.workspace = orig_env

    return output_fc
Esempio n. 13
0
    def unflatten(intermediate_table):
        """Summarize statistics computed on flat (split) zones back onto the
        original overlapping zone ids, weighting means by CELL_COUNT.

        Uses closure variables from the enclosing function: zone_field,
        zone_fc, out_table, unflat_table. Deletes *intermediate_table*.
        Returns [unflat_result table, count of zones with no raster data].
        """
        flat_zoneid = zone_field
        unflat_zoneid = zone_field.replace('flat', '')
        zone_type = [f.type for f in arcpy.ListFields(zone_fc, flat_zoneid)][0]

        # Set up the output table (can't do this until the prior tool is run)
        unflat_result = DM.CreateTable('in_memory',
                                       os.path.basename(out_table))

        # get the fields to add to the table
        editable_fields = [
            f for f in arcpy.ListFields(intermediate_table)
            if f.editable and f.name.lower() != flat_zoneid.lower()
        ]

        # populate the new table schema
        DM.AddField(unflat_result, unflat_zoneid, zone_type)
        for f in editable_fields:
            DM.AddField(unflat_result, f.name, f.type, field_length=f.length)

        # map original zone ids to new zone ids
        original_flat = defaultdict(list)
        with arcpy.da.SearchCursor(unflat_table,
                                   [unflat_zoneid, flat_zoneid]) as cursor:
            for row in cursor:
                if row[1] not in original_flat[row[0]]:
                    original_flat[row[0]].append(row[1])

        # Use CELL_COUNT as weight for means to calculate final values for each zone.
        fixed_fields = [
            unflat_zoneid, 'ORIGINAL_COUNT', 'CELL_COUNT', 'datacoveragepct'
        ]
        other_field_names = [
            f.name for f in editable_fields if f.name not in fixed_fields
        ]
        i_cursor = arcpy.da.InsertCursor(
            unflat_result,
            fixed_fields + other_field_names)  # open output table cursor
        flat_stats = {
            r[0]: r[1:]
            for r in arcpy.da.SearchCursor(intermediate_table, [
                flat_zoneid, 'ORIGINAL_COUNT', 'CELL_COUNT', 'datacoveragepct'
            ] + other_field_names)
        }

        count_diff = 0  # zones that ended up with no rasterized data
        for zid, unflat_ids in original_flat.items():
            # skip flat polys that were never rasterized
            valid_unflat_ids = [uid for uid in unflat_ids
                                if uid in flat_stats]
            # ORIGINAL_COUNT is index 0 in the flat_stats values
            area_vec = [flat_stats[uid][0] for uid in valid_unflat_ids]
            cell_vec = [flat_stats[uid][1] for uid in valid_unflat_ids]
            # datacoveragepct gets special handling
            coverage_vec = [flat_stats[uid][2] for uid in valid_unflat_ids]
            # "the rest": one stat vector per flat id
            stat_vectors_by_id = [
                flat_stats[uid][3:] for uid in valid_unflat_ids
            ]

            # calc the new summarized values
            original_count = sum(
                filter(None, area_vec)
            )  # None area is functionally equivalent to 0, all Nones = 0 too
            cell_count = sum(filter(None, cell_vec))
            if cell_count > 0:
                weighted_coverage = sum(
                    [a * b
                     for a, b in zip(area_vec, coverage_vec)]) / original_count

                # this calculation accounts for fractional missing values, both kinds (whole zone is no data, or zone
                # was missing some data and had data coverage % < 100). This is done by converting None to 0
                # and by using the cell_count (count of cells with data present)
                # instead of the full zone original_count. You have to do both or the mean will be distorted.
                # hand-verification that this works as intended using test GIS data on was completed 2019-11-01 by NJS
                crossprods = []
                for i in range(0, len(valid_unflat_ids)):
                    crossprods.append([
                        cell_vec[i] * float(s or 0)
                        for s in stat_vectors_by_id[i]
                    ])

                # BUG FIX: zip() returns an iterator in Python 3, so the
                # original zip(*crossprods)[i] raised TypeError; sum each
                # column of the crossproduct matrix instead.
                weighted_stat_means = [
                    sum(col) / cell_count for col in zip(*crossprods)
                ]
            else:
                weighted_coverage = 0
                weighted_stat_means = [None] * len(other_field_names)
                count_diff += 1

            new_row = [zid, original_count, cell_count, weighted_coverage
                       ] + weighted_stat_means
            i_cursor.insertRow(new_row)
        del i_cursor

        DM.Delete(intermediate_table)

        return [unflat_result, count_diff]
    # NOTE(review): fragment of a larger function; target_fld, workspace,
    # cost_rast, in_file and add_msg_and_print come from code not shown.
    if len(target_fld) == 0 or target_fld == "#":
        target_fld = "New_WP"

    arcpy.env.overwriteOutput = True

    arcpy.env.workspace = workspace
    if (arcpy.env.workspace is None):
        arcpy.env.workspace = os.getcwd()

    #arcpy.env.extent = "MINOF"
    arcpy.env.snapRaster = cost_rast
    # Use a throwaway 2 km buffer of the input purely to derive the
    # processing extent, then delete it.
    scratch = arcpy.CreateScratchName('xx', '.shp')
    arcpy.Buffer_analysis(in_file, scratch, "2000 meters")
    desc = arcpy.Describe(scratch)
    arcpy.env.extent = desc.extent
    arcmgt.Delete(scratch)
    # BUG FIX: Python 2 print statement converted to the print() function,
    # valid in both Python 2.7 and Python 3.
    print("Extent is %s" % arcpy.env.extent)

    add_msg_and_print('Currently in directory: %s\n' % os.getcwd())
    add_msg_and_print('Workspace is: %s' % arcpy.env.workspace)
    #add_msg_and_print ('Scratch table is: %s' % out_table)

    table_view = "table_view"
    arcmgt.MakeTableView(in_file, table_view)

    fields = arcpy.ListFields(in_file)

    layer = "feat_layer"
    arcmgt.MakeFeatureLayer(in_file, layer)
    desc = arcpy.Describe(layer)
    fld_names = []
Esempio n. 15
0
    def doFishnet(self):
        """Build the weighted fishnet grid for optimized hot spot analysis.

        Collects coincident events, reports locational outliers, derives a
        cell size and extent (projecting through Mercator when chordal
        distances are in use), creates the fishnet, spatially joins the
        input features to the grid, removes cells outside the boundary (or
        too far from any input point), and finishes by calling
        createAnalysisSSDO on the joined result.
        """
        #### Initial Data Assessment ####
        printOHSSection(84428, prependNewLine=True)
        printOHSSubject(84431, addNewLine=False)

        #### Find Unique Locations ####
        msg = ARCPY.GetIDMessage(84441)
        ARCPY.SetProgressor("default", msg)
        initCount = UTILS.getCount(self.ssdo.inputFC)
        self.checkIncidents(initCount)
        collectedPointFC = UTILS.returnScratchName("Collect_InitTempFC")
        collInfo = EVENTS.collectEvents(self.ssdo, collectedPointFC)
        self.cleanUpList.append(collectedPointFC)
        collSSDO = SSDO.SSDataObject(collectedPointFC,
                                     explicitSpatialRef=self.ssdo.spatialRef,
                                     useChordal=True)
        collSSDO.obtainDataGA(collSSDO.oidName)
        #################################

        if self.boundaryFC:
            #### Assure Boundary FC Has Area and Obtain Chars ####
            self.checkBoundary()

        #### Location Outliers ####
        lo = UTILS.LocationInfo(collSSDO,
                                concept="EUCLIDEAN",
                                silentThreshold=True,
                                stdDeviations=3)
        printOHSLocationalOutliers(lo, aggType=self.aggType)

        #### Agg Header ####
        printOHSSection(84444)
        if self.boundaryFC:
            extent = self.boundExtent
            forMercExtent = self.boundExtent
            countMSGNumber = 84453

        else:
            countMSGNumber = 84452
            extent = None
            forMercExtent = collSSDO.extent

        # Chordal case: project the extent corners to Mercator so the
        # fishnet can be sized in linear units; the grid is projected back
        # to GCS after creation.
        if collSSDO.useChordal:
            extentFC_GCS = UTILS.returnScratchName("TempGCS_Extent")
            extentFC_Merc = UTILS.returnScratchName("TempMercator_Extent")
            points = NUM.array([[forMercExtent.XMin, forMercExtent.YMax],
                                [forMercExtent.XMax, forMercExtent.YMin]])
            UTILS.createPointFC(extentFC_GCS,
                                points,
                                spatialRef=collSSDO.spatialRef)
            DM.Project(extentFC_GCS, extentFC_Merc, mercatorProjection)
            d = ARCPY.Describe(extentFC_Merc)
            extent = d.extent
            fishOutputCoords = mercatorProjection
        else:
            fishOutputCoords = self.ssdo.spatialRef

        #### Fish Subject ####
        printOHSSubject(84449, addNewLine=False)
        dist = scaleDecision(lo.nonZeroAvgDist, lo.nonZeroMedDist)
        area = 0.0

        #### Construct Fishnet ####
        fish = UTILS.FishnetInfo(collSSDO, area, extent, explicitCellSize=dist)
        dist = fish.quadLength
        snap = self.ssdo.distanceInfo.linearUnitString(dist)

        #### Cell Size Answer ####
        snapStr = self.ssdo.distanceInfo.printDistance(dist)
        msg = ARCPY.GetIDMessage(84450).format(snapStr)
        printOHSAnswer(msg)
        self.fish = fish

        #### Fishnet Count Subject ####
        printOHSSubject(84451, addNewLine=False)

        #### Create Temp Fishnet Grid ####
        gridFC = UTILS.returnScratchName("Fishnet_TempFC")
        self.cleanUpList.append(gridFC)

        #### Apply Output Coords to Create Fishnet ####
        oldSpatRef = ARCPY.env.outputCoordinateSystem
        ARCPY.env.outputCoordinateSystem = fishOutputCoords

        #### Fish No Extent ####
        oldExtent = ARCPY.env.extent
        ARCPY.env.extent = ""

        #### Apply Max XY Tolerance ####
        fishWithXY = UTILS.funWithXYTolerance(DM.CreateFishnet,
                                              self.ssdo.distanceInfo)

        #### Execute Fishnet ####
        fishWithXY(gridFC, self.fish.origin, self.fish.rotate,
                   self.fish.quadLength, self.fish.quadLength,
                   self.fish.numRows, self.fish.numCols, self.fish.corner,
                   "NO_LABELS", self.fish.extent, "POLYGON")

        #### Project Back to GCS if Use Chordal ####
        if collSSDO.useChordal:
            gridFC_ProjBack = UTILS.returnScratchName("TempFC_Proj")
            DM.Project(gridFC, gridFC_ProjBack, collSSDO.spatialRef)
            UTILS.passiveDelete(gridFC)
            gridFC = gridFC_ProjBack

        #### Set Env Output Coords Back ####
        ARCPY.env.outputCoordinateSystem = oldSpatRef

        #### Create Empty Field Mappings to Ignore Atts ####
        fieldMap = ARCPY.FieldMappings()
        fieldMap.addTable(self.ssdo.inputFC)
        fieldMap.removeAll()

        #### Fishnet Count Answer ####
        printOHSAnswer(ARCPY.GetIDMessage(countMSGNumber))

        #### Create Weighted Fishnet Grid ####
        tempFC = UTILS.returnScratchName("Optimized_TempFC")
        self.cleanUpList.append(tempFC)
        joinWithXY = UTILS.funWithXYTolerance(ANA.SpatialJoin,
                                              self.ssdo.distanceInfo)
        joinWithXY(gridFC, self.ssdo.inputFC, tempFC, "JOIN_ONE_TO_ONE",
                   "KEEP_ALL", "EMPTY")

        #### Clean Up Temp FCs ####
        UTILS.passiveDelete(gridFC)

        #### Remove Locations Outside Boundary FC ####
        featureLayer = "ClippedPointFC"
        DM.MakeFeatureLayer(tempFC, featureLayer)
        if self.boundaryFC:
            msg = ARCPY.GetIDMessage(84454)
            ARCPY.SetProgressor("default", msg)
            DM.SelectLayerByLocation(featureLayer, "INTERSECT",
                                     self.boundaryFC, "#", "NEW_SELECTION")
            DM.SelectLayerByLocation(featureLayer, "INTERSECT", "#", "#",
                                     "SWITCH_SELECTION")
            DM.DeleteFeatures(featureLayer)
        else:
            if additionalZeroDistScale == "ALL":
                msg = ARCPY.GetIDMessage(84455)
                ARCPY.SetProgressor("default", msg)
                DM.SelectLayerByAttribute(featureLayer, "NEW_SELECTION",
                                          '"Join_Count" = 0')
                DM.DeleteFeatures(featureLayer)

            else:
                distance = additionalZeroDistScale * fish.quadLength
                distanceStr = self.ssdo.distanceInfo.linearUnitString(
                    distance, convert=True)
                nativeStr = self.ssdo.distanceInfo.printDistance(distance)
                # NOTE(review): "pointsd" typo in this user-facing message;
                # left unchanged here to preserve runtime output exactly.
                msg = "Removing cells further than %s from input pointsd...."
                ARCPY.AddMessage(msg % nativeStr)
                DM.SelectLayerByLocation(featureLayer, "INTERSECT",
                                         self.ssdo.inputFC, distanceStr,
                                         "NEW_SELECTION")
                DM.SelectLayerByLocation(featureLayer, "INTERSECT", "#", "#",
                                         "SWITCH_SELECTION")
                DM.DeleteFeatures(featureLayer)

        DM.Delete(featureLayer)
        del collSSDO

        ARCPY.env.extent = oldExtent
        self.createAnalysisSSDO(tempFC, "JOIN_COUNT")
Esempio n. 16
0
 # NOTE(review): fragment starts mid-function (1-space indent preserved);
 # wrk, classLevel, inPath, rasL, hypZ, i, and the 'man' alias are defined
 # earlier in the file. The final loop at the bottom is truncated here.
 curZo = wrk + "/zon_C" + classLevel
 # cycle through each edm
 for j in range(len(rasL)):
     if j == 0:
         # First raster: copy the zone raster and seed the 'spp0' code
         # string from its zonal maximum.
         inRas = inPath + "/" + rasL[j] + "_c.tif"
         curZoT_out = wrk + "/zonTab_C" + str(i) + "_" + str(j)
         print(".. zoning " + rasL[j])
         curZoT = ZonalStatisticsAsTable(hypZ[i], "Value", inRas,
                                         curZoT_out, "DATA", "MAXIMUM")
         man.CopyRaster(hypZ[i], curZo)
         man.AddField(curZo, "spp0", "TEXT", "", "", 251)
         man.JoinField(curZo, "Value", curZoT, "VALUE", ["MAX"])
         expr = "str( !MAX! )"
         man.CalculateField(curZo, "spp0", expr, "PYTHON")
         man.DeleteField(curZo, "MAX")
         man.Delete(curZoT_out)
     else:
         #jminus = j-1
         # Subsequent rasters: append each zonal maximum digit to 'spp0'.
         inRas = inPath + "/" + rasL[j] + "_c.tif"
         print(".. zoning " + rasL[j])
         curZoT_out = wrk + "/zonTab_C" + str(i) + "_" + str(j)
         curZoT = ZonalStatisticsAsTable(hypZ[i], "Value", inRas,
                                         curZoT_out, "DATA", "MAXIMUM")
         man.JoinField(curZo, "Value", curZoT, "VALUE", ["MAX"])
         expr = "str(!spp0!) + str(!MAX!)"
         man.CalculateField(curZo, "spp0", expr, "PYTHON")
         man.DeleteField(curZo, "MAX")
         man.Delete(curZoT_out)
 # expand information out to one col for each spp.
 print("adding columns...")
 for i in range(len(rasL)):
def classify_lakes(nhd,
                   out_feature_class,
                   exclude_intermit_flowlines=False,
                   debug_mode=False):
    """Classify lake connectivity by tracing the NHD geometric network.

    Each lake/reservoir polygon is labeled 'Isolated', 'Headwater',
    'Drainage', or 'DrainageLk' based on how it connects to the flowline
    network and to lakes >= 10 hectares downstream.

    :param nhd: Path to an NHD file geodatabase containing the Hydrography
        dataset (NHDFlowline, NHDWaterbody, HYDRO_NET and its junctions).
    :param out_feature_class: Output feature class. On the first pass
        (exclude_intermit_flowlines=False) it is created; on the second pass
        it is consumed and re-created with extra fields.
    :param exclude_intermit_flowlines: False = classify with all flowlines
        and write "Lake_Connectivity_Class". True = re-run using only
        permanent (non-intermittent) flowlines, writing
        "Lake_Connectivity_Permanent" and a "Lake_Connectivity_Fluctuates"
        Y/N flag. The all-flowlines pass must be run first because the
        permanent pass reads its output.
    :param debug_mode: If True, work in a temp file GDB (kept on disk for
        inspection) instead of 'in_memory', and skip intermediate cleanup.
    """
    if debug_mode:
        arcpy.env.overwriteOutput = True
        temp_gdb = cu.create_temp_GDB('classify_lake_connectivity')
        arcpy.env.workspace = temp_gdb
        arcpy.AddMessage('Debugging workspace located at {}'.format(temp_gdb))

    else:
        arcpy.env.workspace = 'in_memory'

    # "temp_fc" is this tool's own scratch name; if it already exists the
    # workspace is dirty (bad exit from a previous run) and results would be
    # unreliable, so refuse to continue.
    if arcpy.Exists("temp_fc"):
        print("There is a problem here.")
        raise Exception("Scratch feature class 'temp_fc' already exists in the workspace.")

    # Tool temporary feature classes
    temp_fc = "temp_fc"
    csiwaterbody_10ha = "csiwaterbody_10ha"
    nhdflowline_filtered = "nhdflowline_filtered"
    dangles = "dangles"
    start = "start"
    end = "end"
    startdangles = "startdangles"
    enddangles = "enddangles"
    non_artificial_end = "non_artificial_end"
    flags_10ha_lake_junctions = "flags_10ha_lake_junctions"
    midvertices = "midvertices"
    non10vertices = "non10vertices"
    non10junctions = "non10junctions"
    all_non_flag_points = "all_non_flag_points"
    barriers = "barriers"
    trace1_junctions = "trace1_junctions"
    trace1_flowline = "trace1_flowline"
    trace2_junctions = "trace2junctions"
    trace2_flowline = "trace2_flowline"

    # Clean up workspace in case of bad exit from prior run in same session.
    this_tool_layers = [
        "dangles_lyr", "nhdflowline_lyr", "junction_lyr", "midvertices_lyr",
        "all_non_flag_points_lyr", "non10vertices_lyr", "out_fc_lyr", "trace1",
        "trace2"
    ]
    this_tool_temp = [
        temp_fc, csiwaterbody_10ha, nhdflowline_filtered, dangles, start, end,
        startdangles, enddangles, non_artificial_end,
        flags_10ha_lake_junctions, midvertices, non10vertices, non10junctions,
        all_non_flag_points, barriers, trace1_junctions, trace1_flowline,
        trace2_junctions, trace2_flowline
    ]
    for item in this_tool_layers + this_tool_temp:
        # Best-effort cleanup: deleting something that doesn't exist is fine.
        try:
            DM.Delete(item)
        except Exception:
            pass

    # Local variables:
    # NOTE(review): "NHDFLowline" capitalization looks odd but file GDB paths
    # are case-insensitive on Windows -- confirm before "fixing".
    nhdflowline = os.path.join(nhd, "Hydrography", "NHDFLowline")
    nhdjunction = os.path.join(nhd, "Hydrography", "HYDRO_NET_Junctions")
    nhdwaterbody = os.path.join(nhd, "Hydrography", "NHDWaterbody")
    network = os.path.join(nhd, "Hydrography", "HYDRO_NET")

    # Get lakes, ponds and reservoirs over a hectare.
    #csi_population_filter = '''"AreaSqKm" >=0.01 AND\
    #"FCode" IN (39000,39004,39009,39010,39011,39012,43600,43613,43615,43617,43618,43619,43621)'''
    all_lakes_reservoirs_filter = '''"FType" IN (390, 436)'''

    # Can't see why we shouldn't just attribute all lakes and reservoirs
    # arcpy.Select_analysis(nhdwaterbody, "csiwaterbody", lake_population_filter)
    arcpy.AddMessage("Initializing output.")
    if exclude_intermit_flowlines:
        # Second pass: start from the first pass's output and rebuild it.
        DM.CopyFeatures(out_feature_class, temp_fc)
        DM.Delete(out_feature_class)
    else:
        arcpy.Select_analysis(nhdwaterbody, temp_fc,
                              all_lakes_reservoirs_filter)

    # Get lakes, ponds and reservoirs over 10 hectares.
    lakes_10ha_filter = '''"AreaSqKm" >= 0.1 AND "FType" IN (390, 436)'''
    arcpy.Select_analysis(nhdwaterbody, csiwaterbody_10ha, lakes_10ha_filter)

    # Exclude intermittent flowlines, if requested
    if exclude_intermit_flowlines:
        flowline_where_clause = '''"FCode" NOT IN (46003,46007)'''
        nhdflowline = arcpy.Select_analysis(nhdflowline, nhdflowline_filtered,
                                            flowline_where_clause)

    # Make dangle points at end of nhdflowline
    DM.FeatureVerticesToPoints(nhdflowline, dangles, "DANGLE")
    DM.MakeFeatureLayer(dangles, "dangles_lyr")

    # Isolate start dangles from end dangles.
    DM.FeatureVerticesToPoints(nhdflowline, start, "START")
    DM.FeatureVerticesToPoints(nhdflowline, end, "END")

    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", start)
    DM.CopyFeatures("dangles_lyr", startdangles)
    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", end)
    DM.CopyFeatures("dangles_lyr", enddangles)

    # Special handling for lakes that have some intermittent flow in and some permanent
    if exclude_intermit_flowlines:
        DM.MakeFeatureLayer(nhdflowline, "nhdflowline_lyr")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "NEW_SELECTION",
                                  '''"WBArea_Permanent_Identifier" is null''')
        DM.FeatureVerticesToPoints("nhdflowline_lyr", non_artificial_end,
                                   "END")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "CLEAR_SELECTION")

    arcpy.AddMessage("Found source area nodes.")

    # Get junctions from lakes >= 10 hectares.
    DM.MakeFeatureLayer(nhdjunction, "junction_lyr")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT", csiwaterbody_10ha,
                             XY_TOLERANCE, "NEW_SELECTION")

    DM.CopyFeatures("junction_lyr", flags_10ha_lake_junctions)
    arcpy.AddMessage("Found lakes >= 10 ha.")

    # Make points shapefile and layer at flowline vertices to act as potential flags and/or barriers.
    arcpy.AddMessage("Tracing...")
    DM.FeatureVerticesToPoints(nhdflowline, midvertices, "MID")
    DM.MakeFeatureLayer(midvertices, "midvertices_lyr")

    # Get vertices that are not coincident with 10 hectare lake junctions.
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "NEW_SELECTION")
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "SWITCH_SELECTION")
    DM.CopyFeatures("midvertices_lyr", non10vertices)

    # Get junctions that are not coincident with 10 hectare lake junctions.
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "NEW_SELECTION")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "SWITCH_SELECTION")
    DM.CopyFeatures("junction_lyr", non10junctions)

    # Merge non10vertices with non10junctions
    DM.Merge([non10junctions, non10vertices],
             all_non_flag_points)  # inputs both point fc in_memory
    DM.MakeFeatureLayer(all_non_flag_points, "all_non_flag_points_lyr")

    # Tests the counts...for some reason I'm not getting stable behavior from the merge.
    mid_n = int(DM.GetCount(non10vertices).getOutput(0))
    jxn_n = int(DM.GetCount(non10junctions).getOutput(0))
    merge_n = int(DM.GetCount(all_non_flag_points).getOutput(0))
    if merge_n < mid_n + jxn_n:
        arcpy.AddWarning(
            "The total number of flags ({0}) is less than the sum of the input junctions ({1}) "
            "and input midpoints ({2})".format(merge_n, jxn_n, mid_n))

    # For tracing barriers, select all_non_flag_points points that intersect a 10 ha lake.
    DM.SelectLayerByLocation("all_non_flag_points_lyr", "INTERSECT",
                             csiwaterbody_10ha, XY_TOLERANCE, "NEW_SELECTION")
    DM.CopyFeatures("all_non_flag_points_lyr", barriers)

    # Trace1-Trace downstream to first barrier (junctions+midvertices in 10 ha lake) starting from flags_10ha_lake_junctions flag points.
    DM.TraceGeometricNetwork(network, "trace1", flags_10ha_lake_junctions,
                             "TRACE_DOWNSTREAM", barriers)

    # Save trace1 flowlines and junctions to layers on disk.
    # BUG FIX: these group-layer paths are written as raw strings. In a
    # non-raw Python 3 literal, "\N" is a malformed unicode-name escape and
    # the module would not even parse; the raw form has the identical bytes.
    DM.CopyFeatures(r"trace1\HYDRO_NET_Junctions",
                    trace1_junctions)  # extra for debugging
    DM.CopyFeatures(r"trace1\NHDFlowline", trace1_flowline)

    # Select vertice midpoints that intersect trace1 flowlines selection for new flags for trace2.
    DM.MakeFeatureLayer(non10vertices, "non10vertices_lyr")
    DM.SelectLayerByLocation("non10vertices_lyr", "INTERSECT", trace1_flowline,
                             "", "NEW_SELECTION")

    # Trace2-Trace downstream from midpoints of flowlines that intersect the selected flowlines from trace1.
    DM.TraceGeometricNetwork(network, "trace2", "non10vertices_lyr",
                             "TRACE_DOWNSTREAM")

    # Save trace2 flowlines and junctions to layers and then shapes on disk.
    DM.CopyFeatures(r"trace2\HYDRO_NET_Junctions", trace2_junctions)
    DM.CopyFeatures(r"trace2\NHDFlowline",
                    trace2_flowline)  # extra for debugging
    arcpy.AddMessage("Done tracing.")

    # Make shapefile for seepage lakes. (Ones that don't intersect flowlines)
    if exclude_intermit_flowlines:
        class_field_name = "Lake_Connectivity_Permanent"
    else:
        class_field_name = "Lake_Connectivity_Class"
    DM.AddField(temp_fc, class_field_name, "TEXT", field_length=13)
    DM.MakeFeatureLayer(temp_fc, "out_fc_lyr")
    # Lakes touching no flowline at all are 'Isolated'.
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline, "",
                             "SWITCH_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""",
                      "PYTHON")

    # New type of "Isolated" classification, mostly for "permanent" but there were some oddballs in "maximum" too
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", enddangles,
                             XY_TOLERANCE, "SUBSET_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""",
                      "PYTHON")

    # Get headwater lakes: touch a start dangle but weren't already Isolated.
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByAttribute(
        "out_fc_lyr", "REMOVE_FROM_SELECTION",
        '''"{}" = 'Isolated' '''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""",
                      "PYTHON")

    # Select csiwaterbody that intersect trace2junctions
    arcpy.AddMessage("Beginning connectivity attribution...")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", trace2_junctions,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'DrainageLk'""",
                      "PYTHON")

    # Get stream drainage lakes. Either unassigned so far or convert "Headwater" if a permanent stream flows into it,
    # which is detected with "non_artificial_end"
    DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                              '''"{}" IS NULL'''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""",
                      "PYTHON")
    if exclude_intermit_flowlines:
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"{}" = 'Headwater' '''.format(class_field_name))
        DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", non_artificial_end,
                                 XY_TOLERANCE, "SUBSET_SELECTION")
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""",
                          "PYTHON")

        # Prevent 'upgrades' due to very odd flow situations and artifacts of bad digitization. The effects of these
        # are varied--to avoid confusion, just keep the class  assigned with all flowlines

        # 1--Purely hypothetical, not seen in testing
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"Lake_Connectivity_Class" = 'Isolated' AND "Lake_Connectivity_Permanent" <> 'Isolated' '''
        )
        DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""",
                          "PYTHON")

        # 2--Headwater to Drainage upgrade seen in testing with odd multi-inlet flow situation
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"Lake_Connectivity_Class" = 'Headwater' AND "Lake_Connectivity_Permanent" IN ('Drainage', 'DrainageLk') '''
        )
        DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""",
                          "PYTHON")

        # 3--Drainage to DrainageLk upgrade seen in testing when intermittent stream segments were used
        # erroneously instead of artificial paths
        DM.SelectLayerByAttribute(
            "out_fc_lyr", "NEW_SELECTION",
            '''"Lake_Connectivity_Class" = 'Drainage' AND "Lake_Connectivity_Permanent" = 'DrainageLk' '''
        )
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""",
                          "PYTHON")
        DM.SelectLayerByAttribute("out_fc_lyr", "CLEAR_SELECTION")

        # Add change flag for users: Y when the maximum and permanent classes disagree.
        DM.AddField(temp_fc,
                    "Lake_Connectivity_Fluctuates",
                    "Text",
                    field_length="1")
        flag_codeblock = """def flag_calculate(arg1, arg2):
            if arg1 == arg2:
                return 'N'
            else:
                return 'Y'"""
        expression = 'flag_calculate(!Lake_Connectivity_Class!, !Lake_Connectivity_Permanent!)'
        DM.CalculateField(temp_fc, "Lake_Connectivity_Fluctuates", expression,
                          "PYTHON", flag_codeblock)

    # Project output once done with both. Switching CRS earlier causes trace problems.
    if not exclude_intermit_flowlines:
        DM.CopyFeatures(temp_fc, out_feature_class)
    else:
        DM.Project(temp_fc, out_feature_class, arcpy.SpatialReference(102039))

    # Clean up
    if not debug_mode:
        for item in this_tool_layers + this_tool_temp:
            if arcpy.Exists(item):
                DM.Delete(item)

    if not debug_mode:
        DM.Delete("trace1")
        DM.Delete("trace2")
    arcpy.AddMessage("{} classification is complete.".format(class_field_name))
# Esempio n. 18
# 0
def process_zone(zone_fc, output, zone_name, zone_id_field, zone_name_field,
                 other_keep_fields, clip_hu8, lagosne_name):
    """Standardize a LAGOS spatial-division ("zone") polygon layer.

    Dissolves the input to one feature per zone, clips it to the master study
    extent (optionally to the HU8 layer), drops sliver polygons, assigns
    stable zone IDs (re-using LAGOS-NE IDs where available), flags border and
    coastal zones, assigns a state, and prefixes all field names with the
    zone name.

    :param zone_fc: Input zone polygon feature class.
    :param output: Output feature class path; also used to name the
        "<output>_not_selected" layer of eliminated slivers.
    :param zone_name: Short zone label (e.g. 'hu4'); used in new IDs and as
        the field-name prefix.
    :param zone_id_field: Field that uniquely identifies a zone.
    :param zone_name_field: Field holding the human-readable zone name.
    :param other_keep_fields: Comma-space separated string of extra fields
        to carry through the dissolve ('' for none).
    :param clip_hu8: 'Y' to additionally clip to the HU8_OUTPUT layer.
    :param lagosne_name: Matching LAGOS-NE master layer name, or '' to skip
        ID borrowing.

    Relies on module-level constants: MASTER_CLIPPING_POLY, HU8_OUTPUT,
    LAGOSNE_GDB, LAND_BORDER, COASTLINE, STATE_FC, and the DM/AN arcpy
    aliases. Assumes arcpy.env.workspace is set by the caller -- TODO confirm.
    """
    # dissolve fields by the field that zone_id is based on (the field that identifies a unique zone)
    # Build the dissolve field list from the comma-space joined names,
    # dropping empties so a blank other_keep_fields is harmless.
    dissolve_fields = [
        f for f in "{}, {}, {}".format(zone_id_field, zone_name_field,
                                       other_keep_fields).split(', ')
        if f != ''
    ]
    print("Dissolving...")
    dissolve1 = DM.Dissolve(zone_fc, 'dissolve1', dissolve_fields)

    # update name field to match our standard
    DM.AlterField(dissolve1, zone_name_field, 'name')

    # original area

    # Record pre-clip area so the retained-area percentage can be computed later.
    DM.AddField(dissolve1, 'originalarea', 'DOUBLE')
    DM.CalculateField(dissolve1, 'originalarea', '!shape.area@hectares!',
                      'PYTHON')

    #clip
    print("Clipping...")
    clip = AN.Clip(dissolve1, MASTER_CLIPPING_POLY, 'clip')
    if clip_hu8 == 'Y':
        final_clip = AN.Clip(clip, HU8_OUTPUT, 'final_clip')
    else:
        final_clip = clip

    print("Selecting...")
    # calc new area, orig area pct, compactness
    DM.AddField(final_clip, 'area_ha', 'DOUBLE')
    DM.AddField(final_clip, 'originalarea_pct', 'DOUBLE')
    DM.AddField(final_clip, 'compactness', 'DOUBLE')
    # NOTE(review): 'originalarea_pct' does not appear to exist in dissolve1
    # (only 'originalarea' was added there), so this join looks like a no-op
    # or an error -- confirm the intended join field before changing anything.
    DM.JoinField(final_clip, zone_id_field, dissolve1, zone_id_field,
                 'originalarea_pct')

    uCursor_fields = [
        'area_ha', 'originalarea_pct', 'originalarea', 'compactness',
        'SHAPE@AREA', 'SHAPE@LENGTH'
    ]
    with arcpy.da.UpdateCursor(final_clip, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, comp, shape_area, shape_length = row
            area = shape_area / 10000  # convert from m2 to hectares
            orig_area_pct = round(100 * area / orig_area, 2)
            # Isoperimetric compactness 4*pi*A/P^2 (1.0 for a circle);
            # 3.14159 is an approximation of pi.
            comp = 4 * 3.14159 * shape_area / (shape_length**2)
            row = (area, orig_area_pct, orig_area, comp, shape_area,
                   shape_length)
            uCursor.updateRow(row)

    # if zones are present with <5% of original area and a compactness measure of <.2 (ranges from 0-1)
    # AND ALSO they are no bigger than 500 sq. km. (saves Chippewa County and a WWF), filter out
    # save eliminated polygons to temp database as a separate layer for inspection

    # Different processing for HU4 and HU8, so that they match the extent of HU8 more closely but still throw out tiny slivers
    # County also only eliminated if a tiny, tiny, tiny sliver (so: none should be eliminated)
    if zone_name not in ('hu4', 'hu12', 'county'):
        selected = AN.Select(
            final_clip, 'selected',
            "originalarea_pct >= 5 OR compactness >= .2 OR area_ha > 50000")
        # Kept only for manual inspection of what was thrown away.
        not_selected = AN.Select(
            final_clip, '{}_not_selected'.format(output),
            "originalarea_pct < 5 AND compactness < .2 AND area_ha < 50000")

    else:
        selected = final_clip
    # eliminate small slivers, re-calc area fields, add perimeter and multipart flag
    # leaves the occasional errant sliver but some areas over 25 hectares are more valid so this is
    # CONSERVATIVE
    print("Trimming...")
    trimmed = DM.EliminatePolygonPart(selected,
                                      'trimmed',
                                      'AREA',
                                      '25 Hectares',
                                      part_option='ANY')

    # gather up a few calculations into one cursor because this is taking too long over the HU12 layer
    DM.AddField(trimmed, 'perimeter_m', 'DOUBLE')
    DM.AddField(trimmed, 'multipart', 'TEXT', field_length=1)
    uCursor_fields = [
        'area_ha', 'originalarea_pct', 'originalarea', 'perimeter_m',
        'multipart', 'SHAPE@'
    ]
    with arcpy.da.UpdateCursor(trimmed, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, perim, multipart, shape = row
            area = shape.area / 10000  # convert to hectares from m2
            orig_area_pct = round(100 * area / orig_area, 2)
            perim = shape.length

            # multipart flag calc
            if shape.isMultipart:
                multipart = 'Y'
            else:
                multipart = 'N'
            row = (area, orig_area_pct, orig_area, perim, multipart, shape)
            uCursor.updateRow(row)

    # delete intermediate fields
    DM.DeleteField(trimmed, 'compactness')
    DM.DeleteField(trimmed, 'originalarea')

    print("Zone IDs....")
    # link to LAGOS-NE zone IDs
    DM.AddField(trimmed, 'zoneid', 'TEXT', field_length=40)
    trimmed_lyr = DM.MakeFeatureLayer(trimmed, 'trimmed_lyr')
    if lagosne_name:
        # join to the old master GDB path on the same master field and copy in the ids
        old_fc = os.path.join(LAGOSNE_GDB, lagosne_name)
        old_fc_lyr = DM.MakeFeatureLayer(old_fc, 'old_fc_lyr')
        # STATE/COUNTY masters key on FIPS codes rather than the zone id field.
        if lagosne_name == 'STATE' or lagosne_name == 'COUNTY':
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr, 'FIPS')
        else:
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr,
                       zone_id_field)  # usually works because same source data

        # copy
        DM.CalculateField(trimmed_lyr, 'zoneid',
                          '!{}.ZoneID!.lower()'.format(lagosne_name), 'PYTHON')
        DM.RemoveJoin(trimmed_lyr)

    # generate new zone ids
    old_ids = [row[0] for row in arcpy.da.SearchCursor(trimmed, 'zoneid')]
    with arcpy.da.UpdateCursor(trimmed, 'zoneid') as cursor:
        counter = 1
        for row in cursor:
            if not row[
                    0]:  # if no existing ID borrowed from LAGOS-NE, assign a new one
                new_id = '{name}_{num}'.format(name=zone_name, num=counter)

                # ensures new ids don't re-use old numbers but fills in all positive numbers eventually
                while new_id in old_ids:
                    counter += 1
                    new_id = '{name}_{num}'.format(name=zone_name, num=counter)
                row[0] = new_id
                cursor.updateRow(row)
                counter += 1

    print("Edge flags...")
    # add flag fields
    DM.AddField(trimmed, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(trimmed, 'oncoast', 'TEXT', field_length=2)

    # identify border zones
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(trimmed_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # State?
    # Prefer the state containing the zone's center; fall back to any
    # intersecting state for zones whose center falls outside all states.
    DM.AddField(trimmed, "state", 'text', field_length='2')
    state_center = arcpy.SpatialJoin_analysis(
        trimmed,
        STATE_FC,
        'state_center',
        join_type='KEEP_COMMON',
        match_option='HAVE_THEIR_CENTER_IN')
    state_intersect = arcpy.SpatialJoin_analysis(trimmed,
                                                 STATE_FC,
                                                 'state_intersect',
                                                 match_option='INTERSECT')
    state_center_dict = {
        row[0]: row[1]
        for row in arcpy.da.SearchCursor(state_center, ['ZoneID', 'STUSPS'])
    }
    state_intersect_dict = {
        row[0]: row[1]
        for row in arcpy.da.SearchCursor(state_intersect, ['ZoneID', 'STUSPS'])
    }
    with arcpy.da.UpdateCursor(trimmed, ['ZoneID', 'state']) as cursor:
        for updateRow in cursor:
            keyValue = updateRow[0]
            if keyValue in state_center_dict:
                updateRow[1] = state_center_dict[keyValue]
            else:
                updateRow[1] = state_intersect_dict[keyValue]
            cursor.updateRow(updateRow)

    # glaciation status?
    # TODO as version 0.6

    # preface the names with the zones
    DM.DeleteField(trimmed, 'ORIG_FID')
    fields = [
        f.name for f in arcpy.ListFields(trimmed, '*')
        if f.type not in ('OID',
                          'Geometry') and not f.name.startswith('Shape_')
    ]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            DM.AlterField(trimmed, f, new_fname, clear_field_alias='TRUE')
        # sick of debugging the required field message-I don't want to change required fields anyway
        except:
            pass

    DM.CopyFeatures(trimmed, output)

    # cleanup
    # Delete every local layer object (variables named *lyr) plus any
    # feature classes left in the (in_memory) workspace.
    lyr_objects = [
        lyr_object for var_name, lyr_object in locals().items()
        if var_name.endswith('lyr')
    ]
    temp_fcs = arcpy.ListFeatureClasses('*')
    for l in lyr_objects + temp_fcs:
        DM.Delete(l)
def snap_points_to_mask_raster (in_file, mask, out_file, distance, workspace):
    """Snap points falling outside a raster mask onto the mask's edge.

    Copies `in_file` to `out_file`, polygonizes `mask`, selects the copied
    points that do NOT intersect the mask polygons, and snaps only those
    points to the nearest polygon edge within `distance`.

    :param in_file: Input point feature class or layer.
    :param mask: Raster whose data cells define the allowed area.
    :param out_file: Output point feature class ('.shp' appended when the
        workspace is not a geodatabase and no extension was given).
    :param distance: Snap tolerance string, e.g. "100 METERS"; defaults to
        "100 METERS" when empty/None.
    :param workspace: Workspace to use; falls back to the current
        arcpy.env.workspace, then to the process CWD.
    """
    # Default snapping tolerance.
    if distance is None or len (distance) == 0:
        distance = "100 METERS"

    if arcpy.env.outputCoordinateSystem is None:
        arcpy.env.outputCoordinateSystem = mask
    print(arcpy.env.outputCoordinateSystem.name)

    if len(workspace):
        arcpy.env.workspace = workspace
    if arcpy.env.workspace is None or len(arcpy.env.workspace) == 0:
        arcpy.env.workspace = os.getcwd()

    arcpy.AddMessage ("workspace is %s" % arcpy.env.workspace)

    # Polygonize the mask so there are edges to snap to. Scratch shapefile
    # names need an explicit .shp suffix; inside a file GDB they must not
    # have one.
    suffix = None
    wk = arcpy.env.workspace
    if not '.gdb' in wk:
        suffix = '.shp'
    poly_file = arcpy.CreateScratchName(None, suffix, 'POLYGON')
    arcpy.RasterToPolygon_conversion (mask, poly_file, 'NO_SIMPLIFY')

    arcpy.AddMessage ("poly_file is %s" % poly_file)

    #  handle layers and datasets
    desc = arcpy.Describe(in_file)
    in_file = desc.catalogPath

    #  add .shp extension if needed - clunky, but otherwise system fails below
    re_gdb = re.compile (r'\.gdb$')
    re_shp = re.compile (r'\.shp$')
    path = os.path.dirname(out_file)
    if len (path) == 0:
        path = arcpy.env.workspace
    if not re_gdb.search (path) and not re_shp.search (out_file):
        out_file += '.shp'

    arcpy.AddMessage ("Input point file is %s" % in_file)
    arcpy.AddMessage ("Output point file is %s" % out_file)

    arcmgt.CopyFeatures (in_file, out_file)

    try:
        snap_layer_name = 'get_layer_for_snapping'
        arcmgt.MakeFeatureLayer (out_file, snap_layer_name)
        # Select, then invert: the points NOT on the mask are the ones to snap.
        arcmgt.SelectLayerByLocation (snap_layer_name, 'intersect', poly_file, '#', 'NEW_SELECTION')
        arcmgt.SelectLayerByAttribute(snap_layer_name, 'SWITCH_SELECTION')
        # BUG FIX: GetCount returns a Result object; the original compared the
        # object itself to 0 (always "greater" under Python 2's mixed-type
        # ordering). Extract the integer count as done elsewhere in this file.
        if int(arcmgt.GetCount(snap_layer_name).getOutput(0)) > 0:
            arcpy.Snap_edit (snap_layer_name, [[poly_file, "EDGE", distance]])
        else:
            arcpy.AddMessage ('No features selected, no snapping applied')
    except Exception as e:
        print(arcpy.GetMessages())
        raise e

    arcmgt.Delete (snap_layer_name)
    arcmgt.Delete (poly_file)

    print(arcpy.GetMessages())
    print("Completed")

    return
def get_path_residence_times (in_file, cost_rast, out_raster, t_diff_fld_name, workspace):
    """Distribute per-leg transit times across the cells of least-cost paths.

    For each consecutive pair of point features in `in_file`, computes the
    least-cost path over `cost_rast` (PathDistance/CostPath), apportions that
    leg's transit time (`t_diff_fld_name`, hours) across the path's cells in
    proportion to the cost-distance step into each cell, and accumulates the
    result into `out_raster`.

    :param in_file: Point feature class ordered by visit; must contain
        `t_diff_fld_name`.
    :param cost_rast: Cost raster; also used for snap/extent/mask defaults.
    :param out_raster: Output accumulated-residence-time raster (required).
    :param t_diff_fld_name: Transit-time field name; defaults to "T_DIFF_HRS".
    :param workspace: Workspace; must NOT be a geodatabase.
    :raises WorkspaceIsGeodatabase, CostRasterIsZero,
        NumPyArrayExceedsSizeLimits, PointNotOnRaster, PathDistanceIsNoData,
        NoFeatures: project-defined error conditions (classes defined
        elsewhere in this module).
    """
    if len (out_raster) == 0:
        arcpy.AddError ("Missing argument: out_rast")
        raise Exception
    if len (t_diff_fld_name) == 0:
        t_diff_fld_name = "T_DIFF_HRS"

    arcpy.env.overwriteOutput = True  #  This is underhanded.  It should be an argument.

    if arcpy.env.outputCoordinateSystem is None:
        arcpy.env.outputCoordinateSystem = cost_rast
    arcpy.AddMessage ("coordinate system is %s" % arcpy.env.outputCoordinateSystem.name)

    if len(workspace):
        arcpy.env.workspace = workspace
    if arcpy.env.workspace is None or len(arcpy.env.workspace) == 0:
        arcpy.env.workspace = os.getcwd()

    if '.gdb' in arcpy.env.workspace:
        arcpy.AddError (
            "Worskpace is a geodatabase.  " +
            "This brings too much pain for this script to work.\n" +
            "%s" % arcpy.env.workspace
        )
        raise WorkspaceIsGeodatabase


    r = Raster(cost_rast)

    if r.maximum == 0 and r.minimum == 0:
        arcpy.AddMessage ('Cost raster has only zero value.  Cannot calculate cost distances.')
        raise CostRasterIsZero

    # Guard against NumPy allocation failures on 32-bit builds: the raster
    # will be expanded to a float32 array (4 bytes/cell).
    # BUG FIX: a gibibyte is 1024**3 bytes; the original used 1028**3.
    size = r.height * r.width * 4
    if size > 2 * 1024 ** 3:
        import struct
        struct_size = struct.calcsize("P") * 8
        if struct_size == 32:
            size_in_gb = float (size) / (1024 ** 3)
            arcpy.AddMessage (
                'Cost raster exceeds 2 GiB in size (%s GiB).  This is too large for a 32 bit NumPy.' % size_in_gb
            )
            raise NumPyArrayExceedsSizeLimits

    if not check_points_are_in_cost_raster(in_file, cost_rast):
        arcpy.AddError ('One or more input points do not intersect the cost raster')
        raise PointNotOnRaster

    arcpy.env.snapRaster = cost_rast
    suffix = None
    wk = arcpy.env.workspace
    if not '.gdb' in wk:
        suffix = '.shp'


    ext = arcpy.env.extent
    if ext is None:
        arcpy.env.extent = r.extent

    arcpy.AddMessage ("Extent is %s" % arcpy.env.extent)

    arcpy.env.cellSize = r.meanCellWidth
    arcpy.AddMessage ("Cell size is %s" % arcpy.env.cellSize)
    cellsize_used = float (arcpy.env.cellSize)
    extent = arcpy.env.extent
    lower_left_coord = extent.lowerLeft

    arcpy.AddMessage ('Currently in directory: %s\n' % os.getcwd())
    arcpy.AddMessage ('Workspace is: %s' % arcpy.env.workspace)
    arcpy.AddMessage ("lower left is %s" % lower_left_coord)

    if arcpy.env.mask is None:
        arcpy.AddMessage ("Setting mask to %s" % cost_rast)
        arcpy.env.mask = cost_rast

    #  accumulated transits: start from an all-zero array shaped like the cost raster
    transit_array_accum = arcpy.RasterToNumPyArray (Raster(cost_rast) * 0)

    feat_layer = "feat_layer"
    arcmgt.MakeFeatureLayer(in_file, feat_layer)
    desc = arcpy.Describe (feat_layer)
    oid_fd_name = desc.OIDFieldName
    arcpy.AddMessage("oid_fd_name = %s" % oid_fd_name)

    #  variable name is redundant now??? - should all calls be to oid_fd_name?
    target_fld = oid_fd_name

    proc_layer = "process_layer"
    arcmgt.MakeFeatureLayer(in_file, proc_layer)
    rows = arcpy.SearchCursor(proc_layer)
    last_target = None

    for row_cur in rows:
        transit_time = row_cur.getValue (t_diff_fld_name)

        #  The first feature (and any zero-time leg) only establishes the
        #  origin for the next leg.
        if last_target is None or transit_time == 0:
            message = 'Skipping %s = %s' % (oid_fd_name, row_cur.getValue(oid_fd_name))
            if transit_time == 0:
                message = message + "  Transit time is zero"
            arcpy.AddMessage(message)
            last_target = row_cur.getValue(target_fld)
            last_oid    = row_cur.getValue(oid_fd_name)
            continue

        arcpy.AddMessage ("Processing %s %i" % (oid_fd_name, row_cur.getValue(oid_fd_name)))

        #  Cost-distance surface from the previous point, with backlinks for CostPath.
        arcmgt.SelectLayerByAttribute(
            feat_layer,
            "NEW_SELECTION",
            '%s = %s' % (target_fld, last_target)
        )
        backlink_rast  = arcpy.CreateScratchName("backlink")
        path_dist_rast = PathDistance(feat_layer, cost_rast, out_backlink_raster = backlink_rast)

        #  extract the distance from the last point
        shp = row_cur.shape
        centroid = shp.centroid
        (x, y) = (centroid.X, centroid.Y)
        result = arcmgt.GetCellValue(path_dist_rast, "%s %s" % (x, y), "1")
        res_val = result.getOutput(0)
        if res_val == "NoData":
            this_oid = row_cur.getValue(oid_fd_name)
            arcpy.AddMessage ("Got nodata for coordinate (%s, %s)" % (x, y))
            arcpy.AddMessage ("Is the path between features %s and %s wholly contained by the cost raster?" % (last_oid, this_oid))
            pras_name = "pth_%s_%s.tif" % (last_oid, this_oid)
            arcpy.AddMessage ("Attempting to save path raster as %s" % pras_name)
            try:
                path_dist_rast.save(pras_name)
            except Exception as e:
                arcpy.AddMessage (e)
            raise PathDistanceIsNoData
        try:
            path_distance = float (res_val)
        except:
            #  kludge around locale/radix issues
            #  BUG FIX: str.find returns -1 (truthy) when absent and 0 (falsy)
            #  when the match is at index 0; use a membership test instead.
            if "," in res_val:
                res_val = res_val.replace(",", ".")
                path_distance = float (res_val)
            else:
                raise
        arcpy.AddMessage("Path distance is %s\nTransit time is %s" % (path_distance, transit_time))

        #  get a raster of the path from origin to destination
        condition = '%s in (%i, %i)' % (oid_fd_name, last_oid, row_cur.getValue(oid_fd_name))
        dest_layer = "dest_layer" + str (last_oid)
        arcmgt.MakeFeatureLayer(in_file, dest_layer, where_clause = condition)

        count = arcmgt.GetCount(dest_layer)
        count = int (count.getOutput(0))
        if count == 0:
            raise NoFeatures("No features selected.  Possible coordinate system issues.\n" + condition)

        path_cost_rast = CostPath(dest_layer, path_dist_rast, backlink_rast)
        #path_dist_rast.save("xx_pr" + str (last_oid))

        #  Mask the distance surface down to the path cells only.
        pcr_mask       = 1 - IsNull (path_cost_rast)
        #pcr_mask.save ("xx_pcr_mask" + str (last_oid))
        dist_masked    = path_dist_rast * pcr_mask
        path_array     = arcpy.RasterToNumPyArray(dist_masked, nodata_to_value = -9999)
        path_array_idx = numpy.where(path_array > 0)
        transit_array  = numpy.zeros_like(path_array)  #  past experience suggests we might need to use a different approach to guarantee we get zeroes

        path_sum = None
        arcpy.AddMessage ("processing %i cells of path raster" % (len(path_array_idx[0])))

        if path_distance == 0 or not len(path_array_idx[0]):
            path_sum = 1 #  stayed in the same cell
            mask_array = arcpy.RasterToNumPyArray(pcr_mask, nodata_to_value = -9999)
            mask_array_idx = numpy.where(mask_array == 1)
            i = mask_array_idx[0][0]
            j = mask_array_idx[1][0]
            transit_array[i][j] = path_sum
        else:
            row_count = len (path_array)
            col_count = len (path_array[0])

            #  For each path cell, the incremental cost-distance into it is
            #  the smallest positive difference from any 8-neighbour on the path.
            for idx in range (len(path_array_idx[0])):
                i = path_array_idx[0][idx]
                j = path_array_idx[1][idx]
                val = path_array[i][j]
                nbrs = []
                for k in (i-1, i, i+1):
                    if k < 0 or k >= row_count:
                        continue
                    checkrow = path_array[k]
                    for l in (j-1, j, j+1):
                        if l < 0 or l >= col_count:
                            continue
                        if k == i and j == l:
                            continue  #  don't check self
                        checkval = checkrow[l]
                        #  negs are nodata, and this way we
                        #  don't need to care what that value is
                        if checkval >= 0:
                            diff = val - checkval
                            if diff > 0:
                                nbrs.append(diff)
                                #arcpy.AddMessage ("Check and diff vals are %s %s" % (checkval, diff))
                diff = min (nbrs)
                #arcpy.AddMessage ("Diff  val is %s" % diff)
                transit_array[i][j] = diff

            path_sum = path_array.max()  #  could use path_distance?
            #arcpy.AddMessage ("path_array.max is %s" % path_sum)

        #  sometimes we get a zero path_sum even when the path_distance is non-zero
        if path_sum == 0:
            path_sum = 1

        #  Increment the cumulative transit array by the fraction of the
        #  transit time spent in each cell.
        #  Use path_sum because it corrects for cases where we stayed in the same cell.
        transit_array_accum = transit_array_accum + ((transit_array / path_sum) * transit_time)

        #xx = arcpy.NumPyArrayToRaster (transit_array, lower_left_coord, cellsize_used, cellsize_used, 0)
        #tmpname = "xx_t_arr_" + str (last_oid)
        #print "Saving transit array to %s" % tmpname
        #xx.save (tmpname)


        try:
            arcmgt.Delete(backlink_rast)
            arcmgt.Delete(dest_layer)
        except Exception as e:
            arcpy.AddMessage (e)

        #  getting off-by-one errors when using the environment, so use this directly
        ext = path_cost_rast.extent
        lower_left_coord = ext.lowerLeft

        last_target = row_cur.getValue(target_fld)
        last_oid    = row_cur.getValue(oid_fd_name)

    #  need to use env settings to get it to be the correct size
    arcpy.AddMessage ("lower left is %s" % lower_left_coord)
    xx = arcpy.NumPyArrayToRaster (transit_array_accum, lower_left_coord, cellsize_used, cellsize_used, 0)
    print("Saving to %s" % out_raster)
    xx.save (out_raster)


    print("Completed")

    return ()
def georeference_lakes(
    lake_points_fc,
    out_fc,
    lake_id_field,
    lake_name_field,
    lake_county_field='',
    state='',
    master_gdb=r'C:\Users\smithn78\Dropbox\CL_HUB_GEO\Lake_Georeferencing\Masters_for_georef.gdb'
):
    """
    Evaluate water quality sampling point locations and either assign the point to a lake polygon or flag the
    point for manual review.

    Points are linked through a cascade of spatial joins (exact location,
    within 10 m, within 100 m, coincident stream reach) and, for LAGOS-NE
    states, a legacy-id crosswalk. Results and review flags are written to
    out_fc.
    :param lake_points_fc: point feature class of sampling locations
    :param out_fc: path for the output (linked) feature class
    :param lake_id_field: field in lake_points_fc holding the program's lake id
    :param lake_name_field: field in lake_points_fc holding the lake name
    :param lake_county_field: optional field in lake_points_fc with county name
    :param state: 2-letter state code abbreviation
    :param master_gdb: Location of master geodatabase used for linking
    :return: None; writes out_fc as a side effect
    """
    master_lakes_fc = os.path.join(master_gdb, MASTER_LAKES_FC)
    master_lakes_lines = os.path.join(master_gdb, MASTER_LAKES_LINES)
    master_streams_fc = os.path.join(master_gdb, MASTER_STREAMS_FC)
    master_xwalk = os.path.join(master_gdb, MASTER_XWALK)

    # setup
    arcpy.AddMessage("Joining...")
    state = state.upper()
    if state not in STATES:
        raise ValueError('Use the 2-letter state code abbreviation')
    arcpy.env.workspace = 'in_memory'
    out_short = os.path.splitext(os.path.basename(out_fc))[0]
    join1 = '{}_1'.format(out_short)
    join2 = '{}_2'.format(out_short)
    join3 = '{}_3'.format(out_short)
    join4 = '{}_4'.format(out_short)
    join5 = '{}_5'.format(out_short)

    # BUG FIX: the original indexed ListFields(...)[0] unconditionally, which
    # raised IndexError (instead of the intended friendly message) when the
    # county field was absent, and tested membership by substring against a
    # single field name. Check the full list of names, only when a field was
    # actually supplied.
    if lake_county_field:
        county_field_names = [
            f.name
            for f in arcpy.ListFields(lake_points_fc,
                                      '{}*'.format(lake_county_field))
        ]
        if lake_county_field not in county_field_names:
            print('{} field does not exist in dataset.'.format(lake_county_field))
            raise Exception

    point_fields = [f.name for f in arcpy.ListFields(lake_points_fc)]

    # update the lake id to a text field if not already
    lake_id_field_type = arcpy.ListFields(lake_points_fc,
                                          lake_id_field)[0].type
    if lake_id_field_type != 'String':
        temp_id_field = '{}_t'.format(lake_id_field)
        # BUG FIX: '255' was previously passed positionally as field_precision
        # (meaningless for TEXT); the intent was a 255-character field length
        arcpy.AddField_management(lake_points_fc, temp_id_field, 'TEXT',
                                  field_length=255)
        expr = '!{}!'.format(lake_id_field)
        arcpy.CalculateField_management(lake_points_fc, temp_id_field, expr,
                                        'PYTHON')
        arcpy.DeleteField_management(lake_points_fc, lake_id_field)
        arcpy.AlterField_management(lake_points_fc,
                                    temp_id_field,
                                    new_field_name=lake_id_field)

    # Try to make some spatial connections and fulfill some logic to assign a link
    join1 = AN.SpatialJoin(lake_points_fc,
                           master_lakes_fc,
                           join1,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT')
    join2 = AN.SpatialJoin(join1,
                           master_streams_fc,
                           join2,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT')
    join3 = AN.SpatialJoin(join2,
                           master_lakes_fc,
                           join3,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT',
                           search_radius='10 meters')
    join4 = AN.SpatialJoin(join3,
                           master_lakes_fc,
                           join4,
                           'JOIN_ONE_TO_MANY',
                           'KEEP_ALL',
                           match_option='INTERSECT',
                           search_radius='100 meters')

    # setup for editing lake assignment values
    DM.AddField(join4, 'Auto_Comment', 'TEXT', field_length=100)
    DM.AddField(join4, 'Manual_Review', 'SHORT')
    DM.AddField(join4, 'Shared_Words', 'TEXT', field_length=100)
    DM.AddField(join4, 'Linked_lagoslakeid', 'LONG')
    DM.AddField(join4, 'GEO_Discovered_Name', 'TEXT', field_length=255)
    DM.AddField(join4, 'Duplicate_Candidate', 'TEXT', field_length=1)
    DM.AddField(join4, 'Is_Legacy_Link', 'TEXT', field_length=1)

    # field name suffixes (_1, _12, _12_13) come from the successive joins above
    update_fields = [
        lake_id_field,
        lake_name_field,
        MASTER_LAKE_ID,
        MASTER_GNIS_NAME,  # 0m match
        'PERMANENT_IDENTIFIER_1',
        'GNIS_NAME_1',  # stream match
        MASTER_LAKE_ID + '_1',
        MASTER_GNIS_NAME + '_12',  # 10m match
        MASTER_LAKE_ID + '_12',
        MASTER_GNIS_NAME + '_12_13',  # 100m match
        'Auto_Comment',
        'Manual_Review',
        'Shared_Words',
        'Linked_lagoslakeid'
    ]

    # use a cursor to go through each point and evaluate its assignment
    cursor = arcpy.da.UpdateCursor(join4, update_fields)
    arcpy.AddMessage("Calculating link status...")
    for row in cursor:
        id, name, mid_0, mname_0, stream_id, streamname_0, mid_10, mname_10, mid_100, mname_100, comment, review, words, lagosid = row
        if mid_0 is not None:  # if the point is directly in a polygon
            if name and mname_0:
                words = lagosGIS.list_shared_words(name,
                                                   mname_0,
                                                   exclude_lake_words=False)
            comment = 'Exact location link'
            lagosid = mid_0
            review = -1
        elif mid_0 is None and mid_10 is not None:  # if the point is only within 10m of a lake
            if name and mname_10:
                words = lagosGIS.list_shared_words(name,
                                                   mname_10,
                                                   exclude_lake_words=False)
            if words:
                comment = 'Linked by common name and location'
                lagosid = mid_10
                review = -1
            else:
                comment = 'Linked by common location'
                lagosid = mid_10
                review = 1
        elif mid_0 is None and mid_10 is None:
            if stream_id is not None:  # if there is a stream match
                comment = 'Not linked because represented as river in NHD'
                review = 2
            else:
                if mid_100 is not None:  # if the point is only within 100m of lake(s)
                    if name and mname_100:
                        words = lagosGIS.list_shared_words(
                            name, mname_100, exclude_lake_words=True)
                # TODO: Frequency check
                    if words:
                        comment = 'Linked by common name and location'
                        lagosid = mid_100
                        review = 1
                    else:
                        comment = 'Linked by common location'
                        lagosid = mid_100
                        review = 2
        cursor.updateRow(
            (id, name, mid_0, mname_0, stream_id, streamname_0, mid_10,
             mname_10, mid_100, mname_100, comment, review, words, lagosid))

    # NOTE: a county-based matching pass lived here but never worked reliably
    # and was left fully commented out; the dead draft has been removed.
    # Recover it from version control history if it is ever revived.

    if state in LAGOSNE_STATES:
        DM.JoinField(join4, lake_id_field, master_xwalk, 'lagosne_legacyid',
                     ['lagoslakeid', 'lagos_lakename', 'lagos_state'])
        update_fields = [
            lake_id_field,
            lake_name_field,
            MASTER_LAKE_ID + '_12_13',
            'lagos_lakename',
            'lagos_state',  # crosswalk match
            'Auto_Comment',
            'Manual_Review',
            'Shared_Words',
            'Linked_lagoslakeid',
            'Is_Legacy_Link'
        ]

        with arcpy.da.UpdateCursor(join4, update_fields) as uCursor:
            for uRow in uCursor:
                id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag = uRow
                # fields are populated already from links above. Revise only if legacy links
                if mid_x is not None:
                    if state == state_x:
                        legacy_flag = 'Y'  # set to Y regardless of whether using legacy comment if state matches
                    if comment != 'Exact location link':
                        review = 1
                        if state != state_x:
                            review = 3  # downgrade if states mismatch--border lakes OK, random common IDs NOT. Check.
                        legacy_flag = 'Y'
                        comment = 'LAGOS-NE legacy link'  # only comment non-exact location matches
                        lagosid = mid_x
                        if name and mname_x:
                            words = lagosGIS.list_shared_words(
                                name,
                                mname_x)  # update words only if legacy comment

                new_row = id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag
                uCursor.updateRow(new_row)

    # county logic is disabled, so join5 is just join4
    join5 = join4

    # then re-code the no matches as a 3 and copy comments to the editable field
    # compress the joined lake ids into one field
    # having two fields lets us keep track of how many of the auto matches are bad
    if arcpy.ListFields(join5, 'Comment'):
        comment_field_name = 'Comment_LAGOS'
    else:
        comment_field_name = 'Comment'

    DM.AddField(join5, comment_field_name, 'TEXT', field_length=100)
    # BUG FIX: this cursor previously hard-coded 'Comment', which left the
    # newly added 'Comment_LAGOS' field empty whenever the input already had
    # a 'Comment' field
    with arcpy.da.UpdateCursor(
            join5,
        ['Manual_Review', 'Auto_Comment', comment_field_name]) as cursor:
        for flag, ac, comment in cursor:
            if flag is None:
                flag = 3
                ac = 'Not linked'
            comment = ac
            cursor.updateRow((flag, ac, comment))

    # Re-code points more than 100m into the polygon of the lake as no need to check
    DM.MakeFeatureLayer(join5, 'join5_lyr')
    DM.MakeFeatureLayer(master_lakes_lines, 'lake_lines_lyr')
    DM.SelectLayerByAttribute('join5_lyr', 'NEW_SELECTION',
                              "Auto_Comment = 'Exact location link'")
    DM.SelectLayerByLocation('join5_lyr', 'INTERSECT', 'lake_lines_lyr',
                             '100 meters', 'SUBSET_SELECTION', 'INVERT')
    DM.CalculateField('join5_lyr', 'Manual_Review', '-2', 'PYTHON')
    # BUG FIX: Delete was called as Delete('join5_lyr', 'lake_lines_lyr'),
    # which passes the second layer name as the data_type argument and never
    # deletes it; delete each layer separately
    DM.Delete('join5_lyr')
    DM.Delete('lake_lines_lyr')

    # Then make sure to only keep the fields necessary when you write to an output
    copy_fields = point_fields + [
        'Linked_lagoslakeid', 'Auto_Comment', 'Manual_Review',
        'Is_Legacy_Link', 'Shared_Words', 'Comment', 'Duplicate_Candidate',
        'GEO_Discovered_Name'
    ]
    copy_fields.remove('Shape')
    copy_fields.remove('OBJECTID')

    lagosGIS.select_fields(join5, out_fc, copy_fields)

    DM.AssignDomainToField(out_fc, 'Comment', 'Comment')

    DM.AddField(out_fc, 'Total_points_in_lake_poly', 'Short')

    # Remove any duplicates. (These originate from the join3/join4 transition because a point can be both
    # within 10m and 100m of lakes, this code takes the closest lake as true for my current sanity.)
    # Or, in other words, this is a hack solution.
    out_fc_fields = [
        f.name for f in arcpy.ListFields(out_fc) if f.name != 'OBJECTID'
    ]
    DM.DeleteIdentical(out_fc, out_fc_fields)

    # Get the join_count for each limno lake ID
    # De-dupe anything resulting from limno ID duplicates first before counting
    id_pairs = list(
        set(
            arcpy.da.SearchCursor(out_fc,
                                  [lake_id_field, 'Linked_lagoslakeid'])))
    # THEN pull out LAGOS id. Any duplicate now are only due to multiple distinct points within lake
    lagos_ids = [ids[1] for ids in id_pairs]
    sample_ids = [ids[0] for ids in id_pairs]
    lagos_lake_counts = Counter(lagos_ids)
    linked_multiple_lake_counts = Counter(sample_ids)

    # Get the count of points in the polygon
    with arcpy.da.UpdateCursor(
            out_fc,
        ['Linked_lagoslakeid', 'Total_points_in_lake_poly']) as cursor:
        for lagos_id, join_count in cursor:
            join_count = lagos_lake_counts[lagos_id]
            cursor.updateRow((lagos_id, join_count))

    # Mark any samples linked to more than one lake so that the analyst can select the correct lake in the
    # manual process
    with arcpy.da.UpdateCursor(
            out_fc, [lake_id_field, 'Duplicate_Candidate']) as cursor:
        for sample_id, duplicate_flag in cursor:
            duplicate_count = linked_multiple_lake_counts[sample_id]
            if duplicate_count > 1:
                duplicate_flag = "Y"
            else:
                duplicate_flag = "N"
            cursor.updateRow((sample_id, duplicate_flag))

    # clean up
    DM.AddField(out_fc, 'Note', 'TEXT', field_length=140)
    DM.Delete('in_memory')
    arcpy.AddMessage('Completed.')
Esempio n. 22
0
    # NOTE(review): orphan fragment -- the enclosing function's def line is not
    # visible in this chunk. Names such as wine, drought, intermediate_output,
    # working_dir, shp_name, lyr_template, mxd_path, AGOLHandler,
    # publish_service, enrich and check_job_status are defined elsewhere.
    # Select only winery points, then spatially join drought polygons to them.
    DM.SelectLayerByAttribute(wine, "NEW_SELECTION", "Type = 'Winery'")
    ANALYSIS.SpatialJoin(drought, wine, intermediate_output, "JOIN_ONE_TO_ONE",
                         "KEEP_ALL")
    # Best-effort removal of the joined NAME field; failure is ignored on purpose.
    try:
        DM.DeleteField(intermediate_output, "NAME")
    except:
        pass
    final_wine_drought = "Wine_Drought_Summary"
    DM.MakeFeatureLayer(intermediate_output, final_wine_drought)

    # Persist the layer to a .lyr file and apply the template symbology.
    lf = DM.SaveToLayerFile(
        final_wine_drought,
        OS.path.join(working_dir, '{}.lyr'.format(final_wine_drought)))
    DM.ApplySymbologyFromLayer(lf, lyr_template)

    # NOTE(review): hard-coded placeholder password; this line appears mangled
    # (stray '******' run fused with the service name) -- confirm against the
    # original script before relying on it.
    pw = "test"  #GETPASS.getpass("Enter AGOL password:"******"Drought_and_Wine"

    agol = AGOLHandler("analytics", pw, service_name)

    # Publish the layer file as a service, wait for AGOL to register the item,
    # then kick off the enrichment geoprocessing job and poll it to completion.
    publish_service(agol, service_name, mxd_path, lf[0])
    TIME.sleep(5)
    fs_url = agol.findItemURL('Feature Service')
    TIME.sleep(35)
    gp_url, jsondata = enrich(fs_url + '/0',
                              '{}_Enriched'.format(service_name), agol.token)
    check_job_status(gp_url, jsondata, agol.token)

    # Clean up local intermediate files.
    DM.Delete(OS.path.join(working_dir, shp_name))
    DM.Delete(OS.path.join(working_dir, lf[0]))
Esempio n. 23
0
def lake_from_to(nhd_subregion_gdb, output_table):
    """
    Trace the NHD geometric network to build a lake-to-lake connectivity table.

    For every waterbody passing LAGOS_LAKE_FILTER (lakes over 1 hectare),
    trace downstream from its junction(s) and record which other lakes the
    flow reaches.
    :param nhd_subregion_gdb: NHD subregion geodatabase containing
        NHDWaterbody, Hydrography/HYDRO_NET and HYDRO_NET_Junctions
    :param output_table: path of the output table; receives text fields
        FROM_PERMANENT_ID and TO_PERMANENT_ID (TO is null for terminal lakes)
    :return: None; writes output_table as a side effect
    """
    arcpy.env.workspace = 'in_memory'
    waterbody0 = os.path.join(nhd_subregion_gdb, 'NHDWaterbody')
    network = os.path.join(nhd_subregion_gdb, 'Hydrography', 'HYDRO_NET')
    junctions0 = os.path.join(nhd_subregion_gdb, 'HYDRO_NET_Junctions')

    # use layers for selections. We will only work with lakes over 1 hectare for this tool.
    waterbody = DM.MakeFeatureLayer(waterbody0,
                                    'waterbody',
                                    where_clause=LAGOS_LAKE_FILTER)
    num_wbs = int(arcpy.GetCount_management(waterbody).getOutput(0))
    junctions = DM.MakeFeatureLayer(junctions0, 'junctions')

    DM.SelectLayerByLocation(junctions, 'INTERSECT', waterbody, '1 Meters',
                             'NEW_SELECTION')
    junctions_1ha = DM.MakeFeatureLayer(junctions, 'junctions_1ha')

    # insert results into output table
    DM.CreateTable(os.path.dirname(output_table),
                   os.path.basename(output_table))
    DM.AddField(output_table, 'FROM_PERMANENT_ID', 'TEXT', field_length=40)
    DM.AddField(output_table, 'TO_PERMANENT_ID', 'TEXT', field_length=40)

    # accumulate result rows in memory, insert them all at the end
    results = []

    counter = 0
    progress = .01
    arcpy.AddMessage("Starting network tracing...")
    with arcpy.da.SearchCursor(waterbody, 'Permanent_Identifier') as cursor:
        for row in cursor:
            # progress printer: report each additional 1% of lakes processed
            counter += 1
            if counter >= float(num_wbs) * progress:
                progress += .01
                # BUG FIX: the 1 was previously a stray second argument to
                # format(); it was intended as the ndigits argument of round()
                arcpy.AddMessage("{}% complete...".format(
                    round(progress * 100, 1)))

            # select this lake
            lake_id = row[0]
            where_clause = """"{0}" = '{1}'""".format('Permanent_Identifier',
                                                      lake_id)
            this_waterbody = DM.MakeFeatureLayer(waterbody, 'this_waterbody',
                                                 where_clause)

            # select junctions overlapping this lake. only the downstream one matters, rest have no effect
            DM.SelectLayerByLocation(junctions_1ha, 'INTERSECT',
                                     this_waterbody, '1 Meters')
            count_junctions = int(
                arcpy.GetCount_management(junctions_1ha).getOutput(0))
            if count_junctions == 0:
                # isolated lake: add a row with no "TO" lake to the results
                results.append({'FROM': lake_id, 'TO': None})
            else:
                # copy with selection on
                this_junctions = DM.MakeFeatureLayer(junctions_1ha,
                                                     'this_junctions')
                DM.TraceGeometricNetwork(network, 'downstream', this_junctions,
                                         'TRACE_DOWNSTREAM')
                # select lakes that intersect the downstream network with a tolerance of 1 meters
                DM.SelectLayerByLocation(waterbody, 'INTERSECT',
                                         'downstream/NHDFlowline', '1 Meters',
                                         'NEW_SELECTION')
                # remove this lake itself from the selection
                DM.SelectLayerByAttribute(waterbody, 'REMOVE_FROM_SELECTION',
                                          where_clause)
                count_waterbody = int(
                    arcpy.GetCount_management(waterbody).getOutput(0))
                if count_waterbody == 0:
                    # nothing downstream: add a row with no "TO" lake
                    results.append({'FROM': lake_id, 'TO': None})
                else:
                    # one row per downstream lake reached. (to_row renamed so
                    # the Python 2 list comprehension does not clobber the
                    # outer cursor's row variable)
                    to_ids = [
                        to_row[0] for to_row in arcpy.da.SearchCursor(
                            waterbody, 'Permanent_Identifier')
                    ]
                    for to_id in to_ids:
                        results.append({'FROM': lake_id, 'TO': to_id})

            # delete the per-lake intermediates before the next iteration.
            # BUG FIX: this_junctions and the 'downstream' trace only exist
            # when a trace ran; the original referenced this_junctions
            # unconditionally, a NameError when the first lake had no
            # junctions (and a stale layer reference otherwise)
            DM.SelectLayerByAttribute(waterbody, 'CLEAR_SELECTION')
            intermediates = [this_waterbody]
            if count_junctions > 0:
                intermediates += [this_junctions, 'downstream']
            for item in intermediates:
                DM.Delete(item)

    # insert the results in the table
    insert_cursor = arcpy.da.InsertCursor(
        output_table, ['FROM_PERMANENT_ID', 'TO_PERMANENT_ID'])
    for result in results:
        insert_cursor.insertRow([result['FROM'], result['TO']])
    # release the insert cursor so the table lock is dropped
    del insert_cursor

    # delete everything
    for item in [waterbody, junctions, junctions_1ha, 'in_memory']:
        DM.Delete(item)
    arcpy.AddMessage("Completed.")
Esempio n. 24
0
    def createOutputShapes(self, outputFC):
        #### Shorthand Attributes ####
        ssdoBase = self.ssdoBase
        ssdoCand = self.ssdoCand

        #### Validate Output Workspace ####
        ARCPY.overwriteOutput = True
        ERROR.checkOutputPath(outputFC)

        #### Create Output Feature Class ####
        ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84003))
        outPath, outName = OS.path.split(outputFC)
        tempFC = UTILS.returnScratchName("TempSS_FC", fileType = "FEATURECLASS",
                                         scratchWS = outPath)
        outTempPath, outTempName = OS.path.split(tempFC)

        try:
            DM.CreateFeatureclass(outTempPath, outTempName, ssdoBase.shapeType, 
                                  "", ssdoBase.mFlag, 
                                  ssdoBase.zFlag, ssdoBase.spatialRefString)
        except:
            ARCPY.AddIDMessage("ERROR", 210, outputFC)
            raise SystemExit()

        #### Add Null Value Flag ####
        outIsShapeFile = UTILS.isShapeFile(outputFC)
        setNullable = outIsShapeFile == False

        #### Make Feature Layer and Select Result OIDs/Shapes ####
        featureCount = ssdoBase.numObs + ssdoCand.numObs
        ARCPY.SetProgressor("step", ARCPY.GetIDMessage(84003), 0,
                                                 featureCount, 1)

        #### Add Shape/ID Field Names ####
        matchID, candID = outputIDFieldNames
        outFieldNames = ["SHAPE@"] + outputIDFieldNames
        inFieldNames = ["OID@", "SHAPE@"]
        UTILS.addEmptyField(tempFC, matchID, "LONG", nullable = True)
        UTILS.addEmptyField(tempFC, candID, "LONG", nullable = True)

        #### Add Append Fields ####
        lenAppend = len(self.appendFields) 
        appendIsDate = []
        in2OutFieldNames = {}
        if lenAppend:
            for fieldName in self.appendFields:
                fcField = ssdoCand.allFields[fieldName]
                fieldType = UTILS.convertType[fcField.type]
                fieldOutName = UTILS.validQFieldName(fcField, outPath)
                in2OutFieldNames[fieldName] = fieldOutName
                if fieldType == "DATE":
                    appendIsDate.append(fieldName)
                UTILS.addEmptyField(tempFC, fieldOutName, fieldType,
                                    alias = fcField.alias)
                outFieldNames.append(fieldOutName)

        #### Add Analysis Fields ####
        for fieldName in self.fieldNames:
            fcField = ssdoBase.allFields[fieldName]
            fieldType = UTILS.convertType[fcField.type]
            fieldOutName = UTILS.validQFieldName(fcField, outPath)
            in2OutFieldNames[fieldName] = fieldOutName
            UTILS.addEmptyField(tempFC, fieldOutName, fieldType,
                                alias = fcField.alias)
            outFieldNames.append(fieldOutName)

        dataFieldNames = matchFieldInfo[self.similarType]
        dataFieldInfo = outputFieldInfo[self.matchMethod]
        baseValues = []
        for fieldName in dataFieldNames:
            outAlias, outType, baseValue = dataFieldInfo[fieldName]
            UTILS.addEmptyField(tempFC, fieldName, outType, 
                                alias = outAlias, 
                                nullable = setNullable) 
            outFieldNames.append(fieldName)
            baseValues.append(baseValue)

        #### Get Insert Cursor ####
        baseRows = DA.SearchCursor(ssdoBase.inputFC, inFieldNames)
        candRows = DA.SearchCursor(ssdoCand.inputFC, inFieldNames)
        rows = DA.InsertCursor(tempFC, outFieldNames)

        #### Set Base Data ####
        useShapeNull = outIsShapeFile
        if useShapeNull:
            nullIntValue = UTILS.shpFileNull['LONG']
        else:
            nullIntValue = None

        #### Set Base Null For Append ####
        appendNull = {}
        for fieldName in self.appendFields:
            if fieldName not in ssdoBase.fields:
                if useShapeNull:
                    outType = ssdoCand.fields[fieldName].type
                    outNullValue = UTILS.shpFileNull[outType]
                else:
                    outNullValue = None
                appendNull[fieldName] = outNullValue

        #### Add Base Data ####
        for masterID, shp in baseRows:
            orderID = ssdoBase.master2Order[masterID]

            #### Insert Shape, Match_ID and NULL (Cand_ID) ####
            rowRes = [shp, masterID, nullIntValue]

            #### Add Append Fields ####
            for fieldName in self.appendFields:
                if fieldName in appendNull:
                    rowRes.append(appendNull[fieldName])
                else:
                    value = ssdoBase.fields[fieldName].data[orderID]
                    if fieldName in appendIsDate:
                        value = TUTILS.iso2DateTime(value)
                    rowRes.append(value)

            #### Add Analysis Fields ####
            for fieldName in self.fieldNames:
                rowRes.append(ssdoBase.fields[fieldName].data[orderID])

            #### Add Null Base Values ####
            rowRes += baseValues

            rows.insertRow(rowRes)
            ARCPY.SetProgressorPosition()
        del baseRows
        
        #### First Add Similar Results ####
        for masterID, shp in candRows:
            orderID = ssdoCand.master2Order[masterID]
            indTop = NUM.where(self.topIDs == orderID)[0]
            indBot = NUM.where(self.botIDs == orderID)[0]
            if self.similarType in ['MOST_SIMILAR', 'BOTH'] and len(indTop):
                ind = indTop[0]
                #### Insert Shape, NULL (Match_ID) and Cand_ID ####
                rowRes = [shp, nullIntValue, masterID]
                
                #### Add Append Fields ####
                for fieldName in self.appendFields:
                    rowRes.append(ssdoCand.fields[fieldName].data[orderID])

                #### Add Analysis Fields ####
                for fieldName in self.fieldNames:
                    rowRes.append(ssdoCand.fields[fieldName].data[orderID])

                #### Add Results ####
                rank = ind + 1
                ss = self.totalDist[orderID]

                if self.similarType == 'BOTH':
                    rowRes += [rank, nullIntValue, ss, rank]
                else:
                    rowRes += [rank, ss, rank]

                rows.insertRow(rowRes)
            if self.similarType in ['LEAST_SIMILAR', 'BOTH'] and len(indBot):
                ind = indBot[0]
                #### Insert Shape, NULL (Match_ID) and Cand_ID ####
                rowRes = [shp, nullIntValue, masterID]

                #### Add Append Fields ####
                for fieldName in self.appendFields:
                    rowRes.append(ssdoCand.fields[fieldName].data[orderID])

                #### Add Analysis Fields ####
                for fieldName in self.fieldNames:
                    rowRes.append(ssdoCand.fields[fieldName].data[orderID])

                #### Add Results ####
                rank = ind + 1
                labRank = rank * -1
                ss = self.totalDist[orderID]

                if self.similarType == 'BOTH':
                    rowRes += [nullIntValue, rank, ss, labRank]
                else:
                    rowRes += [rank, ss, labRank]

                rows.insertRow(rowRes)

            ARCPY.SetProgressorPosition()
        del candRows
        del rows

        #### Do Final Sort ####
        if self.matchMethod == 'ATTRIBUTE_PROFILES':
            if self.similarType == 'MOST_SIMILAR':
                sortString = "SIMINDEX DESCENDING;SIMRANK DESCENDING"
            else:
                sortString = "SIMINDEX DESCENDING"
        else:
            if self.similarType == 'MOST_SIMILAR':
                sortString = "SIMINDEX ASCENDING;SIMRANK ASCENDING"
            else:
                sortString = "SIMINDEX ASCENDING"
        DM.Sort(tempFC, outputFC, sortString, "UR")

        #### Clean Up ####
        DM.Delete(tempFC)

        #### Symbology ####
        params = ARCPY.gp.GetParameterInfo()
        try:
            renderType = UTILS.renderType[self.ssdoBase.shapeType.upper()]
            renderKey = (self.similarType, renderType)
            renderLayerFile = outputRenderInfo[renderKey]
            templateDir = OS.path.dirname(OS.path.dirname(SYS.argv[0]))
            fullRLF = OS.path.join(templateDir, "Templates",
                                   "Layers", renderLayerFile)
            params[2].Symbology = fullRLF
        except:
            ARCPY.AddIDMessage("WARNING", 973)