示例#1
0
def add_snapped_attribute(conflict_points, brat_output):
    """Flag each conflict point with whether it intersected the BRAT network.

    A spatial join against the network yields a 'Join_Count' field; points
    with a count above zero are labeled as snapped. The joined copy then
    replaces the input shapefile in place (same path, original fields, plus
    the new 'SNAPPED' text field).

    :param conflict_points: Shapefile of snapped known points of beaver dam-human conflict
    :param brat_output: Path to network with BRAT constraints/opportunities results
    :return: None
    """
    joined_pts = os.path.join(os.path.dirname(conflict_points),
                              'ConflictPoints_Snapped.shp')
    arcpy.SpatialJoin_analysis(conflict_points,
                               brat_output,
                               joined_pts,
                               join_operation='JOIN_ONE_TO_ONE',
                               join_type='KEEP_ALL',
                               match_option='INTERSECT')
    arcpy.AddField_management(joined_pts, 'SNAPPED', 'TEXT')
    with arcpy.da.UpdateCursor(joined_pts, ['Join_Count', 'SNAPPED']) as cursor:
        for record in cursor:
            record[1] = 'Snapped to network' if record[0] > 0 else 'Not snapped to network'
            cursor.updateRow(record)
    # Drop every field the spatial join added, keeping originals + SNAPPED.
    # (Field list is read from the input BEFORE it is deleted below.)
    keep = {f.name for f in arcpy.ListFields(conflict_points)}
    keep.add('SNAPPED')
    for field_name in [f.name for f in arcpy.ListFields(joined_pts)]:
        if field_name not in keep:
            arcpy.DeleteField_management(joined_pts, field_name)
    # Swap the joined copy in under the original filename.
    arcpy.Delete_management(conflict_points)
    arcpy.Rename_management(joined_pts, conflict_points)
示例#2
0
    def crear_limite_manzanas(self):
        """Build manzana (city-block) limit polygons.

        Fragments the work area using the road-axis, zone and manzana layers,
        keeps fragments whose centroid falls in exactly one manzana
        (Join_Count = 1), and dissolves them back into per-manzana polygons.
        Outputs are written into self.path_trabajo.
        """
        arcpy.env.overwriteOutput = True
        path_manzana = path.join(self.path_trabajo, 'tb_manzana_procesar')
        path_eje_vial = path.join(self.path_trabajo, 'tb_eje_vial_procesar')
        path_zona = path.join(self.path_trabajo, 'tb_zona_procesar')

        fragmentos = arcpy.FeatureToPolygon_management(
            [path_eje_vial, path_zona, path_manzana],
            path.join(self.path_trabajo,
                      'fragmentos'), "10 Meters", "NO_ATTRIBUTES", "")
        x = arcpy.SpatialJoin_analysis(
            fragmentos, path_manzana,
            path.join(self.path_trabajo, 'fragmentos_manzanas_x'),
            'JOIN_ONE_TO_ONE', 'KEEP_ALL', '', 'HAVE_THEIR_CENTER_IN')
        # Keep only fragments matched by exactly one manzana.
        arcpy.SelectLayerByAttribute_management(x, "NEW_SELECTION",
                                                " Join_Count=1")

        # FIX: the original list was missing commas after 'UBIGEO' (which also
        # carried a stray tab) and after 'NOMCCCPP', so adjacent string
        # literals were silently concatenated into bogus field names
        # ('UBIGEO\tCODCCPP', 'NOMCCCPPSUF_MZNA') and Dissolve would fail to
        # find those fields.
        dissolveFields = [
            'CODDPTO', 'CODPROV', 'CODDIST', 'CODZONA', 'SUFZONA', 'CODMZNA',
            'UBIGEO', 'CODCCPP', 'DEPARTAMENTO', 'PROVINCIA', 'DISTRITO',
            'NOMCCCPP', 'SUF_MZNA', 'PK_MANZANA', 'ZONA', 'MANZANA'
        ]

        fragmentos_manzanas = arcpy.Dissolve_management(
            x, path.join(self.path_trabajo, 'fragmentos_manzanas'),
            dissolveFields, "", "SINGLE_PART", "DISSOLVE_LINES")
示例#3
0
def prep_islands():
    """Prepare island polylines: dissolve per island, filter out the
    catch-all and tiny islands, clip to the 4-county extent, record the
    pre-buffer length, and buffer by 100 m."""

    def _filter_to(src, where, dest):
        # Select by attribute, persist the selection, then clear it.
        arcpy.SelectLayerByAttribute_management(src, "NEW_SELECTION", where)
        arcpy.CopyFeatures_management(src, dest)
        arcpy.SelectLayerByAttribute_management(src, "CLEAR_SELECTION")

    # One polyline per island: dissolve on the STRONG island identifier.
    arcpy.Dissolve_management("islands_proj", "islands_dissolved", "STRONG")
    # STRONG == 0 stands in for a catch-all category, so keep positive IDs only.
    _filter_to("islands_dissolved", "STRONG > 0", "islands_gt_0")
    # Really tiny islands (< 1000 m) are not worth attention.
    _filter_to("islands_gt_0", "Shape_Length >= 1000", "islands_gte_1000m")

    # Keep only islands intersecting the 4 PA counties (KEEP_COMMON drops the rest).
    arcpy.SpatialJoin_analysis("islands_gte_1000m", "extent_4_counties", "islands",
                               "JOIN_ONE_TO_ONE", "KEEP_COMMON", match_option="INTERSECT")

    # Preserve the pre-buffer length for later use.
    arcpy.AddField_management("islands", "Orig_Length", "DOUBLE")
    arcpy.CalculateField_management("islands", "Orig_Length", "!Shape_Length!", "PYTHON_9.3")

    # 100 meter buffer around each island.
    arcpy.Buffer_analysis("islands", "buffered_islands", "100 Meters", "FULL", "ROUND")

    # Cleanup of intermediate layers.
    remove_intermediary_layers(["islands_proj", "islands_dissolved",
                                "islands_gt_0", "islands_gte_1000m"])
def get_stream_order(scratch_gdb, stream_burn_dem, seg_network_a, DEM_orig,
                     FlowDir, net_raster):
    """Compute Strahler stream order and attach it to the segmented network.

    :param scratch_gdb: scratch geodatabase for intermediate outputs
    :param stream_burn_dem: unused here (kept for interface compatibility)
    :param seg_network_a: segmented stream network feature class
    :param DEM_orig: unused here (kept for interface compatibility)
    :param FlowDir: flow-direction raster
    :param net_raster: rasterized stream network
    :return: path to the joined network copy carrying a 'Str_order' field
    """

    orderMethod = "STRAHLER"

    print("running Stream order")
    outStreamOrder = StreamOrder(net_raster, FlowDir, orderMethod)

    strord_path = scratch_gdb + "/streamord_out"
    outStreamOrder.save(strord_path)

    # Demote every order by one but leave 1st-order cells at 1, to
    # compensate for spurious first-order streams.
    print("fixing dodgy first order streams")
    str_ras = Raster(strord_path)
    Cor_Str_Ord_b = Con(str_ras == 1, 1, str_ras - 1)

    Cor_Str_Ord = scratch_gdb + "/Cor_Str_Ord"
    Cor_Str_Ord_b.save(Cor_Str_Ord)

    max_val = arcpy.GetRasterProperties_management(Cor_Str_Ord, "MAXIMUM")
    int_max_val = int(max_val.getOutput(0)) + 1
    val_range = list(range(2, int_max_val))

    # Let the higher orders grow over remaining 1st-order noise.
    print("expand values to remove 1st order errors")
    str_ord_exp = Expand(Cor_Str_Ord, 1, val_range)

    str_ord_exp_path = (scratch_gdb + "/str_ord_exp")
    str_ord_exp.save(str_ord_exp_path)

    print("convert Raster to Polygon")
    str_ord_exp_poly = scratch_gdb + "/st_or_ex_poly"
    arcpy.RasterToPolygon_conversion(str_ord_exp_path, str_ord_exp_poly,
                                     "NO_SIMPLIFY", "Value")

    # Drop stale fields so the spatial join can recreate them cleanly.
    net_fields = [f.name for f in arcpy.ListFields(seg_network_a)]
    if "Str_order" in net_fields:
        arcpy.DeleteField_management(seg_network_a, "Str_order")
    if "gridcode" in net_fields:
        arcpy.DeleteField_management(seg_network_a, "gridcode")
    del net_fields

    print("join network and StrOrd Polygon fields")
    seg_network_b = scratch_gdb + "/seg_network_b"

    arcpy.SpatialJoin_analysis(seg_network_a, str_ord_exp_poly, seg_network_b,
                               "JOIN_ONE_TO_ONE", "KEEP_ALL", "",
                               "HAVE_THEIR_CENTER_IN")

    arcpy.AddField_management(seg_network_b, "Str_order", "SHORT")

    # Copy the joined polygon gridcode into the typed Str_order field.
    # FIX: the original did `del row` after the loop, which raised NameError
    # whenever the joined network was empty; the with-block already releases
    # the cursor, so no manual cleanup is needed.
    with arcpy.da.UpdateCursor(seg_network_b,
                               ["Str_order", "gridcode"]) as cursor:
        for row in cursor:
            row[0] = row[1]
            cursor.updateRow(row)

    arcpy.DeleteField_management(seg_network_b, "gridcode")

    return seg_network_b
示例#5
0
def find_trail_island_intersections():
    """Spatially join islands onto trails and summarize island stats per trail.

    Produces 'trails_intersecting', where each trail carries the summed
    island length, the count of islands, and the mean CII score of all
    islands within 50 meters. Relies on module-level helpers
    `set_up_merge_rules`, `remove_intermediary_layers`, and `turn_off_layers`.
    """
    # Create the field mapping object that will be used in the spatial join
    field_mappings = arcpy.FieldMappings()
    # Populate the field mapping object with the fields from both feature classes of interest
    field_mappings.addTable("trails")
    field_mappings.addTable("islands_with_score")
    # Set up merge rules
    # Orig_length -- we will sum up the length of all intersecting islands for each trail
    set_up_merge_rules("Orig_length","Sum", field_mappings)
    # STRONG (i.e., island ID) -- we will count the number of all intersecting islands for each trail
    set_up_merge_rules("STRONG", "Count", field_mappings)
    # CII_Score_Overall -- we will compute the CII score average among all intersecting islands for each trail
    set_up_merge_rules("CII_Score_Overall", "Mean", field_mappings)

    # Do the spatial join to find all the islands that intersect with each trail
    # (50 m search radius widens "intersect" to nearby islands as well)
    arcpy.SpatialJoin_analysis("trails", "islands_with_score", "trails_intersecting",
                               "JOIN_ONE_TO_ONE", "KEEP_ALL", field_mappings, "INTERSECT", search_radius="50 Meters")
    # Rename fields to reflect the aggregated meaning of each column
    arcpy.AlterField_management("trails_intersecting", "Orig_length", "Length_of_All_Islands", "Length_of_All_Islands")
    arcpy.AlterField_management("trails_intersecting", "STRONG", "Num_of_Islands", "Num_of_Islands")
    arcpy.AlterField_management("trails_intersecting", "CII_Score_Overall", "Trail_CII_Score", "Trail_CII_Score")
    # Delete an unnecessary field left behind by the spatial join
    drop_fields = ["TARGET_FID"]
    arcpy.DeleteField_management("trails_intersecting", drop_fields)

    # Clean up
    remove_intermediary_layers([])
    turn_off_layers(["trails_intersecting"])
示例#6
0
def main():
    """Find gaps in the flowline network and save them as points.

    A gap is detected where a flowline endpoint touches no other flowline.
    NOTE(review): relies on module-level names `flowline_path` (input
    flowlines) and `outpath` (output point feature class) — confirm they
    are defined by the calling script.
    """

    #  import required modules and extensions
    import arcpy

    #  environment settings
    arcpy.env.workspace = 'in_memory'  # set workspace to temporary workspace
    arcpy.env.overwriteOutput = 'TRUE'

    # make a temp copy of input flowlines
    flowlines = arcpy.CopyFeatures_management(flowline_path,
                                              'in_memory/flowlines')

    # --find gaps in flowline network--
    # need this fix to resolve issues where flowlines were omitted in nhd area and waterbody polygons

    # create points at start and end of each flowlines
    line_pts = arcpy.FeatureVerticesToPoints_management(
        flowlines, 'in_memory/line_pts', 'BOTH_ENDS')

    # join start/end points with flowlines to get intersecting flowline count
    # here we want to isolate points that aren't connected to another line that indicate gaps in network
    line_pts_join = arcpy.SpatialJoin_analysis(line_pts, flowlines,
                                               'in_memory/line_pts_join',
                                               'JOIN_ONE_TO_ONE', 'KEEP_ALL',
                                               '', 'INTERSECT')

    # remove points that intersect > 1 line feature (these are not on gaps);
    # a point on a gap only touches the single line it belongs to
    with arcpy.da.UpdateCursor(line_pts_join, ['Join_Count']) as cursor:
        for row in cursor:
            if row[0] > 1:
                cursor.deleteRow()

    # copy gap points to output path
    arcpy.CopyFeatures_management(line_pts_join, outpath)
def spatialJoin(target_feature, source_feature, in_field, out_field, match_option, stats, output):
    """One-to-one spatial join keeping only 'Id' plus the fields in in_field.

    The first field of in_field is renamed to out_field and aggregated with
    the given merge rule (stats). Returns the SpatialJoin result object.
    """
    mappings = arcpy.FieldMappings()
    mappings.addTable(target_feature)
    mappings.addTable(source_feature)

    # Keep only the ID column and the requested input fields; everything
    # else is removed from the output schema.
    wanted = ["Id"] + in_field
    for fld in mappings.fields:
        if fld.name not in wanted:
            mappings.removeFieldMap(mappings.findFieldMapIndex(fld.name))

    # Rename the field of interest and attach the requested merge rule.
    stats_index = mappings.findFieldMapIndex(in_field[0])
    stats_map = mappings.getFieldMap(stats_index)
    renamed = stats_map.outputField
    renamed.name = out_field
    renamed.aliasName = out_field
    stats_map.outputField = renamed
    stats_map.mergeRule = stats
    mappings.replaceFieldMap(stats_index, stats_map)

    # Join with the caller-supplied match option (search radius left at the
    # tool default; INTERSECT may extract unnecessary segments).
    return arcpy.SpatialJoin_analysis(target_features=target_feature,
                                      join_features=source_feature,
                                      out_feature_class=output,
                                      join_operation="JOIN_ONE_TO_ONE",
                                      join_type="KEEP_ALL",
                                      field_mapping=mappings,
                                      match_option=match_option)
示例#8
0
    def initialize_find_rest(f, network, mode, direction, num_fac):
        """Create a closest-facility analysis layer and load facilities into it.

        :param f: facility feature layer/class to load and join against
        :param network: network dataset for the analysis
        :param mode: travel mode for the closest-facility solver
        :param direction: travel direction setting
        :param num_fac: number of facilities to find per incident
        :return: (closest-facility layer object, path to 'fac_join' output)

        NOTE(review): takes no `self` although indented like a method —
        presumably a nested helper; confirm against the enclosing scope.
        """
        # Initialize network analysis
        # Create a new closest facility analysis layer.
        arcpy.AddMessage(" ... initializing closest facility analysis")
        closest_fac_lyr_obj = arcpy.na.MakeClosestFacilityAnalysisLayer(
            network,
            "Closest_Facility",
            mode,
            direction,
            number_of_facilities_to_find=num_fac).getOutput(0)
        # Sublayer names (looked up rather than hard-coded; they vary by locale)
        sublayer_names = arcpy.na.GetNAClassNames(closest_fac_lyr_obj)
        cf_fac_lyr_name = sublayer_names["Facilities"]
        cf_incidents_lyr_name = sublayer_names["Incidents"]
        cf_routes_lyr_name = sublayer_names["CFRoutes"]

        # Load facilities
        # NOTE(review): NEW_SELECTION with no where clause — presumably
        # intended to select all features of f; confirm.
        arcpy.SelectLayerByAttribute_management(f, "NEW_SELECTION")
        arcpy.na.AddLocations(closest_fac_lyr_obj, cf_fac_lyr_name, f)

        # Join the loaded facility sublayer back to the source features so
        # downstream code can recover the original attributes.
        fac_join = os.path.join(arcpy.env.workspace, "fac_join")
        arcpy.SpatialJoin_analysis(cf_fac_lyr_name,
                                   f,
                                   fac_join,
                                   match_option="CLOSEST")

        return closest_fac_lyr_obj, fac_join
示例#9
0
def generate_trail_subsets_per_county():
    """Split the county-tagged trail layer into one feature class per county
    and add a per-county normalized overall score (max scaled to 20)."""
    # Tag each trail with the county it intersects.
    arcpy.SpatialJoin_analysis("trails_intersecting_gte_2", "boundaries_4_PA_counties", "trails_intersect_gte2_counties",
                                "JOIN_ONE_TO_ONE", "KEEP_all", match_option="INTERSECT")
    # One feature class per county.
    for county in county_list:
        subset = "trails_intersect_gte_2_" + county
        expr = "CO_NAME = '" + county + "'"
        print(expr)
        arcpy.SelectLayerByAttribute_management("trails_intersect_gte2_counties",
                                                "NEW_SELECTION", expr)
        arcpy.CopyFeatures_management("trails_intersect_gte2_counties", subset)
        arcpy.SelectLayerByAttribute_management("trails_intersect_gte2_counties", "CLEAR_SELECTION")
        symbolize_vectors(subset, lyr_file_param = "trails_intersect_gte_2")

        # Normalize the overall score so every county's maximum is a true 20.
        arcpy.AddField_management(subset, "Norm_Overall_Score_Per_County", "Double")
        max_overall_score = get_max(subset, "Overall_Score")
        if county == "Delaware":
            # Hard-coded override kept from the original workflow.
            max_overall_score = 14.749317
        print(subset)
        print(max_overall_score)
        overall_score_norm_expr = "(float(!Overall_Score!) / " + str(max_overall_score) + ")*20"
        print(overall_score_norm_expr)
        arcpy.CalculateField_management(subset, "Norm_Overall_Score_Per_County", overall_score_norm_expr, "PYTHON_9.3")
示例#10
0
def SpatJoin_1to1_Closest(Out_Field, Dist_Field, Join_Fields_list, fc_target, fc_join, out_fc, search_distance):
    """One-to-one CLOSEST spatial join that merges selected join fields into
    a single renamed output column.

    :param Out_Field: name/alias for the new merged output column
    :param Dist_Field: name of the distance field written by the join
    :param Join_Fields_list: fields of fc_join to feed into the new column
    :param fc_target: target feature class (all of its columns are kept)
    :param fc_join: join feature class
    :param out_fc: output feature class path
    :param search_distance: maximum search distance for CLOSEST matching
    """
    # Create a new fieldmappings object including field maps for all columns in fc_target
    fieldmappings = arcpy.FieldMappings()
    fieldmappings.addTable(fc_target)

    # Remove Join_Count and TARGET_FID remnants of any earlier join.
    # (FIX: the original named this list `list`, shadowing the builtin.)
    field_names = [x.name for x in fieldmappings.fields]
    if u'Join_Count' in field_names:
        fieldmappings.removeFieldMap(field_names.index(u'Join_Count'))
        field_names.remove(u'Join_Count')
    if u'TARGET_FID' in field_names:
        fieldmappings.removeFieldMap(field_names.index(u'TARGET_FID'))

    # Create the field map for the new column added by the spatial join.
    # (FIX: renamed from `FieldMap`, which shadowed the arcpy.FieldMap class name.)
    new_map = arcpy.FieldMap()
    # Set input fields from fc_join
    for fld in Join_Fields_list:
        new_map.addInputField(fc_join, fld)
    # Edit output field properties
    out_field_obj = new_map.outputField  # get field with current properties
    if out_field_obj.type == u'String':
        out_field_obj.length = 3000  # room for merged text values
    out_field_obj.name = Out_Field
    out_field_obj.aliasName = Out_Field
    new_map.outputField = out_field_obj  # overwrite field with edited properties

    # Add field map to field mappings.
    fieldmappings.addFieldMap(new_map)

    # Run the Spatial Join tool.
    arcpy.SpatialJoin_analysis(fc_target, fc_join, out_fc, "JOIN_ONE_TO_ONE",
                               "KEEP_ALL", fieldmappings, "CLOSEST",
                               search_distance, Dist_Field)
示例#11
0
def spatial_join(intersect_fc_master, in_location, out_location, spe_info_dict,
                 final_fields_index, previous_suffix, suffix):
    """Spatial-join a master intersect layer onto every feature class in a workspace.

    For each feature class in `in_location`, performs a one-to-many spatial
    join against `intersect_fc_master` into `out_location` (creating the gdb
    if needed) and post-processes the result with `updateFilesloop`.
    Outputs that already exist are skipped; failures are deleted so a rerun
    can recreate them.

    NOTE(review): depends on module-level `create_gdb`, `updateFilesloop`
    and `DissolveFields` — confirm in the calling script.
    FIX: print statements were Python-2-only; parenthesized single-argument
    print calls behave identically on Python 2 and 3.
    """
    # Check if out location was already created
    if not arcpy.Exists(out_location):
        path, gdb_file = os.path.split(out_location)
        create_gdb(path, gdb_file, out_location)
    # Set workspace location
    arcpy.env.workspace = in_location
    fc_list = arcpy.ListFeatureClasses()
    # loop through all files and run intersect
    for fc in fc_list:
        print(fc)
        out_name = fc.replace(previous_suffix, suffix)
        out_feature = out_location + os.sep + out_name
        in_features = in_location + os.sep + fc

        # Drop stale layers from any previous iteration before recreating them.
        arcpy.Delete_management('infc')
        arcpy.Delete_management('intersect')
        arcpy.MakeFeatureLayer_management(in_features, "infc")
        arcpy.MakeFeatureLayer_management(intersect_fc_master, "intersect")
        try:
            if not arcpy.Exists(out_feature):
                arcpy.SpatialJoin_analysis("infc", "intersect", out_feature,
                                           'JOIN_ONE_TO_MANY')
                print('Spatial Join CompFile {0}'.format(fc))
                updateFilesloop(out_feature, DissolveFields, spe_info_dict,
                                final_fields_index)
            else:
                continue

        except Exception as error:
            print(error.args[0])
            # Remove the partial output so the next run retries this class.
            arcpy.Delete_management(out_feature)
 def get_near_task2(self):
     """For each park shapefile, join the closest ozone (AOT) feature per
     year (2000-2013), writing near_<year>.shp into a per-park folder under
     'near_aot_aprsep'.

     Reads self.state_path and self.state_name. If a join fails, the park's
     output folder is removed and the park is skipped.
     """
     inraster_path = os.path.normcase(
         "D:/Environment Factors/Ozone aot layer")
     inmask_path = os.path.normcase(
         os.path.join(self.state_path, self.state_name + " parks"))
     output_path = os.path.normcase(
         os.path.join(self.state_path, "near_aot_aprsep"))
     if os.path.exists(output_path) is False:
         os.mkdir(output_path)
     inraster_file_list = glob.glob((os.path.join(inraster_path, "*.shp")))
     inmask_file_list = glob.glob((os.path.join(inmask_path, "*.shp")))
     for inmask_file in inmask_file_list:
         inmask_file_name = os.path.split(inmask_file)[-1][:-4]  # park name
         output_dir = os.path.join(output_path, inmask_file_name)
         if os.path.exists(output_dir) is False:
             os.mkdir(output_dir)
         for inraster_file in inraster_file_list:
             inraster_file_name = os.path.split(inraster_file)[-1]
             year_list = range(2000, 2014)
             # First year found in the filename; assumes every input file
             # name embeds a year in 2000-2013 (IndexError otherwise).
             inraster_file_year = [
                 year for year in year_list
                 if inraster_file_name.find(str(year)) >= 0
             ][0]
             output_file = os.path.join(
                 output_dir, "near_" + str(inraster_file_year) + ".shp")
             # FIX: bare `except:` also swallowed KeyboardInterrupt and
             # SystemExit; Exception still covers arcpy.ExecuteError.
             try:
                 arcpy.SpatialJoin_analysis(inmask_file, inraster_file,
                                            output_file, "#", "#", "#",
                                            "CLOSEST")
             except Exception:
                 # NOTE(review): rmdir fails on a non-empty dir (earlier
                 # years already written) — confirm intended cleanup.
                 os.rmdir(output_dir)
                 print("%s fails" % output_file)
                 break
             print("%s has been produced" % output_file)
def countyinfo(elementGDB, counties):
    """Assign county names to all elements and create a new local layer for each.

    Iterates the module-level pairs `input_features` / `elementShapefiles`,
    spatially joining the county polygons onto each element feature class
    from `elementGDB` and writing the result into the current workspace.

    :param elementGDB: geodatabase containing the element feature classes
    :param counties: county polygon feature class (supplies COUNTY_NAM)
    """
    # FIX: the original loop variable `input` shadowed the builtin input().
    for in_name, out_name in zip(input_features, elementShapefiles):
        target_features = os.path.join(elementGDB, in_name)
        element_features = os.path.join(env.workspace, out_name)

        fieldmappings = arcpy.FieldMappings()
        fieldmappings.addTable(target_features)
        fieldmappings.addTable(counties)

        # removing fields that are not needed - all fields in keepFields
        # will be kept (duplicate 'created_by' entry removed; membership
        # testing is unaffected)
        keepFields = [
            "OID", "COUNTY_NAM", "refcode", "created_by", "created_on",
            "dm_stat", "dm_stat_comm", "last_up_by", "last_up_on",
            "element_type", "elem_name", "id_prob",
            "id_prob_comm", "specimen_taken", "specimen_count",
            "specimen_desc", "curatorial_meth", "specimen_repo",
            "voucher_photo"
        ]
        for field in fieldmappings.fields:
            if field.name not in keepFields:
                fieldmappings.removeFieldMap(
                    fieldmappings.findFieldMapIndex(field.name))

        # run the spatial join tool with default operation/type ("#")
        arcpy.SpatialJoin_analysis(target_features, counties, element_features,
                                   "#", "#", fieldmappings)
示例#14
0
def spatialjoinmean(intar,injoin,infieldname,outfieldname,jointype):
    """Transfer the mean of a join-feature field onto the target features.

    Runs a spatial join into a scratch output with a MEAN merge rule on
    `infieldname` (renamed to `outfieldname`), then joins the value back
    onto `intar` by FID and deletes the scratch data.

    :param intar: target feature class (modified in place via JoinField)
    :param injoin: join feature class supplying the values
    :param infieldname: field in `injoin` to average
    :param outfieldname: name of the averaged field written to `intar`
    :param jointype: match option passed to SpatialJoin (e.g. 'INTERSECT')
    """
    tempData = arcpy.env.scratchGDB + os.path.sep+"output"
    targetFeatures = intar
    joinFeatures = injoin

    # Create a new fieldmappings and add the two input feature classes.
    fieldmappings = arcpy.FieldMappings()
    fieldmappings.addTable(targetFeatures)
    fieldmappings.addTable(joinFeatures)

    FieldIndex = fieldmappings.findFieldMapIndex(infieldname)
    fieldmap = fieldmappings.getFieldMap(FieldIndex)

    # Get the output field's properties as a field object
    field = fieldmap.outputField

    # Rename the field and pass the updated field object back into the field map
    field.name = outfieldname
    fieldmap.outputField = field

    # Set the merge rule to mean and then replace the old fieldmap in the mappings object
    # with the updated one
    fieldmap.mergeRule = "mean"
    fieldmappings.replaceFieldMap(FieldIndex, fieldmap)

    # Remove all output fields from the field mappings except "FID", the
    # renamed mean field, and "NEAR_DIST"
    for field in fieldmappings.fields:
        if field.name not in ["FID",outfieldname,"NEAR_DIST"]:
            fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(field.name))

    #Run the Spatial Join tool, using the defaults for the join operation and join type
    arcpy.SpatialJoin_analysis(targetFeatures, joinFeatures, tempData, "#", "#", fieldmappings,jointype)
    # Replace any existing copy of the field, then join the fresh values by FID.
    arcpy.DeleteField_management(intar, outfieldname)
    arcpy.JoinField_management(intar, "FID", tempData, "TARGET_FID", [outfieldname])
    arcpy.Delete_management(tempData)
def statistical_spatial_join(target_feature,
                             join_features,
                             out_feature_class,
                             prepended_field_name="",
                             join_operation="JOIN_ONE_TO_ONE",
                             join_type=True,
                             match_option="INTERSECT",
                             search_radius=None,
                             merge_rule_dict=None):
    """This function will join features to a target feature class using merge fields that are chosen based on the
     chosen summary statistics fields from the join feature class while keeping all the fields in the target.

    :param merge_rule_dict: mapping of merge rules to join-feature fields,
        passed to san.generate_statistical_fieldmap; defaults to an empty
        dict. (FIX: the original used a mutable default argument `{}`,
        which is shared across calls.)
    :param join_type: forwarded verbatim to SpatialJoin_analysis; the
        historical default of True is preserved (NOTE(review): arcpy
        documents "KEEP_ALL"/"KEEP_COMMON" here — confirm the boolean is
        accepted by the target arcpy version).
    """
    if merge_rule_dict is None:
        merge_rule_dict = {}
    try:
        arcpy.env.overwriteOutput = True
        # Start Analysis
        san.arc_print("Generating fieldmapping...")
        f_map = san.generate_statistical_fieldmap(target_feature,
                                                  join_features,
                                                  prepended_field_name,
                                                  merge_rule_dict)
        san.arc_print("Conducting spatial join...")
        arcpy.SpatialJoin_analysis(target_features=target_feature,
                                   join_features=join_features,
                                   out_feature_class=out_feature_class,
                                   join_operation=join_operation,
                                   join_type=join_type,
                                   match_option=match_option,
                                   search_radius=search_radius,
                                   field_mapping=f_map)
        san.arc_print("Script Completed Successfully.", True)
    except arcpy.ExecuteError:
        san.arc_print(arcpy.GetMessages(2))
    except Exception as e:
        san.arc_print(e.args[0])
示例#16
0
def main():
    """Join Worcester tax parcel attributes onto each '_A' survey feature class.

    Iterates every feature class in the workspace whose name ends in '_A'
    and writes a spatial-join output named <fc>A carrying the survey-tree
    fields (from the Template class) plus parcel address fields (from
    WORTaxPar), per the inline field-mapping string.
    """
    arcpy.env.workspace = r"E:\Data Collection\ProcessedSurvey\SurveyTrees.gdb"
    path = r"E:\Data Collection\ProcessedSurvey\SurveyTrees.gdb"
    addr = 'WORTaxPar'  #We'll have to append all the parcels together for this
    # keep = classes NOT ending in '_A'; ftc = the remainder (the '_A' classes)
    keep = [i for i in arcpy.ListFeatureClasses() if i[-2:] != '_A']
    ftc = [f for f in arcpy.ListFeatureClasses() if f not in keep]
    for x in ftc:
        ftcPath = os.path.join(arcpy.env.workspace, x)
        addrOut = x + "A"
        joinOut = os.path.join(path, addrOut)
        # The long literal below is an arcpy field-mapping string: it pins
        # field names, types, visibility and source paths for the output.
        arcpy.SpatialJoin_analysis(
            ftcPath, addr, joinOut, "JOIN_ONE_TO_ONE", "KEEP_ALL",
            """Species "Species" true true false 255 Text 0 0 ,First,#,E:/Data Collection/ProcessedSurvey/SurveyTrees.gdb/"""
            """Template,Species,-1,-1;DBH "DBH" true true false 4 Long 0 0 ,First,#,E:/Data Collection/"""
            """ProcessedSurvey/SurveyTrees.gdb/Template,DBH,-1,-1;Survey_Date "Survey_Date" true true false 8 Date 0 0 ,First,"""
            """#,E:/Data Collection/ProcessedSurvey/SurveyTrees.gdb/Template,Survey_Date,-1,-1;ALB_Area """
            """"ALB_Area" true true false 12 Text 0 0 ,First,#,E:/Data Collection/ProcessedSurvey/"""
            """SurveyTrees.gdb/Template,ALB_Area,-1,-1;ALB_Zone "ALB_Zone" true true false 5 Text 0 0 ,First,#,E:/"""
            """Data Collection/ProcessedSurvey/SurveyTrees.gdb/Template,ALB_Zone,-1,-1;ALB_Unit "ALB_Unit" true true false 5 Text 0 0 ,First,"""
            """#,E:/Data Collection/ProcessedSurvey/SurveyTrees.gdb/Template,ALB_Unit,-1,-1;"""
            """A_Z_U "A_Z_U" true true false 20 Text 0 0 ,First,#,E:/Data Collection/ProcessedSurvey/SurveyTrees.gdb/"""
            """Template,A_Z_U,-1,-1;LOC_ID "LOC_ID" true false false 18 Text 0 0 ,First,#,E:/Data Collection/"""
            """ProcessedSurvey/SurveyTrees.gdb/WORTaxPar,LOC_ID,-1,-1;SITE_ADDR "SITE_ADDR" true true false 255 Text 0 0 ,First,#,E:/Data """
            """Collection/ProcessedSurvey/SurveyTrees.gdb/WORTaxPar,SITE_ADDR,-1,-1;ADDR_NUM "ADDR_NUM" true true false 255 Text 0 0 ,First,#,E:/Data """
            """Collection/ProcessedSurvey/SurveyTrees.gdb/WORTaxPar,ADDR_NUM,-1,-1;FULL_STR "FULL_STR" true true false 255 Text 0 0 ,First,#,E:/Data """
            """Collection/ProcessedSurvey/SurveyTrees.gdb/WORTaxPar,FULL_STR,-1,-1;CITY "CITY" true true false 255 Text 0 0 ,First,#,E:/Data """
            """Collection/ProcessedSurvey/SurveyTrees.gdb/WORTaxPar,CITY,-1,-1""",
            "INTERSECT", "#", "#")
    return
示例#17
0
def dissolveGetFzh(xzkPath, dissolvepath, outputxzkpath):
    """Dissolve flagged features and assign new group numbers ('relfzh'),
    then join the numbers back onto the source features.

    (Original docstring: "融合得到新分组号" — dissolve to obtain new group IDs.)

    :param xzkPath: source feature class; features with shpvary = 'Y' are dissolved
    :param dissolvepath: output path for the dissolved groups
    :param outputxzkpath: output path for the source features joined (WITHIN)
        to their dissolved group
    """
    arcpy.MakeFeatureLayer_management(xzkPath, "xzkPath")

    arcpy.SelectLayerByAttribute_management("xzkPath", 'NEW_SELECTION',
                                            "shpvary = 'Y'")

    arcpy.Dissolve_management("xzkPath", dissolvepath, 'shpvary', '',
                              'SINGLE_PART', 'UNSPLIT_LINES')

    arcpyDeal.ensureFields(dissolvepath, ['relfzh'], "LONG")

    # Number the dissolved groups sequentially starting at 5,000,000.
    # FIX: the original created the UpdateCursor without releasing it,
    # leaving a schema lock on dissolvepath; the with-block closes it.
    num = 5000000
    with arcpy.da.UpdateCursor(dissolvepath, ['relfzh']) as tmpcur:
        for row in tmpcur:
            row[0] = num
            num = num + 1
            tmpcur.updateRow(row)

    arcpy.SpatialJoin_analysis(xzkPath,
                               dissolvepath,
                               outputxzkpath,
                               join_operation="JOIN_ONE_TO_ONE",
                               join_type="KEEP_ALL",
                               match_option="WITHIN")
示例#18
0
    def JoinSpatialTableToLayer(self, definition):
        """Spatially join a table's features onto a layer and register the result.

        Tested with points-within-polygons, though likely more flexible.
        Writes the join output into the gdb and appends a Layer (with a
        'Join_Count > 0' definition query, so only matched features show)
        to self.new_layers.
        """
        layer_name = definition['layer_name']
        layer_path = definition['layer_path']
        table_name = definition['table_name']

        # Style is optional in the definition dict.
        try:
            layer_style = definition['layer_style']
        except KeyError:
            layer_style = False

        result_name = layer_name + '__' + table_name
        result_path = self.gdb_path + '\\' + result_name
        source_path = self.gdb_path + '\\' + table_name

        arcpy.SpatialJoin_analysis(target_features=layer_path,
                                   join_features=source_path,
                                   out_feature_class=result_path)

        self.new_layers.append(layer.Layer({
            'path': result_path,
            'name': result_name,
            'style': layer_style,
            'definition_query': 'Join_Count > 0'
        }))
def addSA2():
    """Spatially join SA2 statistical-area attributes onto accident points.

    Produces AccidentLocation_with_SA2.shp containing only the fields listed
    in `to_keep` (plus the join bookkeeping fields).
    """
    shapeCursor = 'AccidentLocations.shp'
    workspace = "./"
    sa2 = 'SA2_2016_AUST.shp'
    output = 'AccidentLocation_with_SA2.shp'

    # This deletes the AccidentLocations_with_SA2.shp if it exists already.
    arcpy.Delete_management(workspace + '/' + output)

    # This defines the fieldmappings of the new shapefile.
    fieldmappings = arcpy.FieldMappings()
    fieldmappings.addTable(workspace + shapeCursor)
    fieldmappings.addTable(workspace + sa2)

    # This defines the fields that we need.
    to_keep = ['A_ID', 'Vehicles', 'Day', 'Severe', 'SA2_NAME16']

    # This loop deletes the fields that we do not need.
    for field in fieldmappings.fields:
        if field.name not in to_keep:
            fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(field.name))

    # This spatial joins the two shapefiles and adds the SA2 information into the file.
    # NOTE(review): AddField puts an (empty) 'SA2' text field on the *input*
    # shapefile, not the join output — looks unintentional; confirm.
    arcpy.AddField_management(shapeCursor, "SA2", "TEXT")
    arcpy.SpatialJoin_analysis(workspace + shapeCursor, workspace + sa2, workspace + output, "#", "#", fieldmappings)
示例#20
0
def JoinDrainage(streams, linesplits):
    """Transfer the maximum 'Drainage_mi2' value from the stream features
    onto the split-line features via a shared-line-segment spatial join.

    :param streams: stream feature class name (in the current env workspace)
    :param linesplits: split-line feature class name (in the hard-coded
        General.gdb workspace)
    :return: path to the joined output feature class
    """
    stream_ws = env.workspace  # r"R:\_GIS_Data\Site_Search_Project\Data\StreamsByHUC8.gdb"
    split_ws = r"C:\Users\graham.farley\Downloads\Data\Empty\General.gdb"
    out_ws = stream_ws  # r"R:\_GIS_Data\Site_Search_Project\Data\StreamsWithDrainageByHUC8.gdb"

    target_fc = os.path.join(split_ws, linesplits)
    join_fc = os.path.join(stream_ws, streams)
    out_fc = os.path.join(out_ws, streams + "_Joined")

    mappings = arcpy.FieldMappings()
    mappings.addTable(join_fc)
    mappings.addTable(target_fc)

    # When several streams share a segment, keep the largest drainage area.
    drainage_idx = mappings.findFieldMapIndex("Drainage_mi2")
    drainage_map = mappings.getFieldMap(drainage_idx)
    drainage_map.mergeRule = "Max"
    mappings.replaceFieldMap(drainage_idx, drainage_map)

    arcpy.SpatialJoin_analysis(target_fc, join_fc, out_fc, "#", "#",
                               mappings, "SHARE_A_LINE_SEGMENT_WITH")
    return out_fc
def main():
    """Join ALB unit-boundary attributes onto each survey feature class.

    Iterates every feature class except the boundary/parcel/template classes
    and writes a spatial-join output named <fc>_A, per the inline
    field-mapping string.

    FIX: two adjacent string literals in the mapping were concatenated
    without a separating space, yielding the malformed tokens
    'truetrue false' (DBH visibility flags) and the broken source path
    'E:/DataCollection/...' (Survey_Date); spaces restored to match the
    intact entries elsewhere in the string.
    """
    arcpy.env.workspace = r"E:\Data Collection\ProcessedSurvey\SurveyTrees.gdb"
    path = r"E:\Data Collection\ProcessedSurvey\SurveyTrees.gdb"
    keep = ['UnitBoundaries_112913','WORTaxPar', 'Template']
    azu = 'UnitBoundaries_112913'
    ftc = [f for f in arcpy.ListFeatureClasses() if f not in keep]
    for x in ftc:
        ftcPath = os.path.join(arcpy.env.workspace,x)
        azuOut = x + "_A"
        joinOut = os.path.join(path, azuOut)
        arcpy.SpatialJoin_analysis(ftcPath,azu,joinOut,
                                   "JOIN_ONE_TO_ONE","KEEP_ALL",
                               """Species "Species" true true false 255 Text 0 0 ,First,"""
                               """#,E:/Data Collection/ProcessedSurvey/SurveyTrees.gdb/Template,Species,-1,-1;DBH "DBH" true """
                               """true false 2 Short 0 0 ,First,#,E:/Data Collection/ProcessedSurvey/SurveyTrees.gdb/Template"""
                               """,DBH,-1,-1;Survey_Date "Survey_Date" true true false 8 Date 0 0 ,First,#,E:/Data """
                               """Collection/ProcessedSurvey/SurveyTrees.gdb/Template,Survey_Date,-1,-1;ALB_Area"""
                               """ "ALB_Area" true true false 12 Text 0 0 ,First,#,E:/Data Collection/"""
                               """ProcessedSurvey/SurveyTrees.gdb/UnitBoundaries_112913,ALB_Area,-1,-1;ALB_Zone"""
                               """ "ALB_Zone" true true false 5 Text 0 0 ,First,#,E:/Data Collection/ProcessedSurvey/"""
                               """SurveyTrees.gdb/UnitBoundaries_112913,ALB_Zone,-1,-1;ALB_Unit "ALB_Unit" true true false"""
                               """ 5 Text 0 0 ,First,#,E:/Data Collection/ProcessedSurvey/SurveyTrees.gdb/UnitBoundaries_112913,"""
                               """ALB_Unit,-1,-1;A_Z_U "A_Z_U" true true false 20 Text 0 0 ,First,#,E:/Data """
                               """Collection/ProcessedSurvey/SurveyTrees.gdb/UnitBoundaries_112913,A_Z_U,-1,-"""
                               """1""","INTERSECT","#","#")
    return
def sbdd_Overlay():
    """Buffer the address/road feature class 500 feet and spatially join
    this state's random points to that buffer, writing "myOverlay".

    Relies on module globals: theFD, theFC, theST, theRndPT and the helper
    sbdd_ReturnFIPS().  Returns an empty tuple.
    """
    arcpy.AddMessage("     Performing overlay")
    # Remove leftovers from a previous run so the tools can write fresh output.
    scratch = ["myBuffer", "myOverlay"]
    for fc in scratch:
        if arcpy.Exists(fc):
            arcpy.Delete_management(fc)
    # 500-foot buffer around the address/road feature class.
    arcpy.Buffer_analysis(theFD + theFC, "myBuffer", "500 FEET")
    point_lyr = theST + theFC
    fips = sbdd_ReturnFIPS()
    # Restrict the random points to census blocks in this state's FIPS code.
    qry = "BLOCKID LIKE '" + fips + "%'"
    arcpy.MakeFeatureLayer_management(theRndPT, point_lyr, qry)
    arcpy.AddMessage("      about to perform spatial join")
    # KEEP_COMMON: keep only the random points that fall inside the buffer.
    arcpy.SpatialJoin_analysis(point_lyr, "myBuffer", "myOverlay", \
                               "JOIN_ONE_TO_MANY", "KEEP_COMMON")
    if arcpy.Exists(point_lyr):
        arcpy.Delete_management(point_lyr)
    del scratch, fc, point_lyr, qry, fips
    return ()
def county_injuries():
    """Joins prepped alcohol related crash data points with county polygons to find number of injuries per capita in each
    county for year on year trend analysis and to find enforcement patterns on a regional scale

    Relies on module globals: county_list, data_years, counties (county
    polygon FC path), directory (output root), pd (pandas), and a
    "Master_DWIpoints" feature class in the current workspace.
    Writes one 'countyinjuries<year>' FC per year plus a CSV summary.
    """
    arcpy.MakeFeatureLayer_management("Master_DWIpoints", "Master_lyr")

    # Dictionary of dictionaries for building county injuries data frame
    # (outer key = county name, inner key = year, value = injuries/1000).
    data_dict_injuries = {name: {} for name in county_list}
    for key in data_dict_injuries:
        data_dict_injuries[key] = {year: 0 for year in data_years}

    for year in data_years:
        # Calculate county injury counts for each year and save to new fc
        # Select only injury crashes (exclude 'Not Injured' and 'Unknown').
        arcpy.SelectLayerByAttribute_management(
            in_layer_or_view="Master_lyr",
            selection_type="NEW_SELECTION",
            where_clause=
            "Crash_Year = {0} AND Crash_Severity <> 'Not Injured' AND Crash_Severity <> 'Unknown'"
            .format(year))
        # One-to-one join of the selected crash points into each county
        # polygon; the field mapping uses a Count merge rule on
        # Crash_Severity so the output field holds the per-county injury
        # crash count for this year.
        arcpy.SpatialJoin_analysis(
            target_features=counties,
            join_features="Master_lyr",
            out_feature_class='countyinjuries{0}'.format(year),
            join_operation="JOIN_ONE_TO_ONE",
            join_type="KEEP_ALL",
            field_mapping=
            "CNTY_NM 'CNTY_NM' true true false 13 Text 0 0 ,First,#," +
            counties +
            ",CNTY_NM,-1,-1;CNTY_NBR 'CNTY_NBR' true true false 2 Short 0 0 ,First,#,"
            + counties +
            ",CNTY_NBR,-1,-1;FIPS 'FIPS' true true false 2 Short 0 0 ,First,#,"
            + counties +
            ",FIPS,-1,-1;pop{0} 'pop{1}' true true false 4 Long 0 0 ,First,#,".
            format(year, year) + counties +
            ",pop{0},-1,-1;Crash_Severity 'Crash_Severity{1}' true true false 8000 Text 0 0 ,Count,#,Master_DWIpoints,Crash_Severity,-1,-1"
            .format(year, year),
            match_option='CONTAINS')
        # Per-1000-residents injury rate for the year.
        arcpy.AddField_management('countyinjuries{0}'.format(year),
                                  "InjuriesPer1000{0}".format(year),
                                  "DOUBLE",
                                  field_scale=5)
        arcpy.CalculateField_management(
            in_table='countyinjuries{0}'.format(year),
            field="InjuriesPer1000{0}".format(year),
            expression="calcinjuryrate( !Crash_Severity!, !pop{0}!)".format(
                year),
            expression_type="PYTHON_9.3",
            code_block=
            "def calcinjuryrate(injuries, pop):\n    if injuries == 0 or injuries is None:\n        return 0\n    else:\n        rate = float(injuries)/(float( pop)/1000)\n        return rate"
        )

        # Harvest the computed rates into the nested dictionary.
        with arcpy.da.SearchCursor(
                'countyinjuries{0}'.format(year),
            ["CNTY_NM", "InjuriesPer1000{0}".format(year)]) as cursor2:
            for row in cursor2:
                data_dict_injuries[row[0]][year] = row[1]

        # Convert to data frame and save as csv
        # NOTE(review): this write sits inside the year loop, so the CSV is
        # rewritten every iteration; only the final pass has all years.
        # Also '\C' in the path is not a valid escape sequence — consider a
        # raw string or forward slashes. TODO confirm intent.
        county_df = pd.DataFrame.from_dict(data_dict_injuries)
        county_df.to_csv('{0}\CSVResults\CountyInjuries.csv'.format(directory))
示例#24
0
    def run(self):
        """Run the facet-classification workflow: build per-facet zonal
        rasters (area, mean slope, mean 'd'), rescale them via the
        process_*_r() helpers, then cluster with IsoCluster and spatially
        join the dissolved class polygons onto self.e.s.
        """
        self.e.load()
        print "Starting Facet Classification processing..."
        # Zero raster matching sdg_f's grid, used as a constant-value zone.
        arcpy.gp.Times_sa(self.i.sdg_f, 0, self.i.empty)
        arcpy.gp.ZonalGeometry_sa(self.i.facets, self.Zone_field,
                                  self.i.facet_a, "AREA", self.i.empty)
        arcpy.gp.Int_sa(self.i.empty, self.i.empty_i)
        # Global min/max of facet area (single zone = whole raster).
        arcpy.gp.ZonalStatistics_sa(self.i.empty_i, "VALUE", self.i.facet_a,
                                    self.i.facet_mn_a, "MINIMUM", "DATA")
        arcpy.gp.ZonalStatistics_sa(self.i.empty_i, "VALUE", self.i.facet_a,
                                    self.i.facet_mx_a, "MAXIMUM", "DATA")
        self.process_facet_a_r()

        # Mean slope per facet, plus its global min/max for rescaling.
        arcpy.gp.ZonalStatistics_sa(self.i.facets, self.Zone_field,
                                    self.i.sdg_f, self.i.m_slp, "MEAN", "DATA")
        arcpy.gp.ZonalStatistics_sa(self.i.empty_i, "VALUE", self.i.m_slp,
                                    self.i.m_slp_mn, "MINIMUM", "DATA")
        arcpy.gp.ZonalStatistics_sa(self.i.empty_i, "VALUE", self.i.m_slp,
                                    self.i.m_slp_mx, "MAXIMUM", "DATA")
        self.process_m_slp_r()

        # Mean of self.e.d per facet, plus its global min/max.
        arcpy.gp.ZonalStatistics_sa(self.i.facets, self.Zone_field, self.e.d,
                                    self.i.m_d, "MEAN", "DATA")
        arcpy.gp.ZonalStatistics_sa(self.i.empty_i, "VALUE", self.i.m_d,
                                    self.i.m_d_mn, "MINIMUM", "DATA")
        arcpy.gp.ZonalStatistics_sa(self.i.empty_i, "VALUE", self.i.m_d,
                                    self.i.m_d_mx, "MAXIMUM", "DATA")
        self.process_m_d_r()

        print "Please wait...",
        # sleep() always returns None, so this branch always runs after a
        # fixed 3-minute pause — presumably to let the geoprocessor flush
        # outputs to disk; TODO confirm the wait is still needed.
        if sleep(180) == None:
            self.e.load()
            print "Processing algorithm...",
            # NOTE(review): 'inputs' is built but never passed to the
            # classifier below — only m_slp_r is classified. Confirm intent.
            inputs = ";".join([
                self.i.m_slp_r, self.i.m_d_r
            ])  # only rasters zoned by facets can be entered here
            arcpy.gp.IsoClusterUnsupervisedClassification_sa(
                self.i.m_slp_r, self.Number_of_classes, self.i.fc,
                self.Minimum_class_size, self.Sample_interval, self.i.sig)
            arcpy.RasterToPolygon_conversion(self.i.fc, self.i.fc_shp,
                                             "NO_SIMPLIFY", "VALUE")
            print "Dissolving...",
            arcpy.Dissolve_management(self.i.fc_shp, self.i.fc_shp_d,
                                      "GRIDCODE", "", "MULTI_PART",
                                      "DISSOLVE_LINES")
            print "Joining features..."
            # Now it is a point feature
            # instead of a polygon one.
            # This was modified in 11/30/2019
            arcpy.SpatialJoin_analysis(target_features=self.e.s,
                                       join_features=self.i.fc_shp_d,
                                       out_feature_class=self.i.fc_shp_osj,
                                       join_operation="JOIN_ONE_TO_MANY",
                                       join_type="KEEP_ALL",
                                       match_option="INTERSECT",
                                       search_radius="#",
                                       distance_field_name="#")
            # arcpy.SpatialJoin_analysis(target_features=self.i.fc_shp_d, join_features=self.e.s, out_feature_class=self.i.fc_shp_osj, join_operation="JOIN_ONE_TO_ONE", join_type="KEEP_ALL", match_option="CLOSEST",search_radius="5000", distance_field_name="#")  # modified match_option="INTERSECT" in 06/15/2019 to "CLOSEST"
        print "Ending Facet Classification processing..."
示例#25
0
def spatialJoin(newParcels, parcelPtsFinal, newParcelsFinal):
    """Spatially join the parcel points onto the new parcel features,
    writing newParcelsFinal (any stale copy is deleted first)."""
    # Overwrite-safe: drop an existing output before re-creating it.
    stale = arcpy.Exists(newParcelsFinal)
    if stale:
        arcpy.Delete_management(newParcelsFinal)

    print('Creating Spatial Join Layer', newParcelsFinal)

    # Default join: one-to-one, keep-all, INTERSECT.
    arcpy.SpatialJoin_analysis(newParcels, parcelPtsFinal, newParcelsFinal)
示例#26
0
def spjoinTb(tablaPrincipal, tablaCopia):
    """One-to-one INTERSECT spatial join of tablaCopia onto tablaPrincipal;
    returns the joined rows as (CODIGO, ZONAL_1, JEFATURA, EE_CC) tuples."""
    joined = arcpy.SpatialJoin_analysis(tablaPrincipal, tablaCopia,
                                        "in_memory\\tb_join2",
                                        "JOIN_ONE_TO_ONE", "KEEP_ALL", "#",
                                        "INTERSECT")
    wanted = ["CODIGO", "ZONAL_1", "JEFATURA", "EE_CC"]
    # Materialize the cursor so callers get a plain list of row tuples.
    return list(arcpy.da.SearchCursor(joined, wanted))
示例#27
0
    def execute(self, parameters, messages):
        """Build each input polygon's bounding-box polygon and spatially
        join the original features back onto those extent polygons."""
        src_layer = parameters[0].valueAsText
        dst_layer = parameters[1].valueAsText

        # CopyFeatures with arcpy.Geometry() yields a list of geometry
        # objects instead of writing a feature class.
        geometries = arcpy.CopyFeatures_management(src_layer, arcpy.Geometry())
        extent_polys = []
        for geom in geometries:
            extent_polys.append(geom.extent.polygon)
        return arcpy.SpatialJoin_analysis(extent_polys, src_layer, dst_layer)
示例#28
0
 def joinCentroid(self):
     """Join the bike centroids to the street centroids within a 50-unit
     search radius and remember the output feature class name."""
     joined_fc = "streetCent_bike"
     arcpy.SpatialJoin_analysis(self.street_centroid,
                                self.bike_centroid,
                                joined_fc,
                                search_radius=50)
     # Keep the name so later steps can reference the joined layer.
     self.street_centroid_bike = joined_fc
示例#29
0
def join_sw_structures_with_pipe_data():
    data_dir = '{}Stormwater Infrastructure/'.format(gis_proj_dir)
    sw_str = '{}Norfolk_SW_Structures.shp'.format(data_dir)
    sw_pipe = '{}Norfolk_SW_Pipes.shp'.format(data_dir)
    out_file_name = '{}sw_structures_joined_pipes.shp'.format(data_dir)
    print "SpatialJoin_analysis"
    arcpy.SpatialJoin_analysis(sw_str, sw_pipe, out_file_name, match_option='CLOSEST')
    return out_file_name
示例#30
0
def HighCrashRate_BlockGroup(Para_Workspace, Para_Blockgroups,
                             Para_CountyBoundary, Para_MajorRoad, Para_Crash,
                             Para_Distance):
    """Part 1: compute a crash rate (crashes per major-road mile) for each
    block group inside the county boundary, then export the block groups
    whose rate exceeds a derived 'high' threshold to "HighCrashRate".

    Appends all intermediate dataset names to the module-global
    'intermediate' list so they can be cleaned up later.
    """
    global intermediate
    local_intermediate = []
    ## Calculate the road length for each block
    # Block groups whose centers fall inside the county boundary.
    arcpy.MakeFeatureLayer_management(Para_Blockgroups, "blockgroups")
    arcpy.SelectLayerByLocation_management("blockgroups",
                                           "HAVE_THEIR_CENTER_IN",
                                           Para_CountyBoundary, "",
                                           "NEW_SELECTION")
    arcpy.CopyFeatures_management("blockgroups", "M_blg")
    # Clip major roads to each block group, then sum segment length per group.
    arcpy.Intersect_analysis(["M_blg", Para_MajorRoad], "M_mjr_seg")
    statsFields = [["Shape_Length", "SUM"]]
    arcpy.AddMessage("complete")
    arcpy.Statistics_analysis("M_mjr_seg", "M_mjr_seg_stat", statsFields,
                              "FID_M_blg")
    # Convert summed length to miles (assumes Shape_Length is in meters —
    # TODO confirm the source data's linear unit).
    arcpy.AddField_management("M_mjr_seg_stat", "Miles", "FLOAT")
    arcpy.CalculateField_management("M_mjr_seg_stat", "Miles",
                                    '!SUM_Shape_Length! * 0.000621371192')
    JoinField = ["SUM_Shape_Length", "Miles"]
    arcpy.JoinField_management("M_blg", "OBJECTID", "M_mjr_seg_stat",
                               "FID_M_blg", JoinField)

    ## Calculate the crash rate for each block
    # Count crashes within Para_Distance of a major road per block group;
    # Join_Count from the spatial join is the per-group crash count.
    arcpy.Buffer_analysis(Para_MajorRoad, "major_roads_Buffer", Para_Distance)
    arcpy.Clip_analysis(Para_Crash, "major_roads_Buffer", "crashes_300feet")
    arcpy.SpatialJoin_analysis("M_blg", "crashes_300feet", "M_blg_crashcount",
                               "JOIN_ONE_TO_ONE")
    arcpy.AddField_management("M_blg_crashcount", "crash_rate", "FLOAT")
    arcpy.CalculateField_management("M_blg_crashcount", "crash_rate",
                                    '!Join_Count!/!Miles!')
    arcpy.AddMessage("crash rate compute complete")
    crash_rate_statField = [["crash_rate", "MEAN"], ["crash_rate", "MIN"],
                            ["crash_rate", "MAX"]]
    arcpy.Statistics_analysis("M_blg_crashcount", "M_blg_crashcount_stat",
                              crash_rate_statField)

    new_item = []
    for item in ["MEAN", "MIN", "MAX"]:
        new_item.append(item + "_crash_rate")

    # Single-row result: index 0 = MEAN, 1 = MIN, 2 = MAX (cursor field order).
    Stat_list = list(arcpy.da.SearchCursor("M_blg_crashcount_stat", new_item))

    # NOTE(review): LowRate and MidRate are computed but never used, and
    # HighRate is the midpoint of MIN and MAX rather than of MEAN and MAX —
    # confirm these threshold formulas are intentional.
    LowRate = Stat_list[0][1]
    MidRate = (Stat_list[0][0] + Stat_list[0][1]) / 2
    HighRate = (Stat_list[0][1] + Stat_list[0][2]) / 2
    SQL_highrate = '"crash_rate" > {value}'.format(value=HighRate)
    # Export block groups whose rate exceeds the high threshold.
    arcpy.FeatureClassToFeatureClass_conversion("M_blg_crashcount",
                                                Para_Workspace,
                                                "HighCrashRate", SQL_highrate)
    arcpy.AddMessage("Complete the Part 1: HighCrashRate_BlockGroup")
    # Register intermediates for later cleanup by the caller.
    local_intermediate = [
        "M_blg", "M_mjr_seg", "M_mjr_seg_stat", "major_roads_Buffer",
        "crashes_300feet", "M_blg_crashcount", "M_blg_crashcount_stat",
        "HighCrashRate"
    ]
    intermediate = intermediate + local_intermediate