Example #1
0
def runJoinTables(hexBins, projectGDBPath):
    # Report the start of the join step
    print(
        "\nJoining hexagon polygons with nitrate level and cancer rate tables..."
    )

    # Set environment settings
    arcpy.env.workspace = r"C:\Users\rkpalmerjr\Documents\School\WISC\Fall_2019\GEOG_777_Capstone_in_GIS_Development\Project_1\TESTING\Test_1\Scratch"

    # Set local variables for Add Join
    hexBinsLyr = arcpy.MakeFeatureLayer_management(hexBins, "hexBinsLyr")
    nitrateZSaT = os.path.join(projectGDBPath, "nitrateZSat")
    canrateZSaT = os.path.join(projectGDBPath, "canrateZSat")

    # Join the nitrate and cancer-rate zonal statistics tables to the hexagons on GRID_ID
    arcpy.JoinField_management(hexBinsLyr, "GRID_ID", nitrateZSaT,
                               "nit_GRID_ID")
    arcpy.JoinField_management(hexBinsLyr, "GRID_ID", canrateZSaT,
                               "can_GRID_ID")

    # Release the temporary feature layer reference
    del hexBinsLyr

    # Report completion
    print(
        "\nHexagon polygons joined with nitrate level and cancer rate tables.")
Example #2
0
def create_texture_map(in_soil):
    arcpy.MakeRasterLayer_management(in_raster=in_soil, out_rasterlayer='soil_raster_layer', band_index="1")
    fields = [field.name for field in arcpy.ListFields('soil_raster_layer')]
    if 'texcl' not in [field.lower() for field in fields]:
        component = str(Path(in_soil).parents[0] / 'component')
        arcpy.JoinField_management(in_data='soil_raster_layer', in_field='MUKEY', join_table=component,
                                   join_field='mukey',
                                   fields='cokey')

        chorizon = str(Path(in_soil).parents[0] / 'chorizon')
        arcpy.MakeRasterLayer_management(in_raster=in_soil, out_rasterlayer='soil_raster_layer', band_index="1")
        soil_raster_layer = arcpy.JoinField_management(in_data='soil_raster_layer', in_field='cokey',
                                                       join_table=chorizon,
                                                       join_field='cokey', fields='chkey')

        chtexturegrp = str(Path(in_soil).parents[0] / 'chtexturegrp')
        arcpy.MakeRasterLayer_management(in_raster=in_soil, out_rasterlayer='soil_raster_layer', band_index="1")
        soil_raster_layer = arcpy.JoinField_management(in_data='soil_raster_layer', in_field='chkey',
                                                       join_table=chtexturegrp,
                                                       join_field='chkey', fields='chtgkey')

        chtexture = str(Path(in_soil).parents[0] / 'chtexture')
        arcpy.MakeRasterLayer_management(in_raster=in_soil, out_rasterlayer='soil_raster_layer', band_index="1")
        soil_raster_layer = arcpy.JoinField_management(in_data='soil_raster_layer', in_field='chtgkey',
                                                       join_table=chtexture,
                                                       join_field='chtgkey')
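The example above chains JoinField_management calls to walk the SSURGO key hierarchy (MUKEY -> cokey -> chkey -> chtgkey), carrying one key field forward at each step. A small sketch of that chaining pattern in generic form; the helper name and the shape of the join_steps list are assumptions, not part of the example.

import arcpy

def chain_joins(layer, join_steps):
    # join_steps: iterable of (in_field, join_table, join_field, carry_field) tuples.
    # Each step appends carry_field to the layer's table so the next key is available.
    for in_field, join_table, join_field, carry_field in join_steps:
        arcpy.JoinField_management(in_data=layer, in_field=in_field,
                                   join_table=join_table, join_field=join_field,
                                   fields=carry_field)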
Example #3
0
def growthRatesJoin(urbanGrowthFc, ruralGrowthFc, countryBoundaries, urbanAreasShp, iso3, outGDB):

    try:
        # Extract polygons by country iso
        arcpy.FeatureClassToFeatureClass_conversion(countryBoundaries,
                                                    "in_memory",
                                                    "countryBoundary",
                                                    """ iso_alpha3 = '%s' """ % (iso3,))
        arcpy.FeatureClassToFeatureClass_conversion(urbanAreasShp,
                                                    "in_memory",
                                                    "urban_extract",
                                                    """ ISO3 = '%s' """ % (iso3,))
        # Union of urban and boundary polygons
        arcpy.Union_analysis(["in_memory/countryBoundary", "in_memory/urban_extract"],
                             "in_memory/countryUrbanRural")
        # Separate urban and rural polygons
        arcpy.FeatureClassToFeatureClass_conversion("in_memory/countryUrbanRural",
                                                    "in_memory",
                                                    "countryUrban",
                                                    """ ONES = 1 """)
        arcpy.FeatureClassToFeatureClass_conversion("in_memory/countryUrbanRural",
                                                    "in_memory",
                                                    "countryRural",
                                                    """ ONES = 0 """)
        # Join growth rates data
        arcpy.JoinField_management("in_memory/countryUrban", "iso_alpha2", urbanGrowthFc, "ISO2", ["Growth20102015"])
        arcpy.JoinField_management("in_memory/countryRural", "iso_alpha2", ruralGrowthFc, "ISO2", ["Growth20102015"])
        # Merge urban and rural data back together
        arcpy.Merge_management(["in_memory/countryUrban", "in_memory/countryRural"], outGDB + "/growthRates%s" % iso3)

    finally:
        # Tidy up
        arcpy.Delete_management("in_memory")
Example #4
0
def manipulateTable(pivotTable):
    with arcpy.da.UpdateCursor(pivotTable, 'refcode') as cursor:
        for row in cursor:
            if row[0] is None:
                cursor.deleteRow()

    # add field
    arcpy.AddField_management(pivotTable,
                              "total_records",
                              "LONG",
                              field_length=3,
                              field_alias="Total Records")

    # populate total_records field with sum of all records
    expression = "!dmpend! + !dmproc! + !dmready! + !dr! + !idrev!"

    arcpy.CalculateField_management(pivotTable, "total_records", expression,
                                    "PYTHON_9.3")

    join = os.path.join(env.workspace, "surveysite")
    arcpy.JoinField_management(pivotTable, "refcode", join, "refcode",
                               ["survey_site_dmstat", "dm_stat_comm"])

    join = os.path.join(env.workspace, "elementRecords")
    arcpy.JoinField_management(pivotTable, "refcode", join, "refcode", [
        "county", "created_by", "created_on", "last_up_by", "last_up_on",
        "element_type"
    ])

    arcpy.AddField_management(pivotTable, "EastWest", "TEXT", "", "", 1,
                              "East West", "", "", "")

    # fill field with E or W depending upon county
    West = [
        "ERIE", "CRAWFORD", "MERCER", "LAWRENCE", "BEAVER", "WASHINGTON",
        "GREENE", "VENANGO", "BUTLER", "ALLEGHENY", "FAYETTE", "WESTMORELAND",
        "ARMSTORNG", "INDIANA", "CLARION", "JEFFERSON", "FOREST", "WARREN",
        "MCKEAN", "ELK", "CLEARFIELD", "CAMBRIA", "SOMERSET", "BEDFORD",
        "BLAIR", "CENTRE", "CLINTON", "POTTER", "CAMERON", "HUNTINGDON",
        "FULTON", "FRANKLIN"
    ]
    with arcpy.da.UpdateCursor(pivotTable, ["county", "EastWest"]) as cursor:
        for row in cursor:
            if row[0] in West:
                row[1] = "W"
                cursor.updateRow(row)
            else:
                row[1] = "E"
                cursor.updateRow(row)

    fields = arcpy.ListFields(pivotTable)
    keepFields = [
        "OID", "county", "refcode", "created_by", "created_on", "dmpend",
        "dmproc", "dmready", "dr", "idrev", "survey_site_dmstat",
        "total_records", "dm_stat", "dm_stat_comm", "last_up_by", "last_up_on",
        "element_type", "created_by"
    ]
    dropFields = [x.name for x in fields if x.name not in keepFields]
    arcpy.DeleteField_management(pivotTable, dropFields)
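manipulateTable assumes that env.workspace already points at a geodatabase containing the surveysite and elementRecords tables it joins from. A hypothetical call, with the workspace path and pivot table name invented for illustration:

from arcpy import env

env.workspace = r"C:\Data\records.gdb"  # hypothetical workspace holding surveysite and elementRecords
manipulateTable("refcode_pivot")        # hypothetical pivot table name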
Example #5
0
def main(line, seg_length):

    arcpy.AddMessage("Plotting segment endpoints...")
    arcpy.MakeFeatureLayer_management(line, "in_line_lyr")
    fields = ["SHAPE@", "LineOID"]

    # Plot endpoints for all segments
    endPnt_all = plot_end(line, fields)
    arcpy.MakeFeatureLayer_management(endPnt_all, "endPnt_all_lyr")

    # Find duplicate endpoints
    arcpy.FindIdentical_management("endPnt_all_lyr", "dup_table", ["Shape"],
                                   0.5, "#", "ONLY_DUPLICATES")
    arcpy.MakeTableView_management(r"dup_table", "dup_tblview")
    arcpy.JoinField_management("endPnt_all_lyr", "LineOID", "dup_tblview",
                               "IN_FID", "#")
    arcpy.SelectLayerByAttribute_management("endPnt_all_lyr", "NEW_SELECTION",
                                            """"IN_FID" IS NOT NULL""")
    arcpy.FeatureClassToFeatureClass_conversion("endPnt_all_lyr", "in_memory",
                                                "endPnt_dup")

    # Find segments with duplicate endpoints
    arcpy.JoinField_management("in_line_lyr", "OBJECTID", "dup_tblview",
                               "IN_FID", "#")
    arcpy.SelectLayerByAttribute_management("in_line_lyr", "NEW_SELECTION",
                                            """"IN_FID" IS NOT NULL""")
    arcpy.FeatureClassToFeatureClass_conversion("in_line_lyr", "in_memory",
                                                "line_dup")
    arcpy.SelectLayerByAttribute_management("in_line_lyr", "SWITCH_SELECTION")
    arcpy.FeatureClassToFeatureClass_conversion("in_line_lyr", "in_memory",
                                                "line_nodup")

    # Re-plot endpoints for segments with duplicate endpoints
    endPnt_dup_final = plot_end(r"in_memory\line_dup", fields)
    arcpy.FeatureClassToFeatureClass_conversion(endPnt_dup_final, "in_memory",
                                                "endPnt_dup_final")
    endPnt_nodup_final = plot_end(r"in_memory\line_nodup", fields)
    arcpy.FeatureClassToFeatureClass_conversion(endPnt_nodup_final,
                                                "in_memory",
                                                "endPnt_nodup_final")
    finalEndpnt = arcpy.Merge_management(
        ["in_memory\endPnt_nodup_final", "in_memory\endPnt_dup_final"],
        r"in_memory\finalEndPnt")

    # clean up temp files
    arcpy.Delete_management("in_line_lyr")
    arcpy.Delete_management(r"dup_table")
    arcpy.Delete_management("dup_tblview")
    arcpy.Delete_management(endPnt_all)
    arcpy.Delete_management("endPnt_all_lyr")
    arcpy.Delete_management(r"in_memory\endPnt_dup")
    arcpy.Delete_management(r"in_memory\line_dup")
    arcpy.Delete_management(r"in_memory\line_nodup")
    arcpy.Delete_management(r"in_memory\endPnt_nodup_final")
    arcpy.Delete_management(r"in_memory\endPnt_dup_final")

    return finalEndpnt
Example #6
0
def convertAltStreets(Project_Folder):
    arcpy.env.overwriteOutput = True

    Model_Inputs_gdb = os.path.join(Project_Folder, 'Model_Inputs.gdb')
    Model_Outputs_gdb = os.path.join(Project_Folder, 'Model_Outputs.gdb')

    streets_simple = os.path.join(Model_Outputs_gdb, 'Streets_Simple')
    altstreets = os.path.join(Model_Inputs_gdb, 'AltStreets')

    arcpy.env.workspace = Model_Inputs_gdb

    # Simplify AltStreets and Streets Lines
    # removes some of the nodes that make up the lines to make the files low resolution enough to be uploaded through mapmaker
    altstreets_simple = arcpy.SimplifyLine_cartography(in_features=altstreets, out_feature_class=os.path.join(Model_Outputs_gdb, "AltStreet_simple"), algorithm="POINT_REMOVE",
                                                       tolerance="5 Feet", error_resolving_option="RESOLVE_ERRORS", collapsed_point_option="KEEP_COLLAPSED_POINTS", error_checking_option="CHECK", in_barriers=[])[0]

    # add ref_zlev and dom fields for alias classification and linking to streets file
    arcpy.AddFields_management(in_table=altstreets_simple, field_description=[
                               ["REF_ZLEV", "SHORT"], ["DOM", "LONG"]])
    print('added fields to altstreets')

    arcpy.AddIndex_management(altstreets_simple, fields=[
                              "LINK_ID"], index_name="LINK_ID", unique="NON_UNIQUE", ascending="ASCENDING")
    print('added altstreet index')

    arcpy.JoinField_management(in_data=altstreets_simple, in_field="LINK_ID",
                               join_table=streets_simple, join_field="LINK_ID", fields=["NUM_STNMES"])
    print('joined altstreets to streets')

    # Filter out all of the altstreet rows that do not have multiple names
    altstreets_filter = arcpy.FeatureClassToFeatureClass_conversion(
        in_features=altstreets_simple, out_path=Model_Outputs_gdb, out_name="AltStreets_Filter", where_clause="NUM_STNMES > 1")
    print('altstreets filtered if less than 2')

    # Create Statistics Table from AltStreets_Simple
    # add in the count of all the street names added to the altstreets simple
    altstreet_stats = os.path.join(Model_Outputs_gdb, "Altstreets_Stats")
    arcpy.Statistics_analysis(in_table=altstreets_filter, out_table=altstreet_stats, statistics_fields=[
                              ["LINK_ID", "FIRST"]], case_field=["LINK_ID", "ST_NAME"])

    # Join AltStreets_Simple with AltStreets_Stats
    arcpy.JoinField_management(in_data=altstreets_simple, in_field="LINK_ID",
                               join_table=altstreet_stats, join_field="LINK_ID", fields=["NUM_STNMES"])

    arcpy.CalculateField_management(in_table=altstreets_simple, field="Dom",
                                    expression="1", expression_type="PYTHON3", code_block="", field_type="TEXT")

    # Alias streetname identifier calculation (Alias == -9)
    # MapMaker REQUIRES it to be -9 in order to find it as an alias field
    arcpy.CalculateField_management(in_table=altstreets_simple, field="REF_ZLEV",
                                    expression="-9", expression_type="PYTHON3", code_block="", field_type="TEXT")

    # updated the schema to match mapmaker schema
    updateSchema(altstreets_simple)

    # returns altstreets_final gdb location
    return arcpy.FeatureClassToFeatureClass_conversion(in_features=altstreets_simple, out_path=Model_Outputs_gdb, out_name="AltStreets_Final")[0]
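A hypothetical call to convertAltStreets; the project folder is a placeholder and must already contain Model_Inputs.gdb and Model_Outputs.gdb, and the updateSchema helper used near the end of the function is assumed to be defined elsewhere in the same script.

# Hypothetical project folder for illustration only
altstreets_final = convertAltStreets(r"C:\Projects\Streets_Model")
print('AltStreets_Final written to: ' + altstreets_final)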
Example #7
0
def manipulateTable(pivotTable):
    '''function that makes format changes to pivot table'''

    # delete records with null or blank reference code
    with arcpy.da.UpdateCursor(pivotTable, 'refcode') as cursor:
        for row in cursor:
            if row[0] is None or row[0] == "":
                cursor.deleteRow()

    # add field and populate with total number of records by adding all records
    arcpy.AddField_management(pivotTable,
                              "total_records",
                              "DOUBLE",
                              field_length=3,
                              field_alias="Total Records")
    expression = "!dmpend! + !dmproc! + !dmready! + !dr! + !idrev!"
    arcpy.CalculateField_management(pivotTable, "total_records", expression,
                                    "PYTHON_9.3")

    # join dm status and dm status comments data from survey site to pivot table
    join = os.path.join(env.workspace, "survey_site1")
    arcpy.AlterField_management(join, "dm_stat", "survey_site_dmstat",
                                "Survey Site - DM Status")
    arcpy.JoinField_management(pivotTable, "refcode", join, "refcode",
                               ["survey_site_dmstat", "dm_stat_comm"])

    # join original data from elementRecords table to pivot table
    join = os.path.join(env.workspace, "elementRecords")
    arcpy.JoinField_management(pivotTable, "refcode", join, "refcode", [
        "COUNTY_NAM", "created_by", "created_on", "last_up_by", "last_up_on",
        "element_type"
    ])

    # add new field for east or west location
    arcpy.AddField_management(pivotTable, "Location", "TEXT", "", "", 1,
                              "Location", "", "", "")
    # list of western counties
    West = [
        "ERIE", "CRAWFORD", "MERCER", "LAWRENCE", "BEAVER", "WASHINGTON",
        "GREENE", "VENANGO", "BUTLER", "ALLEGHENY", "FAYETTE", "WESTMORELAND",
        "ARMSTORNG", "INDIANA", "CLARION", "JEFFERSON", "FOREST", "WARREN",
        "MCKEAN", "ELK", "CLEARFIELD", "CAMBRIA", "SOMERSET", "BEDFORD",
        "BLAIR", "CENTRE", "CLINTON", "POTTER", "CAMERON", "HUNTINGDON",
        "FULTON", "FRANKLIN"
    ]
    # populate location field with east or west depending if they are in list
    with arcpy.da.UpdateCursor(pivotTable,
                               ["COUNTY_NAM", "Location"]) as cursor:
        for row in cursor:
            if row[0] in West:
                row[1] = "W"
                cursor.updateRow(row)
            else:
                row[1] = "E"
                cursor.updateRow(row)
Example #8
0
    def execute(self, params, messages):
        MarxanDB = params[0].valueAsText
        cost = params[1].valueAsText
        stat = params[2].valueAsText
        protected_lands = params[3].valueAsText
        threshold = params[4].valueAsText

        arcpy.env.workspace = "in_memory"

        pulayer = os.path.join(MarxanDB, "pulayer", "pulayer.shp")

        zone_stat = ZonalStatisticsAsTable(pulayer, "id", cost, "zone_stat",
                                           "", stat)
        arcpy.AlterField_management(zone_stat, stat, "cost")

        with arcpy.da.UpdateCursor(zone_stat, "cost") as cursor:
            for row in cursor:
                row[0] = round(row[0], 1)
                cursor.updateRow(row)
        arcpy.JoinField_management(pulayer, "id", zone_stat, "id", "cost")

        dissolve_pad = arcpy.Dissolve_management(protected_lands,
                                                 "dissolve_pad", "", "",
                                                 "MULTI_PART")
        tab_intersect = arcpy.TabulateIntersection_analysis(
            pulayer, "id", dissolve_pad, "tab_intersect")

        arcpy.AddField_management(tab_intersect, "status", "SHORT")
        with arcpy.da.UpdateCursor(tab_intersect,
                                   ["PERCENTAGE", "status"]) as cursor:
            for row in cursor:
                if row[0] >= int(threshold):
                    row[1] = 2
                    cursor.updateRow(row)
                elif row[0] < int(threshold):
                    row[1] = 0
                    cursor.updateRow(row)

        arcpy.JoinField_management(pulayer, "id", tab_intersect, "id",
                                   "status")

        pu_dat = os.path.join(MarxanDB, "input", "pu.dat")

        fields = ["id", "cost", "status"]

        with open(pu_dat, "a+") as f:
            f.write('\t'.join(fields) + '\n')
            with arcpy.da.SearchCursor(pulayer, fields) as cursor:
                for row in cursor:
                    f.write('\t'.join([str(r) for r in row]) + '\n')
        f.close()
        return
Example #9
0
def create_state_ssurgo(state, replace):
    state_lcc_dir = constants.lcc_dir + state + os.sep

    state_ssurgo = state_lcc_dir + state + 'ssurgo'
    lu_ssurgo = state_lcc_dir + state + '_lu_ssurgo'
    out_state_sgo = state_lcc_dir + state + '_sgo_' + constants.METRIC.lower()

    # Join with LCC csv
    if arcpy.Exists(out_state_sgo) and not (replace):
        pass
    else:
        arcpy.BuildRasterAttributeTable_management(state_ssurgo, "Overwrite")

        try:
            if (constants.METRIC == 'LCC'):
                arcpy.JoinField_management(state_ssurgo, "VALUE",
                                           constants.LCC_CR, "mukey", "")
            else:  # DI or PI
                arcpy.JoinField_management(state_ssurgo, "VALUE", DI_PI_CR,
                                           "mukey", "")
        except:
            logging.info(arcpy.GetMessages())

        # Lookup to create new raster with new VALUE field
        # Execute Lookup
        lup_column = ''
        remap_file = ''

        if (constants.METRIC == 'LCC'):
            lup_column = 'NICCDCD'
            remap_file = constants.SSURGO_REMAP_FILE
        elif (constants.METRIC == 'PI'):
            lup_column = 'PI'
            remap_file = constants.PI_REMAP_FILE
        elif (constants.METRIC == 'DI'):
            lup_column = 'DI'
            remap_file = constants.DI_REMAP_FILE

        lu_tmp = Lookup(state_ssurgo, lup_column)
        # Save the output
        lu_tmp.save(lu_ssurgo)

        # Reclass raster to group LCC values into 3 classes:
        # Productive, Moderate and Marginal
        out_reclass = ReclassByASCIIFile(lu_ssurgo, remap_file, "NODATA")
        out_reclass.save(out_state_sgo)

    logging.info('\t SSURGO state ' + state)
    return out_state_sgo
Example #10
0
def spatialjoinsum(intar,injoin,infieldname,outfieldname,jointype):
    tempData = arcpy.env.scratchGDB + os.path.sep + "outputs"
    targetFeatures = intar
    joinFeatures = injoin
  
    # Create a new fieldmappings and add the two input feature classes.
    fieldmappings = arcpy.FieldMappings()
    fieldmappings.addTable(targetFeatures)
    fieldmappings.addTable(joinFeatures)
 
    FieldIndex = fieldmappings.findFieldMapIndex(infieldname)
    fieldmap = fieldmappings.getFieldMap(FieldIndex)
 
    # Get the output field's properties as a field object
    field = fieldmap.outputField
 
    # Rename the field and pass the updated field object back into the field map
    field.name = outfieldname
    fieldmap.outputField = field
 
    # Set the merge rule to mean and then replace the old fieldmap in the mappings object
    # with the updated one
    fieldmap.mergeRule = "sum"
    fieldmappings.replaceFieldMap(FieldIndex, fieldmap)
 
    # Remove all output fields from the field mappings except "FID" and the renamed output field
    for field in fieldmappings.fields:
        if field.name not in ["FID",outfieldname]:
            fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(field.name))
 
    #Run the Spatial Join tool, using the defaults for the join operation and join type
    arcpy.SpatialJoin_analysis(targetFeatures, joinFeatures, tempData, "#", "#", fieldmappings,jointype)
    arcpy.DeleteField_management(intar, outfieldname)
    arcpy.JoinField_management(intar, "FID", tempData, "TARGET_FID", [outfieldname])
    arcpy.Delete_management(tempData)
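A hypothetical call to spatialjoinsum; the shapefiles, field names, and match option below are placeholders chosen for illustration (both inputs must have an FID field, since the function joins on it).

# Sum a hypothetical POP field from census blocks intersecting each parcel and
# write the result to a new POP_SUM field on the parcels.
spatialjoinsum(intar=r"C:\Data\parcels.shp",
               injoin=r"C:\Data\census_blocks.shp",
               infieldname="POP",
               outfieldname="POP_SUM",
               jointype="INTERSECT")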
Example #11
0
def create_grls(grid, population):
    """Creates a table to join to the grid dataset"""
    try:
        output_features = os.path.join(env.scratchGDB, "temp_grid")
        reclass_population = os.path.join(env.scratchFolder, "rast_temp.tif")
        zonal_table = os.path.join(env.scratchGDB, 'zonalstats')  # alternatively: "in_memory\\table"
        if arcpy.Exists(reclass_population):
            arcpy.Delete_management(reclass_population)
        if arcpy.Exists(zonal_table):
            arcpy.Delete_management(zonal_table)
        output_features = arcpy.CopyFeatures_management(grid, output_features)[0]
        arcpy.AddMessage(output_features)
        arcpy.AddMessage(reclass_population)
        arcpy.AddMessage(zonal_table)


        arcpy.gp.Reclassify_sa(population, "VALUE", "0 0;1 2;2 2;3 2;4 2;5 2;6 1;7 1;8 1;9 1;10 1", reclass_population, "DATA")
        arcpy.gp.ZonalStatisticsAsTable_sa(output_features, "OBJECTID", reclass_population,zonal_table, "DATA", "ALL")
        #zonal_oid = arcpy.Describe(zonal_table).OIDFieldName
        arcpy.JoinField_management(output_features, "OBJECTID",
                                   zonal_table, "OBJECTID_1",
                                   "Count;Area;Min;Max;Range;Variety;Majority;Minority;Median;Mean;Std;Sum")
        arcpy.Delete_management(reclass_population)
        return output_features
    except:
        line, filename, synerror = trace()
        raise FunctionError(
            {
                "function": "create_grls",
                "line": line,
                "filename": filename,
                "synerror": synerror,
                "arc" : str(arcpy.GetMessages(2))
                }
        )
Example #12
0
    def AddStates(mxd):

        #Join table to states feature class
        arcpy.JoinField_management(
            'statesSelection', 'NAME', 'nnDistance', 'StateName',
            ['NNdistance', 'LOCATION_COUNT', 'LOCATION_DENSITY'])
        print 'Done joining nnDistance to states_selection'

        #Create temp layer for adding
        arcpy.MakeFeatureLayer_management('statesSelection', 'states')

        #Add the new layer to the map
        newLayer = arcpy.mapping.Layer('states')

        #Update label expression and turn on labels
        for lblClass in newLayer.labelClasses:
            # VB code for the label expression
            lblClass.expression = "[STUSPS]"

        newLayer.showLabels = True

        #Update symbology based on lyrfile
        #lyrFile = arcpy.mapping.Layer(r'H:\GEO 4393C\FinalProject\statesSym.lyr')
        #arcpy.mapping.UpdateLayer(df,newLayer,lyrFile,True)
        #sym = newLayer.symbology
        #sym.updateRenderer('GraduatedColorsRenderer')
        #sym.renderer.classificationField = 'NNdistance'
        #sym.classificationMethod = 'NaturalBreaks'
        #sym.renderer.breakCount = 5

        #Set extent of df to states
        df.zoomToSelectedFeatures()

        arcpy.mapping.AddLayer(df, newLayer)
        print "Added states to map"
Example #13
0
def make_rand_road_pts():
    """
    makes the 'rd_far_fld.shp' file which is points on roads that are spaced at least 300 ft from
    each other and at least 200 ft from any flood points
    :return: 
    """
    road_shapefile = "nor_roads_centerlines.shp"
    arcpy.Densify_edit(road_shapefile, densification_method='DISTANCE', distance=30)
    road_pts_file = 'rd_pts_all_1.shp'
    arcpy.FeatureVerticesToPoints_management(road_shapefile, road_pts_file)
    rand_rd_pts_file = 'rand_road.shp'
    rand_rd_pts_lyr = 'rand_road_lyr'
    arcpy.CreateRandomPoints_management(gis_proj_dir, rand_rd_pts_file, road_pts_file,
                                        number_of_points_or_field=50000,
                                        minimum_allowed_distance='200 Feet')
    print "rand_rd_points_file"
    fld_pts_file = 'flooded_points.shp'
    fld_pts_buf = 'fld_pt_buf.shp'
    arcpy.Buffer_analysis(fld_pts_file, fld_pts_buf, buffer_distance_or_field="200 Feet",
                          dissolve_option='ALL')
    print "buffer"
    arcpy.MakeFeatureLayer_management(rand_rd_pts_file, rand_rd_pts_lyr)
    arcpy.SelectLayerByLocation_management(rand_rd_pts_lyr, overlap_type='WITHIN',
                                           select_features=fld_pts_buf,
                                           invert_spatial_relationship='INVERT')
    rd_pts_outside_buf = 'rd_far_fld.shp'
    arcpy.CopyFeatures_management(rand_rd_pts_lyr, rd_pts_outside_buf)
    arcpy.JoinField_management(rd_pts_outside_buf, in_field='CID', join_table=road_pts_file,
                               join_field='FID')
    print "rd_points_outside_buf"
Example #14
0
def CalcuZJBNTMJ(JCTBshp,JBNTTB,saveFolder):
    Input_Features = []
    Input_Features.append(JCTBshp)
    Input_Features.append(JBNTTB)
#     print Input_Features
    Output_Feature_Class = os.path.join(saveFolder,JCTBshp[:-4] + "_JBNTTB" + "_intersect.shp")
#     print Output_Feature_Class
    arcpy.Intersect_analysis(Input_Features, Output_Feature_Class, "ALL", "", "INPUT")
    
    # Process: Add Field
    #arcpy.AddField_management(Output_Feature_Class, "ZJBNTMJ", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
    
    # Process: Calculate Field
    arcpy.CalculateField_management(Output_Feature_Class, "ZJBNTMJ", "!SHAPE.AREA!", "PYTHON_9.3", "")
    
    # Process: Dissolve
    Dissolve_Output_Feature_Class = os.path.join(saveFolder,JCTBshp[:-4] + "_JBNTTB" + "_intersect_Dissolve.shp")
    print "dissolving..."
    arcpy.Dissolve_management(Output_Feature_Class,Dissolve_Output_Feature_Class, "BSM", "ZJBNTMJ SUM", "MULTI_PART", "DISSOLVE_LINES")
    
    print "JoinFielding..."
    arcpy.JoinField_management(JCTBshp, "BSM",Dissolve_Output_Feature_Class, "BSM", "BSM;SUM_ZJBNTM")
    
    print "CalculateFielding..."
    arcpy.CalculateField_management(JCTBshp, "ZJBNTMJ", "!SUM_ZJBNTM!","PYTHON_9.3", "")
    arcpy.CalculateField_management(JCTBshp, "ZJBNTMJ", '!SUM_ZJBNTM!/666.7',"PYTHON_9.3", "")
    
    arcpy.DeleteField_management(JCTBshp,"BSM_1")
    arcpy.DeleteField_management(JCTBshp,"SUM_ZJBNTM")
Example #15
0
def addNotation(notationType, fieldsToAdd, joinFieldName, outputTable,
                scratchTable, inputXField, inputYField, inputCoordinateFormat,
                inputSpatialReference):
    ''' '''
    try:
        arcpy.AddMessage(
            "Converting & appending {0} with fields {1} ...".format(
                notationType, fieldsToAdd))
        arcpy.ConvertCoordinateNotation_management(outputTable, scratchTable,
                                                   inputXField, inputYField,
                                                   inputCoordinateFormat,
                                                   notationType, joinFieldName,
                                                   inputSpatialReference)
        arcpy.JoinField_management(outputTable, joinFieldName, scratchTable,
                                   joinFieldName, fieldsToAdd)

        # TRICKY DDLat, DDLon names are hard-coded in ConvertCoordinateNotation so
        # We need to rename one of these to have both DD and DD_NUMERIC in same output table
        if notationType == 'DD_NUMERIC':
            arcpy.AlterField_management(outputTable, 'DDLat', 'DDLatNumeric',
                                        'DDLatNumeric')
            arcpy.AlterField_management(outputTable, 'DDLon', 'DDLonNumeric',
                                        'DDLonNumeric')

        return True
    except arcpy.ExecuteError:
        error = True
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        #print msgs #UPDATE
        print(msgs)
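A sketch of how addNotation might be driven for more than one notation type; the table paths, join field, X/Y field names, and the fields-to-add lists are illustrative assumptions rather than values taken from the example.

import arcpy

# Hypothetical inputs for illustration only
output_table = r"C:\Data\coords.gdb\observations"
scratch_table = r"in_memory\notation_scratch"
spatial_ref = arcpy.SpatialReference(4326)  # WGS 1984

for notation_type, fields_to_add in [("DD_NUMERIC", ["DDLat", "DDLon"]),
                                     ("MGRS", ["MGRS"])]:
    addNotation(notation_type, fields_to_add, "JoinID", output_table,
                scratch_table, "POINT_X", "POINT_Y", "DD_2", spatial_ref)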
Example #16
0
def add_join(fc, table, new_field, old_field):
    ## new fields to copy data over to
    arcpy.AddField_management(fc, "wdy_ht_sd", "DOUBLE")
    arcpy.AddField_management(fc, "Wdy_Size", "TEXT", "", "", "32", "",
                              "NULLABLE", "NON_REQUIRED", "")
    ## join based on FID values
    arcpy.JoinField_management(fc, "FID", table, "FID")
Example #17
0
def join_nytimes_cases_and_google_mobility_data_to_counties(csv, counties_fc):
    #make a copy of the counties fc
    #join csv to counties fc copy by geoid
    arcpy.env.overwriteOutput = True

    table_tmp = tmp_dir + "/county_covid_and_mobility_trends_table.dbf"
    #counties_fc_trends = output_dir + "/county_covid_and_mobility_trends_poly"

    #copy csv to table
    #print("Copying " + csv + " to " + table_tmp)
    #arcpy.CopyRows_management(csv, table_tmp)

    #make a copy of the counties fc class in preparation for the attribute join
    #arcpy.CopyFeatures_management(counties_fc, counties_fc_trends)

    #join to counties
    fields = [
        "fips", "cases_trnd", "pst_cases", "prv_cases", "cases_date",
        "rtl_trend", "rtl_pstmov", "rtl_prvmov", "prk_trend", "prk_pstmov",
        "prk_prvmov", "trn_trend", "trn_pstmov", "trn_prvmov", "wkp_trend",
        "wkp_pstmov", "wkp_prvmov", "res_trend", "res_pstmov", "res_prvmov",
        "mob_date"
    ]
    #pdb.set_trace()
    print("joining " + table_tmp + " to " + counties_fc)
    arcpy.JoinField_management(in_data=counties_fc + ".shp",
                               in_field="GEOID_dbl",
                               join_table=table_tmp,
                               join_field="fips",
                               fields=fields)
Example #18
0
def create_ancillary_features(grid, pop, output_features):

    gdb = os.path.dirname(output_features)
    print(gdb)

    if arcpy.Exists(gdb):
        print("Geodatabase already exists")
    else:
        print("Creating: " + str(gdb))
        arcpy.CreateFileGDB_management(os.path.dirname(gdb), os.path.basename(gdb))

    zonal_table = os.path.join(gdb, 'zonalstats')  # alternatively: "in_memory\\table"

    output_features = arcpy.CopyFeatures_management(grid, output_features)[0]
    print("Done Copy Features")

    arcpy.gp.ZonalStatisticsAsTable_sa(output_features, "OBJECTID", pop, zonal_table, "DATA", "ALL")
    print("Done Zonal Stats")
    # zonal_oid = arcpy.Describe(zonal_table).OIDFieldName
    arcpy.JoinField_management(output_features, "OBJECTID",
                               zonal_table, "OBJECTID_1",
                               "Count;Area;Min;Max;Range;Variety;Majority;Minority;Median;Mean;Std;Sum")

    score_field = "GRLS_SCORE"
    arcpy.AddField_management(output_features, score_field, "TEXT", field_length=4)

    print("Getting Value Thresholds")
    g_thresh, r_thresh, l_thresh = get_thresholds(output_features, 'MEAN')

    print("Adding Score to Raster")
    add_score(output_features, "MEAN", score_field, g_thresh, r_thresh, l_thresh)

    print("Done.")

    return output_features
Example #19
0
def joinStoresAndBRMDL(workspace, storesFeatureClass, joinField1, joinTable, joinField2):
    try:
        # Disable qualified field names which is the default for add join tool
        env.qualifiedFieldNames = False

        # Join the stores feature class to the join table on the given key
        # fields, carrying over all fields from the join table
        arcpy.JoinField_management (storesFeatureClass, joinField1, joinTable, joinField2)

    except:
            ## Return any Python specific errors and any error returned by the geoprocessor
            ##
            tb = sys.exc_info()[2]
            tbinfo = traceback.format_tb(tb)[0]
            pymsg = "PYTHON ERRORS:\n joinStoresAndBRMDL Function : Traceback Info:\n" + tbinfo + "\nError Info:\n    " + \
                    str(sys.exc_type)+ ": " + str(sys.exc_value) + "\n" +\
                    "Line {0}".format(tb.tb_lineno)
            msgs = "Geoprocesssing  Errors :\n" + arcpy.GetMessages(2) + "\n"

            ##Add custom informative message to the Python script tool
            arcpy.AddError(pymsg) #Add error message to the Python script tool(Progress dialog box, Results windows and Python Window).
            arcpy.AddError(msgs)  #Add error message to the Python script tool(Progress dialog box, Results windows and Python Window).

            ##For debugging purposes only
            ##To be commented on python script scheduling in Windows
            print pymsg
            print "\n" +msgs

    return ""
Example #20
0
def downsample(city_id):
    log('Downsampling points for %s', city_id)

    output_dir = join(DOWNSAMPLE_DIR, str(city_id))
    if not exists(output_dir):
        os.makedirs(output_dir)
        log('Created %s', output_dir)
    else:
        log('%s already exists!', output_dir)

    samples_shp = join(LATLNGS_SHP_DIR, '%s.shp' % city_id)

    downsampling_fishnet_poly_shp = join(output_dir,
                                         'downsampling_fishnet.shp')
    downsampling_fishnet_label_shp = join(output_dir,
                                          'downsampling_fishnet_label.shp')

    if not exists(downsampling_fishnet_poly_shp):
        log('Creating fishnet...')
        desc = arcpy.Describe(samples_shp)
        arcpy.CreateFishnet_management(
            downsampling_fishnet_poly_shp, str(desc.extent.lowerLeft),
            str(desc.extent.XMin) + ' ' + str(desc.extent.YMax + 10), '0.0012',
            '0.0012', '0', '0', str(desc.extent.upperRight), 'LABELS', '#',
            'POLYGON')
        log('Fishnet creation complete')

    samples_identity_shp = join(output_dir, 'samples_identity.shp')
    if not exists(samples_identity_shp):
        log('Computing identity...')
        arcpy.Identity_analysis(samples_shp, downsampling_fishnet_poly_shp,
                                samples_identity_shp)
        log('Identity complete')

    samples_stats = join(output_dir, 'samples_stats')
    if not exists(join(output_dir, 'info')):
        log('Starting summary statistics...')
        arcpy.Statistics_analysis(samples_identity_shp, samples_stats,
                                  [['price', 'MEAN']], 'FID_downsa')
        log('Summary statistics complete')

    log('Detecting if join has already been done...')
    join_done = False
    fields = arcpy.ListFields(downsampling_fishnet_label_shp)
    for field in fields:
        if field.name == 'MEAN_PRICE': join_done = True

    if not join_done:
        log('Performing table join on FID:FID_DOWNSA...')
        arcpy.JoinField_management(downsampling_fishnet_label_shp, 'FID',
                                   samples_stats, 'FID_DOWNSA', ['MEAN_PRICE'])
        log('Table join on FID:FID_DOWNSA done.')

    log('Defining projection...')
    arcpy.DefineProjection_management(downsampling_fishnet_label_shp,
                                      PROJECTION_FILE)

    log('FINISHED downsampling %s', city_id)
    log('======================END==========================')
    return downsampling_fishnet_label_shp
Example #21
0
def joinField(self, TablaEntrada, LlaveEntrada, TablaCampos, Llavedestino, listaCampos):  # performs a JoinField between two layers
    inFeatures = TablaEntrada
    in_field = LlaveEntrada
    joinField = Llavedestino
    joinTable = TablaCampos
    fieldList = listaCampos
    arcpy.JoinField_management(inFeatures, in_field, joinTable, joinField, fieldList)
Example #22
0
def joinParcelsToBuildings(pathBuildingFootprints, parcels, pathGDB):
	tempFeaturesList = []
	# Process: Copy Footprint File
	print('Copying input building footprints.')
	buildingFootprints = os.path.join(pathGDB, os.path.splitext(pathBuildingFootprints)[0] + '_Copy')
	print('Building footprint copy path: ', buildingFootprints)
	arcpy.CopyFeatures_management(pathBuildingFootprints, buildingFootprints)
	tempFeaturesList.append(buildingFootprints)

	# Process: Standard Spatial Join
#	print('Spatial joining buildings to parcels.')
#	buildingFootprintsAPN = os.path.join(pathGDB, os.path.splitext(pathBuildingFootprints)[0] + '_ParcelJoin')
#	arcpy.SpatialJoin_analysis(buildingFootprints, parcels, buildingFootprintsAPN, "JOIN_ONE_TO_ONE", "KEEP_COMMON", "", "WITHIN", "", "")

	# Process: Spatial Join (Largest Overlap Toolbox)
	print('Spatial joining buildings to parcels.')
	buildingFootprintsAPN = os.path.join(pathGDB, os.path.splitext(pathBuildingFootprints)[0] + '_ParcelJoin')
	arcpy.SpatialJoinLargestOverlap(buildingFootprints, parcels, buildingFootprintsAPN, "false", "LARGEST_OVERLAP")

	# Process: Summary Statistics
	print('Performing summary statistics of buildings in parcels.')
	buildingSummaryStatistics = os.path.join(pathGDB, os.path.splitext(buildingFootprintsAPN)[0] + '_summstats')
	arcpy.Statistics_analysis(buildingFootprintsAPN, buildingSummaryStatistics, "Area_ft SUM", "APN")
	tempFeaturesList.append(buildingSummaryStatistics)

	# Process: Join Field for Summary Statistics
	print('Joining summary statistics to building footprints.')
	arcpy.JoinField_management(buildingFootprintsAPN, "APN", buildingSummaryStatistics, "APN", "FREQUENCY;SUM_AREA")

	# Delete temporary files
	for tempFeature in tempFeaturesList:
		arcpy.Delete_management(tempFeature)

	return buildingFootprintsAPN
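A hypothetical call to joinParcelsToBuildings; the paths are placeholders, and the SpatialJoinLargestOverlap tool used inside the function is assumed to come from Esri's "largest overlap" sample toolbox, imported beforehand with arcpy.ImportToolbox.

import arcpy

# Hypothetical toolbox and data paths for illustration only
arcpy.ImportToolbox(r"C:\Tools\SpatialJoinLargestOverlap.tbx")
footprints_with_apn = joinParcelsToBuildings("Building_Footprints",
                                             r"C:\Data\parcels.gdb\Parcels",
                                             r"C:\Data\results.gdb")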
Example #23
0
def biomass30m_calcs(merged_dir, filename):
    arcpy.env.workspace = merged_dir
    area30 = arcpy.ListTables("*" + filename + "_biomassweight")[0]
    biomass30 = arcpy.ListTables("*" + filename + "_biomass30m")[0]
    arcpy.AddField_management(biomass30, "MgBiomass", "DOUBLE")
    arcpy.CalculateField_management(biomass30, "MgBiomass", "!SUM!",
                                    "PYTHON_9.3", "")
    arcpy.DeleteField_management(biomass30, "SUM")

    arcpy.AddField_management(biomass30, "uID", "TEXT")
    arcpy.CalculateField_management(biomass30, "uID",
                                    """!ID!+"_"+str( !Value!)""", "PYTHON_9.3",
                                    "")

    arcpy.AddField_management(area30, "uID", "TEXT")
    arcpy.CalculateField_management(area30, "uID",
                                    """!ID!+"_"+str( !Value!)""", "PYTHON_9.3",
                                    "")

    arcpy.JoinField_management(area30, "uID", biomass30, "uID", ["MgBiomass"])
    arcpy.AddField_management(area30, "MgBiomassPerHa", "DOUBLE")
    arcpy.AddMessage("calculating MgBiomassPerHa")
    arcpy.CalculateField_management(area30, "MgBiomassPerHa",
                                    "!MgBiomass!/(!SUM!/10000)", "PYTHON_9.3",
                                    "")

    fields_to_delete = ("Value", "COUNT", "AREA")
    for f in fields_to_delete:
        arcpy.DeleteField_management(area30, f)
    arcpy.Delete_management(biomass30)
Example #24
0
def fstats(radius, thresh):

    num = str(round(thresh, 1))

    # Integrity raster for each radii
    rastFocal = focalFolder + "\\FocI" + str(radius)

    # Conditioning out all cells below the threshold
    outCon = Con(Raster(rastFocal) > thresh, 1, 0)

    # Calculating zonal stats and joining back to spatial data
    outTable = projectGDB + "\\" + "integrity_zonalstatstable"

    outZSaT = ZonalStatisticsAsTable(polyBounds, Name_PolyBin, outCon,
                                     outTable, "DATA", "MEAN", "CURRENT_SLICE")
    tabJoin = arcpy.JoinField_management(polyBounds, Name_PolyBin, outTable,
                                         Name_PolyBin, ["MEAN"])

    # Calculating threshold means
    IRT_Field = "Grit_" + str(radius) + "_" + num[0]
    arcpy.AddField_management(polyBounds, IRT_Field, "DOUBLE")
    arcpy.CalculateFields_management(polyBounds, "PYTHON3",
                                     [[IRT_Field, '!MEAN!']])
    arcpy.DeleteField_management(polyBounds, ["MEAN"])

    return
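fstats relies on several module-level names defined elsewhere in the script (focalFolder, projectGDB, polyBounds, Name_PolyBin, and the Spatial Analyst functions). A hypothetical driver loop, with the radius and threshold values invented for illustration:

# Hypothetical radii and integrity thresholds for illustration only
for radius in (90, 270, 810):
    for thresh in (0.5, 0.7, 0.9):
        fstats(radius, thresh)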
Example #25
0
def fstats_mask(radius, thresh):

    num = str(round(thresh, 1))

    # Integrity raster for each radii
    rastFocal = focalFolder + "\\FocI" + str(radius)

    # keeping only the values in the integrity map above the threshold
    outCon = Con(Raster(rastFocal) > thresh, 1, 0)

    # masking out the lands with base integrity of 1
    outMult = Times(outCon, maskHum)

    # Calculating zonal stats and joining back to spatial data
    outTable = projectGDB + "\\" + "integrity_zonalstatstable"
    outZSaT = ZonalStatisticsAsTable(polyBounds, Name_PolyBin, outMult,
                                     outTable, "DATA", "MEAN", "CURRENT_SLICE")
    tabJoin = arcpy.JoinField_management(polyBounds, Name_PolyBin, outTable,
                                         Name_PolyBin, ["MEAN"])

    # Calculating threshold means
    IRT_Field = "GritM_" + str(radius) + "_" + num[0]
    arcpy.AddField_management(polyBounds, IRT_Field, "DOUBLE")
    arcpy.CalculateFields_management(polyBounds, "PYTHON3",
                                     [[IRT_Field, '!MEAN!']])
    arcpy.DeleteField_management(polyBounds, ["MEAN"])

    return
Example #26
0
def sect_and_dissolve(self, sub_features):
    feat_list = [self.infile, sub_features]
    temp_list = ["temp", "temp_int"]
    try:
        for item in temp_list:
            if arcpy.Exists(item):
                arcpy.Delete_management(item)
        arcpy.Intersect_analysis(feat_list, "temp")
        arcpy.Dissolve_management("temp", "temp_int", "unique", "Shape_Area SUM", "MULTI_PART", "DISSOLVE_LINES")
        arcpy.MakeFeatureLayer_management("temp_int", "temp_lyr")
        arcpy.JoinField_management(self.infile, "unique", "temp_lyr", "unique", "SUM_Shape_Area;unique")
        srows = arcpy.UpdateCursor(self.infile)
        for srow in srows:
            try:
                sum_area = srow.getValue("SUM_Shape_Area")
                area = srow.getValue("Shape_Area")
                result = sum_area/area
                srow.setValue(self.fields, result)
                srows.updateRow(srow)
            except (TypeError, NameError, ValueError, IOError):
                pass
        del srow
        del srows
        fielddrop = ["unique", "unique_1", "SUM_Shape_Area"]
        arcpy.DeleteField_management(self.infile, fielddrop)
    except (SyntaxError, NameError, ValueError, IOError):
        pass
Example #27
0
def joinDeleteFieldTable(inputLayer, joinField, joinTable, transferField):
    arcpy.JoinField_management(in_data=inputLayer,
                               in_field=joinField,
                               join_table=joinTable,
                               join_field=joinField,
                               fields=transferField)
    arcpy.DeleteField_management(inputLayer, joinField)
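A hypothetical call to joinDeleteFieldTable; the layer, table, and field names are placeholders. Note that the wrapper deletes the join key from the input layer after the transfer fields have been appended.

# Hypothetical inputs for illustration only
joinDeleteFieldTable(inputLayer=r"C:\Data\city.gdb\parcels",
                     joinField="PARCEL_ID",
                     joinTable=r"C:\Data\city.gdb\assessments",
                     transferField=["LAND_VALUE", "BLDG_VALUE"])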
Example #28
0
def one_in_one_out(tool_table, calculated_fields, zone_fc, zone_field,
                   output_table):
    """ Occasionally, ArcGIS tools we use do not produce an output record for
    every input feature. This function is used in the toolbox whenever we need
    to correct this problem, and should be called at the end of the script to
    create the final output. This function also takes care of cleaning up
    the output table so that it only has the ID field and the newly calculated
    fields.
    tool_table: the intermediate table with missing features
    calculated_fields: the fields newly calculated by the tool that you wish to
    appear in the output
    zone_fc: the feature class with the zones
    zone_field: the field uniquely identifying each feature that was used in
    the creation of tool_table. Because this function is called within our
    scripts, the zone_field should always be the same in tool_table and
    extent_fc
    output_table: the final output table
    """

    # This function is mostly a hack on an outer right join. Want to join to
    # zone_fc but select ONLY the ID field, and keep all records in zone_fc
    # If you find a better way, update this.
    arcpy.CopyRows_management(zone_fc, output_table)
    arcpy.JoinField_management(output_table, zone_field, tool_table,
                               zone_field)
    calculated_fields.append(zone_field)
    field_names = [f.name for f in arcpy.ListFields(output_table)]
    for f in field_names:
        if f not in calculated_fields:
            try:
                arcpy.DeleteField_management(output_table, f)
            except:
                continue
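A hypothetical call to one_in_one_out; the table and feature class paths, the field names, and the zone field are placeholders chosen for illustration.

# Hypothetical: keep one output row per zone even if the tool table dropped some zones.
one_in_one_out(tool_table=r"in_memory\zonal_result",
               calculated_fields=["MEAN_depth", "SUM_area"],
               zone_fc=r"C:\Data\lakes.gdb\lake_zones",
               zone_field="ZoneID",
               output_table=r"C:\Data\lakes.gdb\zonal_result_final")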
Example #29
0
def split_strahler(stream_area_fc, streams, out_area_fc):
    """This function splits up the NHDArea feature class, which does not
    start and stop polygons at confluences, by creating break points near the
    confluences to split up the polygons. Then, it adds the Strahler value from
    the stream centerline."""
    # 1) Generate euclidean allocation raster from streams (use OBJECTID)
    # 2) Convert euclidean allocation raster to polygons
    # 3) Join allocation polygons "gridcode" to streams "OBJECTID" so that
    #    Strahler value is attached to allocation polygon
    # 4) Use identity function to split up the StreamRiver polygons at the
    #    allocation polygon boundaries, and add the Strahler values
    old_workspace = env.workspace
    env.workspace = 'in_memory'
    cu.multi_msg(
        "Splitting stream area polygons between confluences and joining Strahler order to them..."
    )
    cu.multi_msg('next messages for testing')
    arcpy.CheckOutExtension('Spatial')
    cu.multi_msg('euc')
    euc = EucAllocation(streams, cell_size='50', source_field='OBJECTID')
    arcpy.CheckInExtension('Spatial')
    cu.multi_msg('conversion')
    arcpy.RasterToPolygon_conversion(euc, 'allocation_polys')
    stream_id_field = arcpy.ListFields(streams, 'Permanent_')[0].name
    cu.multi_msg('join')
    arcpy.JoinField_management('allocation_polys', 'grid_code', streams,
                               'OBJECTID',
                               ['Strahler', 'LengthKm', stream_id_field])
    cu.multi_msg('identity')
    arcpy.Identity_analysis(stream_area_fc, 'allocation_polys', out_area_fc)
    env.workspace = old_workspace
    cu.multi_msg("Splitting strema area polygons finished.")
Example #30
0
def main():
    startTime = dt.datetime.now()
    print("Script run at {0}.".format(startTime))
    
    p = Path(__file__).parents[0]
    
    # `Scenario_1024` should be changed to the appropriate scenario number 
    # output by Emme.
    links_shapefile = os.path.join(str(p), 'New_Project', 'Media', 
                                           'Scenario_1024', 'emme_links.shp') 
    in_field = 'ID'
    join_table = os.path.join(str(p), 'volumes.csv')
    join_field = 'UNIQUEID'


    arcpy.TableToTable_conversion(join_table, str(p), 'volumes_converted.dbf' )
    converted_table = os.path.join(str(p), 'volumes_converted.dbf')

    
    joined_file = arcpy.JoinField_management(
                                            links_shapefile,
                                            in_field,
                                            converted_table,
                                            join_field)

    arcpy.FeatureClassToShapefile_conversion(joined_file, str(p))
    
    endTime = dt.datetime.now()
    print("Script finished in {0}.".format(endTime - startTime))