Beispiel #1
0
def DropSyncFields():
    """Drop sync/editor-tracking fields from every feature class listed in the
    module-level ``dict`` mapping (railroad key -> iterable of fc names).

    These fields get in the way of the truncate-and-append load into SQL
    Server; they are re-added after publishing.  Editor tracking must be
    disabled before its fields can be deleted.

    Relies on module globals: ``dict`` (the mapping -- shadows the builtin,
    defined elsewhere in this file), ``railroadurlroot`` and ``remainder``
    (URL/path fragments).
    """
    from arcpy import management
    for key, value in dict.items():
        for featureclass in value:
            # Build the feature-class path once instead of once per call.
            fc_path = railroadurlroot + str(key) + remainder + str(featureclass)
            try:
                print(fc_path)
                management.DeleteField(fc_path, "editor;editdate;creator;creationdate")
                management.DisableEditorTracking(fc_path, "DISABLE_CREATOR", "DISABLE_CREATION_DATE", "DISABLE_LAST_EDITOR", "DISABLE_LAST_EDIT_DATE")
                management.DeleteField(fc_path, "loaded_date;sync_user;sync_over_user;sync_over_date;sync_GUID")
                management.DeleteField(fc_path, "sync_guid;created_user;created_date;last_edited_user;last_edited_date")
                print("deleted fields sync_guid;CreationDate;Creator;EditDate;Editor in ", fc_path)
            except Exception as e:
                # BUG FIX: was sys.exc_info()[1] (sys not imported here) and
                # e.args[0], which raises IndexError when args is empty.
                # Best-effort loop: report and keep going -- not every fc has
                # every field.
                print("Error...")
                print(e.args[0] if e.args else e)
                continue
def createBoundingBoxPolygon(mxd_path, bkmk_name, out_fc):
	"""Create a polygon feature class from the bounding box of each bookmark.

	mxd_path  -- path to the .mxd map document containing the bookmarks
	bkmk_name -- wildcard string passed to arcpy.mapping.ListBookmarks
	out_fc    -- output feature class created and populated by this function

	Uses the ArcGIS 10.x ``arcpy.mapping`` API (pre-Pro).
	"""

	geom_type = 'POLYGON'
	# NOTE(review): EPSG 2913 is NAD83(HARN) / Oregon North (ft); presumably
	# this matches the map document's coordinate system -- confirm.
	oregon_spn = arcpy.SpatialReference(2913)
	management.CreateFeatureclass(os.path.dirname(out_fc),
		os.path.basename(out_fc), geom_type, spatial_reference=oregon_spn)

	name_field, f_type = 'name', 'TEXT'
	management.AddField(out_fc, name_field, f_type)

	# drop the default 'Id' field that CreateFeatureclass adds
	drop_field = 'Id'
	management.DeleteField(out_fc, drop_field)

	i_fields = ['Shape@', name_field]
	i_cursor = da.InsertCursor(out_fc, i_fields)

	mxd = mapping.MapDocument(mxd_path)
	for bkmk in arcpy.mapping.ListBookmarks(mxd, bkmk_name):
		extent = bkmk.extent
		pt_array = arcpy.Array()

		# the four corners of the bookmark extent
		pt_array.add(arcpy.Point(extent.XMin, extent.YMin))
		pt_array.add(arcpy.Point(extent.XMin, extent.YMax))
		pt_array.add(arcpy.Point(extent.XMax, extent.YMax))
		pt_array.add(arcpy.Point(extent.XMax, extent.YMin))
		# add first point again to close polygon
		pt_array.add(arcpy.Point(extent.XMin, extent.YMin))

		# one row per bookmark, named after the bookmark
		i_cursor.insertRow((arcpy.Polygon(pt_array), bkmk.name))

	# release the insert cursor's schema lock on out_fc
	del i_cursor
Beispiel #3
0
def find_states(fc, state_fc):
    """Populate a '<fc basename>_states' field on *fc* with the space-joined
    abbreviations of every state polygon each feature intersects.

    fc       -- feature class to update; replaced in place via spatial join
    state_fc -- states feature class; must have field 'states' with length
                255 containing state abbreviations

    Relies on module aliases DM (arcpy.management) and AN (arcpy.analysis).
    """
    states_field = '{}_states'.format(os.path.basename(fc))
    # Drop a stale result field from a previous run, if present.
    if arcpy.ListFields(fc, states_field):
        DM.DeleteField(fc, states_field)

    # reverse buffer the states slightly to avoid "D", "I", "J" situations in
    # "INTERSECT" illustration from graphic examples of ArcGIS join types
    # "Select polygon using polygon" section in Help

    # Build a field mapping that keeps every existing attribute and gathers
    # all the intersecting states into one merged value.
    # BUG FIX: replaced the Python-2-only '<>' operator with '!=' (SyntaxError
    # under Python 3) and renamed the loop variable 'map' (shadowed builtin).
    field_list = [f.name for f in arcpy.ListFields(fc)
                  if f.type != 'OID' and f.type != 'Geometry']
    field_mapping = arcpy.FieldMappings()
    for fname in field_list:
        fmap = arcpy.FieldMap()
        fmap.addInputField(fc, fname)
        field_mapping.addFieldMap(fmap)
    map_states = arcpy.FieldMap()
    map_states.addInputField(state_fc, 'states')
    map_states.mergeRule = 'Join'      # concatenate all intersecting values
    map_states.joinDelimiter = ' '     # ...separated by single spaces
    field_mapping.addFieldMap(map_states)

    # perform join and use output to replace original fc
    spjoin = AN.SpatialJoin(fc, state_fc, 'in_memory/spjoin_intersect', 'JOIN_ONE_TO_ONE',
                            field_mapping=field_mapping, match_option='INTERSECT')
    DM.AlterField(spjoin, 'states', new_field_name=states_field, clear_field_alias=True)
    DM.Delete(fc)
    DM.CopyFeatures(spjoin, fc)
    DM.Delete(spjoin)
Beispiel #4
0
def process_ws(ws_fc, zone_name):
    """Add the standard zone attributes to a watershed feature class in place:
    zoneid, multipart flag, land-border/coast flags, state list, glaciation
    status, and zone-name-prefixed field names.

    ws_fc     -- watershed feature class, edited in place
    zone_name -- short zone name used as the field-name prefix

    Relies on module globals LAND_BORDER, COASTLINE, STATES_GEO, the DM
    (arcpy.management) alias, and the helpers find_states()/calc_glaciation().
    """

    # generate new zone ids from the lake id
    DM.AddField(ws_fc, 'zoneid', 'TEXT', field_length=10)
    DM.CalculateField(ws_fc, 'zoneid', '!lagoslakeid!', 'PYTHON')
    ws_fc_lyr = DM.MakeFeatureLayer(ws_fc)

    # multipart flag
    DM.AddField(ws_fc, 'ismultipart', 'TEXT', field_length=2)
    with arcpy.da.UpdateCursor(ws_fc, ['ismultipart', 'SHAPE@']) as u_cursor:
        for row in u_cursor:
            row[0] = 'Y' if row[1].isMultipart else 'N'
            u_cursor.updateRow(row)


    print("Edge flags...")
    # add flag fields
    DM.AddField(ws_fc, 'onlandborder', 'TEXT', field_length = 2)
    DM.AddField(ws_fc, 'oncoast', 'TEXT', field_length = 2)

    # identify border zones: 'Y' when intersecting the land border, else 'N'
    # (the SWITCH_SELECTION inverts the selection to calc the complement)
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'onlandborder' ,"'N'", 'PYTHON')

    # identify coastal zones, same pattern
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'oncoast' ,"'N'", 'PYTHON')

    print("State assignment...")
    # BUG FIX: removed dead local 'state_geo' (assigned, never used) -- the
    # states layer actually used is the STATES_GEO module global.
    find_states(ws_fc, STATES_GEO)
    # glaciation status
    calc_glaciation(ws_fc, 'zoneid')

    # preface the field names with the zone name
    DM.DeleteField(ws_fc, 'ORIG_FID')
    fields = [f.name for f in arcpy.ListFields(ws_fc, '*') if f.type not in ('OID', 'Geometry') and not f.name.startswith('Shape_')]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig = f).lower()
        try:
            DM.AlterField(ws_fc, f, new_fname, clear_field_alias = 'TRUE')
        # sick of debugging the required field message-I don't want to change required fields anyway
        except Exception:  # narrowed from bare except
            pass

    # cleanup: delete every layer object created above
    lyr_objects = [lyr_object for var_name, lyr_object in locals().items() if var_name.endswith('lyr')]
    for l in lyr_objects:
        DM.Delete(l)
Beispiel #5
0
def RepublishService():
    """Prepare the MastCrossbuckAssembly data for republishing.

    Republish workflow (manual steps around this function):
      * tell the field community to sync and hold edits
      * update the data in the local repository (the SQL Server geodatabase
        backing the collector apps)
      * delete the existing KanPlan service: uncheck "prevent accidental
        deletion", save, delete, save, delete again, verify it is gone
      * run this function: disable editor tracking and strip the tracking /
        sync fields from the local repository (they are re-added post-publish)
      * after republishing, disconnected-edit collector users must remove
        features only under "manage" and re-initiate synchronization, because
        the replication GUIDs are reset by the new publish operation
      * perform the additions again to restore hosted GUIDs and sync tracking
    """
    from arcpy import management

    sde_fc = r"\\gisdata\planning\cart\projects\KanPLan\MXD\HostedServices\Collect17\[email protected]\KLEKT17.SDE.Railroad\KLEKT17.SDE.MastCrossbuckAssembly"
    local_table = "MastCrossbuckAssembly_SQL"

    # Tracking must be disabled before the tracking fields can be dropped.
    management.DisableEditorTracking(sde_fc, "DISABLE_CREATOR", "DISABLE_CREATION_DATE", "DISABLE_LAST_EDITOR", "DISABLE_LAST_EDIT_DATE")
    for field_names in ("loaded_date;sync_user;sync_over_user;sync_over_date;sync_GUID",
                        "last_edited_date;last_edited_user;created_date;created_user"):
        management.DeleteField(local_table, field_names)
def drought_analysis(date_string):
    """Download the US Drought Monitor shapefile for *date_string*, join it to
    winery locations, publish the result to AGOL, and enrich the service.

    date_string -- USDM date token used in the zip name, e.g. '20140204'

    Relies on module aliases ARCPY/OS/URLLIB/ZIPFILE/DM/ANALYSIS/TIME and the
    helpers AGOLHandler, publish_service, enrich, check_job_status.
    Python 2 only: URLLIB.URLopener is the legacy urllib API.
    """
    ARCPY.env.overwriteOutput = True
    working_dir = r"C:\Data\git\devsummit-14-python"
    zip_name = "USDM_" + date_string + "_M.zip"
    url = "http://droughtmonitor.unl.edu/data/shapefiles_m/" + zip_name
    mxd_path = OS.path.join(working_dir, "MapTemplate.mxd")
    lyr_template = OS.path.join(working_dir, "CurrentDroughtConditions.lyr")
    zip_name = OS.path.basename(url)

    # download and unpack the drought shapefile
    drought_zip_file = URLLIB.URLopener()
    dzf = drought_zip_file.retrieve(url, OS.path.join(r"C:\Temp", zip_name))
    zf = ZIPFILE.ZipFile(dzf[0], "r")
    shp_name = [n for n in zf.namelist() if n.endswith('.shp')][0]
    zf.extractall(working_dir)

    drought = OS.path.splitext(shp_name)[0]
    DM.MakeFeatureLayer(OS.path.join(working_dir, shp_name), drought)

    #### Add Winery Data ####
    beerWinePath = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                "BeerWine")
    intermediate_output = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                       "BeerWineDrought")
    wine = "BeerWine"
    DM.MakeFeatureLayer(beerWinePath, wine)
    DM.SelectLayerByAttribute(wine, "NEW_SELECTION", "Type = 'Winery'")
    ANALYSIS.SpatialJoin(drought, wine, intermediate_output, "JOIN_ONE_TO_ONE",
                         "KEEP_ALL")
    try:
        DM.DeleteField(intermediate_output, "NAME")
    except:
        pass
    final_wine_drought = "Wine_Drought_Summary"
    DM.MakeFeatureLayer(intermediate_output, final_wine_drought)

    lf = DM.SaveToLayerFile(
        final_wine_drought,
        OS.path.join(working_dir, '{}.lyr'.format(final_wine_drought)))
    DM.ApplySymbologyFromLayer(lf, lyr_template)

    # BUG FIX: the original (redacted) source had the service_name assignment
    # swallowed into the password comment, leaving 'service_name' undefined
    # (NameError below).  Restored as two statements.
    pw = "PASSWORDHERE"  # GETPASS.getpass("Enter AGOL password:")
    service_name = "Drought_Wine_Service"

    agol = AGOLHandler("USERNAMEHERE", pw, service_name)

    publish_service(agol, service_name, mxd_path, lf[0])
    TIME.sleep(5)
    fs_url = agol.findItemURL('Feature Service')
    TIME.sleep(35)  # give AGOL time to finish creating the feature service
    gp_url, jsondata = enrich(agol, fs_url + '/0',
                              '{}_Enriched'.format(service_name), agol.token)
    check_job_status(gp_url, jsondata, agol.token)

    # clean up the downloaded shapefile and the intermediate layer file
    DM.Delete(OS.path.join(working_dir, shp_name))
    DM.Delete(OS.path.join(working_dir, lf[0]))
def deduplicate(merged_file, rule_dictionary, unique_id='lagoslakeid'):
    """Delete duplicate rows sharing a *unique_id*, keeping the single best
    row per id according to the prioritized rules.

    merged_file     -- feature class/table to deduplicate in place
    rule_dictionary -- {priority: {'rule': 'min'|'max'|<custom>, 'field': name,
                        'sort': [ordered values]}}; priorities start at 1 and
                        earlier priorities dominate the ORDER BY.  For custom
                        rules, 'sort' lists field values best-first.
    unique_id       -- id field whose duplicates are resolved
    """
    order_fields = []
    sort_fields = []
    for i in range(1, len(rule_dictionary) + 1):  # iterate in priority order
        rule = rule_dictionary[i]
        if rule['rule'] == 'min':
            order_fields.append('{} asc'.format(rule['field']))
        elif rule['rule'] == 'max':
            order_fields.append('{} desc'.format(rule['field']))
        else:
            # Custom order: materialize the rank as a numeric helper field so
            # SQL can sort by it; helper fields are dropped at the end.
            sort_field = '{}_SORT'.format(rule['field'])
            sort_fields.append(sort_field)
            if not arcpy.ListFields(merged_file, sort_field):
                DM.AddField(merged_file, sort_field, 'SHORT')
            with arcpy.da.UpdateCursor(merged_file,
                                       [rule['field'], sort_field]) as cursor:
                for row in cursor:
                    row[1] = rule['sort'].index(row[0])
                    cursor.updateRow(row)

            order_fields.append('{} asc'.format(sort_field))
    order_by_clause = 'ORDER BY {}'.format(', '.join(order_fields))
    print(order_by_clause)

    print("Finding duplicate ids...")
    freq = arcpy.Frequency_analysis(merged_file, 'in_memory/freq', unique_id)
    dupe_ids = [
        row[0] for row in arcpy.da.SearchCursor(freq, unique_id,
                                                '''"FREQUENCY" > 1''')
    ]

    # PERF: hoisted the field-type probe out of the loop (it was re-running
    # ListFields for every duplicate id).  Also renamed the locals that
    # shadowed the builtins 'id' and 'filter', and removed a leftover
    # time.sleep(.1) debugging throttle.
    id_is_string = arcpy.ListFields(
        merged_file, '{}*'.format(unique_id))[0].type == 'String'
    for dupe_id in dupe_ids:
        if id_is_string:
            where_clause = '''{} = '{}' '''.format(unique_id, dupe_id)
        else:
            where_clause = '''{} = {} '''.format(unique_id, dupe_id)
        with arcpy.da.UpdateCursor(
                merged_file, '*', where_clause,
                sql_clause=(None, order_by_clause)) as dupes_cursor:
            counter = 0
            # Deletes all but the first sorted row.
            for dupe_row in dupes_cursor:
                print(dupe_row)
                if counter != 0:
                    print("DUPLICATE")
                    dupes_cursor.deleteRow()
                counter += 1
        print(' ')

    arcpy.Delete_management('in_memory/freq')
    for f in sort_fields:
        DM.DeleteField(merged_file, f)
def createExtentFeatureClass():
    """Create the polygon feature class that will hold the map extent
    geometries (module globals: pylon_extents, name_field)."""

    # NOTE: EPSG 2913 = NAD83(HARN) / Oregon North (ft)
    spatial_ref = arcpy.SpatialReference(2913)
    management.CreateFeatureclass(path.dirname(pylon_extents),
                                  path.basename(pylon_extents),
                                  'POLYGON',
                                  spatial_reference=spatial_ref)

    # add the name column, then drop the auto-generated 'Id' column
    management.AddField(pylon_extents, name_field, 'TEXT')
    management.DeleteField(pylon_extents, 'Id')
Beispiel #9
0
def validateGeometry():
    """Check for geometry errors and multipart features that may have been
    introduced in the manual editing process of creating the offsets.

    Python 2 / arcpy 10.x code (print statements, dict.iteritems).  Relies on
    module globals temp_dir and offset_routes and the os / management / da /
    cartography imports.
    """

    # Check for geometry errors, this tool doesn't flag a lot of the aspects I'm
    # interested in, thus the other steps below
    error_table = os.path.join(temp_dir, 'carto_errors.dbf')
    management.CheckGeometry(offset_routes, error_table)

    # Identify any multipart features: explode every part to its own row,
    # then count parts per original feature id
    multipart_dump = os.path.join(temp_dir, 'multipart_dump.shp')
    management.MultipartToSinglepart(offset_routes, multipart_dump)

    multipart_dict = {}
    dump_fields = ['OID@', 'ORIG_FID']
    with da.SearchCursor(multipart_dump, dump_fields) as s_cursor:
        for oid, orig_id in s_cursor:
            if orig_id not in multipart_dict:
                multipart_dict[orig_id] = 1
            else:
                multipart_dict[orig_id] += 1

    # any original fid with more than one dumped part is multipart
    print "Features with the following fid's are multipart:"
    for orig_id, count in multipart_dict.iteritems():
        if count > 1:
            print orig_id

    # Find other errors like shared geometries and deadends using the merge divided
    # roads tool, I'm not actually interested in the output of this tool but rather the
    # validation output that it generates
    merge_field, field_type = 'merge_id', 'LONG'
    management.AddField(offset_routes, merge_field, field_type)

    # validation output will be logged here (note the user name portion may be
    # variable): C:\Users\humphrig\AppData\Local\ESRI\Geoprocessing
    merge_distance = 100  # feet
    validation_merge = os.path.join('in_memory', 'validation_merge')
    cartography.MergeDividedRoads(offset_routes, merge_field, merge_distance,
                                  validation_merge)

    # drop the merge field as it is no longer needed
    management.DeleteField(offset_routes, merge_field)
Beispiel #10
0
            if row.getValue(flds[i]) == 1:
                sppCd = flds[i][11:]
                sppString = sppString + ", " + sppCd
                #find the full name
                Sci = sciNames[sppCd.lower()]
                sciString = sciString + ", " + Sci
                Comm = commNames[sppCd.lower()]
                comnString = comnString + ", " + Comm
                Hab = habitat_Use[sppCd.lower()]
                ##Consider
                if Hab == "":
                    habString = habString
                else:
                    habString = habString + ", " + Sci + ": " + Hab
                #habString = habString + ", " + Hab
        row.codeList = sppString[2:]  #chop the first comma-space
        row.SciNames = sciString[2:]
        row.CommNames = comnString[2:]
        row.HabitatUse = habString[2:]
        rows.updateRow(row)
    del rows, row
    ## delete unwanted fields
    outFeat = "zon_FullLists_C_test" + classLevel
    man.CopyFeatures(curZo, outFeat)
    fldList = [f.name for f in arcpy.ListFields(outFeat, "VAT_zon_*")]
    man.DeleteField(outFeat, fldList)
    fldList = [f.name for f in arcpy.ListFields(outFeat, "hyp_backOut*")]
    man.DeleteField(outFeat, fldList)
    print("  final file is " + outFeat)

# consider automating the labeling of all pro polys with Facility name, and also other attribute fields we need to add
Beispiel #11
0
def process_zone(zone_fc, output, zone_name, zone_id_field, zone_name_field,
                 other_keep_fields, clip_hu8, lagosne_name):
    """Standardize one LAGOS-US spatial division: dissolve, clip to the master
    boundary, filter/trim slivers, assign zone ids (reusing LAGOS-NE ids when
    available), flag border/coastal zones, assign a state, and prefix every
    attribute name with the zone name.

    zone_fc           -- input zone polygons
    output            -- output feature class name (in the current workspace)
    zone_name         -- short zone name ('hu4', 'county', ...) used for ids
                         and field-name prefixes
    zone_id_field     -- field that identifies a unique zone
    zone_name_field   -- field holding the zone's display name
    other_keep_fields -- comma-space separated extra fields to carry through
    clip_hu8          -- 'Y' to additionally clip to the HU8 output extent
    lagosne_name      -- LAGOS-NE layer name to borrow zone ids from, or
                         falsy to generate all-new ids

    Relies on module globals MASTER_CLIPPING_POLY, HU8_OUTPUT, LAGOSNE_GDB,
    LAND_BORDER, COASTLINE, STATE_FC; intermediate outputs go to the current
    arcpy workspace and are deleted at the end.
    """
    # dissolve fields by the field that zone_id is based on (the field that identifies a unique zone)
    # (the filter drops the empty strings produced when a field arg is blank)
    dissolve_fields = [
        f for f in "{}, {}, {}".format(zone_id_field, zone_name_field,
                                       other_keep_fields).split(', ')
        if f != ''
    ]
    print("Dissolving...")
    dissolve1 = DM.Dissolve(zone_fc, 'dissolve1', dissolve_fields)

    # update name field to match our standard
    DM.AlterField(dissolve1, zone_name_field, 'name')

    # original (pre-clip) area, used later for the retained-area percentage

    DM.AddField(dissolve1, 'originalarea', 'DOUBLE')
    DM.CalculateField(dissolve1, 'originalarea', '!shape.area@hectares!',
                      'PYTHON')

    #clip
    print("Clipping...")
    clip = AN.Clip(dissolve1, MASTER_CLIPPING_POLY, 'clip')
    if clip_hu8 == 'Y':
        final_clip = AN.Clip(clip, HU8_OUTPUT, 'final_clip')
    else:
        final_clip = clip

    print("Selecting...")
    # calc new area, orig area pct, compactness
    DM.AddField(final_clip, 'area_ha', 'DOUBLE')
    DM.AddField(final_clip, 'originalarea_pct', 'DOUBLE')
    DM.AddField(final_clip, 'compactness', 'DOUBLE')
    # NOTE(review): this joins 'originalarea_pct' from dissolve1, which only
    # carries 'originalarea' -- verify the intended join field.
    DM.JoinField(final_clip, zone_id_field, dissolve1, zone_id_field,
                 'originalarea_pct')

    uCursor_fields = [
        'area_ha', 'originalarea_pct', 'originalarea', 'compactness',
        'SHAPE@AREA', 'SHAPE@LENGTH'
    ]
    with arcpy.da.UpdateCursor(final_clip, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, comp, shape_area, shape_length = row
            area = shape_area / 10000  # convert from m2 to hectares
            orig_area_pct = round(100 * area / orig_area, 2)
            # isoperimetric quotient: 4*pi*A/P^2, 1.0 for a circle
            comp = 4 * 3.14159 * shape_area / (shape_length**2)
            row = (area, orig_area_pct, orig_area, comp, shape_area,
                   shape_length)
            uCursor.updateRow(row)

    # if zones are present with <5% of original area and a compactness measure of <.2 (ranges from 0-1)
    # AND ALSO they are no bigger than 500 sq. km. (saves Chippewa County and a WWF), filter out
    # save eliminated polygons to temp database as a separate layer for inspection

    # Different processing for HU4 and HU8, so that they match the extent of HU8 more closely but still throw out tiny slivers
    # County also only eliminated if a tiny, tiny, tiny sliver (so: none should be eliminated)
    if zone_name not in ('hu4', 'hu12', 'county'):
        selected = AN.Select(
            final_clip, 'selected',
            "originalarea_pct >= 5 OR compactness >= .2 OR area_ha > 50000")
        not_selected = AN.Select(
            final_clip, '{}_not_selected'.format(output),
            "originalarea_pct < 5 AND compactness < .2 AND area_ha < 50000")

    else:
        selected = final_clip
    # eliminate small slivers, re-calc area fields, add perimeter and multipart flag
    # leaves the occasional errant sliver but some areas over 25 hectares are more valid so this is
    # CONSERVATIVE
    print("Trimming...")
    trimmed = DM.EliminatePolygonPart(selected,
                                      'trimmed',
                                      'AREA',
                                      '25 Hectares',
                                      part_option='ANY')

    # gather up a few calculations into one cursor because this is taking too long over the HU12 layer
    DM.AddField(trimmed, 'perimeter_m', 'DOUBLE')
    DM.AddField(trimmed, 'multipart', 'TEXT', field_length=1)
    uCursor_fields = [
        'area_ha', 'originalarea_pct', 'originalarea', 'perimeter_m',
        'multipart', 'SHAPE@'
    ]
    with arcpy.da.UpdateCursor(trimmed, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, perim, multipart, shape = row
            area = shape.area / 10000  # convert to hectares from m2
            orig_area_pct = round(100 * area / orig_area, 2)
            perim = shape.length

            # multipart flag calc
            if shape.isMultipart:
                multipart = 'Y'
            else:
                multipart = 'N'
            row = (area, orig_area_pct, orig_area, perim, multipart, shape)
            uCursor.updateRow(row)

    # delete intermediate fields
    DM.DeleteField(trimmed, 'compactness')
    DM.DeleteField(trimmed, 'originalarea')

    print("Zone IDs....")
    # link to LAGOS-NE zone IDs
    DM.AddField(trimmed, 'zoneid', 'TEXT', field_length=40)
    trimmed_lyr = DM.MakeFeatureLayer(trimmed, 'trimmed_lyr')
    if lagosne_name:
        # join to the old master GDB path on the same master field and copy in the ids
        old_fc = os.path.join(LAGOSNE_GDB, lagosne_name)
        old_fc_lyr = DM.MakeFeatureLayer(old_fc, 'old_fc_lyr')
        if lagosne_name == 'STATE' or lagosne_name == 'COUNTY':
            # STATE/COUNTY join on FIPS code rather than the zone id field
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr, 'FIPS')
        else:
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr,
                       zone_id_field)  # usually works because same source data

        # copy the joined LAGOS-NE ids (lowercased) into the new zoneid field
        DM.CalculateField(trimmed_lyr, 'zoneid',
                          '!{}.ZoneID!.lower()'.format(lagosne_name), 'PYTHON')
        DM.RemoveJoin(trimmed_lyr)

    # generate new zone ids for features that didn't inherit one
    old_ids = [row[0] for row in arcpy.da.SearchCursor(trimmed, 'zoneid')]
    with arcpy.da.UpdateCursor(trimmed, 'zoneid') as cursor:
        counter = 1
        for row in cursor:
            if not row[
                    0]:  # if no existing ID borrowed from LAGOS-NE, assign a new one
                new_id = '{name}_{num}'.format(name=zone_name, num=counter)

                # ensures new ids don't re-use old numbers but fills in all positive numbers eventually
                while new_id in old_ids:
                    counter += 1
                    new_id = '{name}_{num}'.format(name=zone_name, num=counter)
                row[0] = new_id
                cursor.updateRow(row)
                counter += 1

    print("Edge flags...")
    # add flag fields
    DM.AddField(trimmed, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(trimmed, 'oncoast', 'TEXT', field_length=2)

    # identify border zones: 'Y' when intersecting the land border, else 'N'
    # (SWITCH_SELECTION inverts the selection to calc the complement)
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones, same pattern
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(trimmed_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # State: prefer the state containing the zone's center; fall back to any
    # intersecting state for zones whose center falls outside all states
    DM.AddField(trimmed, "state", 'text', field_length='2')
    state_center = arcpy.SpatialJoin_analysis(
        trimmed,
        STATE_FC,
        'state_center',
        join_type='KEEP_COMMON',
        match_option='HAVE_THEIR_CENTER_IN')
    state_intersect = arcpy.SpatialJoin_analysis(trimmed,
                                                 STATE_FC,
                                                 'state_intersect',
                                                 match_option='INTERSECT')
    state_center_dict = {
        row[0]: row[1]
        for row in arcpy.da.SearchCursor(state_center, ['ZoneID', 'STUSPS'])
    }
    state_intersect_dict = {
        row[0]: row[1]
        for row in arcpy.da.SearchCursor(state_intersect, ['ZoneID', 'STUSPS'])
    }
    with arcpy.da.UpdateCursor(trimmed, ['ZoneID', 'state']) as cursor:
        for updateRow in cursor:
            keyValue = updateRow[0]
            if keyValue in state_center_dict:
                updateRow[1] = state_center_dict[keyValue]
            else:
                updateRow[1] = state_intersect_dict[keyValue]
            cursor.updateRow(updateRow)

    # glaciation status?
    # TODO as version 0.6

    # preface the names with the zones
    DM.DeleteField(trimmed, 'ORIG_FID')
    fields = [
        f.name for f in arcpy.ListFields(trimmed, '*')
        if f.type not in ('OID',
                          'Geometry') and not f.name.startswith('Shape_')
    ]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            DM.AlterField(trimmed, f, new_fname, clear_field_alias='TRUE')
        # sick of debugging the required field message-I don't want to change required fields anyway
        except:
            pass

    DM.CopyFeatures(trimmed, output)

    # cleanup: remove every layer object and intermediate feature class
    lyr_objects = [
        lyr_object for var_name, lyr_object in locals().items()
        if var_name.endswith('lyr')
    ]
    temp_fcs = arcpy.ListFeatureClasses('*')
    for l in lyr_objects + temp_fcs:
        DM.Delete(l)
Beispiel #12
0
            prevHyp = wrk + "/hyp" + str(iminus)
            print("working on " + elem + ", " + str(i) + " of " + str(listLen))
            curHyp = Combine([prevHyp, rasName])
            curHyp.save(wrk + "/hyp" + str(i))
            man.AddField(curHyp, "spp0", "TEXT", "", "", 251)
            jval = "hyp" + str(iminus)
            man.JoinField(curHyp, jval, prevHyp, "VALUE", ["spp0"])
            rasNoDot = rasName[0:rasName.find(".")]
            newCol = rasNoDot[0:11].upper()
            expr = "str(!spp0_1!) + str(!" + newCol + "!)"
            man.CalculateField(curHyp, "spp0", expr, "PYTHON")
            #clean up
            man.Delete(prevHyp)

# clean up a little more: drop the join artifacts left on the final combine
# (curHyp, jval, newCol carry over from the truncated loop above -- presumably
# the last iteration's values; verify against the full script)
man.DeleteField(curHyp, [jval.upper(), newCol, "spp0_1"])

# needed to continue below if you comment out the previous loop for any reason
# curHyp = wrk + "/hyp" + str(len(codeL)-1)

# expand information out to one col for each spp.: position i of the packed
# spp0 string appears to encode presence for species codeL[i]
print("adding columns...")
for i in range(len(codeL)):
    newCol = codeL[i].upper()
    print("..." + newCol)
    man.AddField(curHyp, newCol, "SHORT")
    # field calculator expression: take the single character at position i
    expr = "str(!spp0!)[" + str(i) + ":" + str(i + 1) + "]"
    print(expr)
    man.CalculateField(curHyp, newCol, expr, "PYTHON")
def flatten_overlaps(zone_fc,
                     zone_field,
                     output_fc,
                     output_table,
                     cluster_tolerance=' 3 Meters'):
    """Split overlapping zone polygons into non-overlapping "flat" polygons.

    Self-unions zone_fc so overlapping regions become separate features,
    assigns every unique shape a temporary flat zone id, and writes:
    output_fc -- flattened (non-overlapping) polygons with the flat id
    output_table -- crosswalk from flat ids back to original zone ids

    Parameters
    ----------
    zone_fc : str, polygon feature class that may contain overlaps
    zone_field : str, field holding the original zone identifier
    output_fc : str, path for the flattened polygon output
    output_table : str, path for the flat-id/original-id table
    cluster_tolerance : str, tolerance passed to Union

    Returns the copied output feature class (arcpy Result).
    """
    orig_env = arcpy.env.workspace
    arcpy.env.workspace = 'in_memory'

    objectid = [f.name for f in arcpy.ListFields(zone_fc)
                if f.type == 'OID'][0]
    zone_type = [f.type for f in arcpy.ListFields(zone_fc, zone_field)][0]
    fid1 = 'FID_{}'.format(os.path.basename(zone_fc))
    flat_zoneid = 'flat{}'.format(zone_field)
    flat_zoneid_prefix = 'flat{}_'.format(zone_field.replace('_zoneid', ''))

    # Union with FID_Only (A)
    arcpy.AddMessage("Splitting overlaps in polygons...")
    # map original OIDs to zone ids before the union renumbers features
    zoneid_dict = {
        r[0]: r[1]
        for r in arcpy.da.SearchCursor(zone_fc, [objectid, zone_field])
    }
    self_union = AN.Union([zone_fc],
                          'self_union',
                          'ONLY_FID',
                          cluster_tolerance=cluster_tolerance)

    # #If you don't run this section, Find Identical fails with error 999999. Seems to have to do with small slivers
    # #having 3 vertices and/or only circular arcs in the geometry.
    arcpy.AddMessage("Repairing self-union geometries...")
    # DM.AddGeometryAttributes(self_union, 'POINT_COUNT; AREA')
    # union_fix = DM.MakeFeatureLayer(self_union, 'union_fix', where_clause='PNT_COUNT <= 10 OR POLY_AREA < 5000')
    # arcpy.Densify_edit(union_fix, 'DISTANCE', distance = '1 Meters', max_deviation='1 Meters')  # selection ON, edits self_union disk
    DM.RepairGeometry(
        self_union, 'DELETE_NULL'
    )  # eliminate empty geoms. selection ON, edits self_union disk
    # for field in ['PNT_COUNT', 'POLY_AREA']:
    #     DM.DeleteField(self_union, field)

    # Find Identical by Shape (B)
    if arcpy.Exists('identical_shapes'):
        DM.Delete(
            'identical_shapes'
        )  # causes failure in FindIdentical even when overwrite is allowed
    identical_shapes = DM.FindIdentical(self_union, 'identical_shapes',
                                        'Shape')

    # Join A to B and calc flat[zone]_zoneid = FEAT_SEQ (C)
    DM.AddField(self_union, flat_zoneid, 'TEXT', field_length=20)
    union_oid = [
        f.name for f in arcpy.ListFields(self_union) if f.type == 'OID'
    ][0]
    identical_shapes_dict = {
        r[0]: r[1]
        for r in arcpy.da.SearchCursor(identical_shapes,
                                       ['IN_FID', 'FEAT_SEQ'])
    }
    with arcpy.da.UpdateCursor(self_union,
                               [union_oid, flat_zoneid]) as u_cursor:
        for row in u_cursor:
            row[1] = '{}{}'.format(flat_zoneid_prefix,
                                   identical_shapes_dict[row[0]])
            u_cursor.updateRow(row)

    # Add the original zone ids and save to table (E)
    arcpy.AddMessage("Assigning temporary IDs to split polygons...")
    unflat_table = DM.CopyRows(self_union, 'unflat_table')
    DM.AddField(unflat_table, zone_field,
                zone_type)  # default text length of 50 is fine if needed
    with arcpy.da.UpdateCursor(unflat_table, [fid1, zone_field]) as u_cursor:
        for row in u_cursor:
            row[1] = zoneid_dict[row[0]]  # assign zone id
            u_cursor.updateRow(row)

    # Delete Identical (C) (save as flat[zone]): keep only the first feature
    # for each unique shape. A set gives O(1) membership tests (the previous
    # list-based check made this loop quadratic and stored duplicates).
    with arcpy.da.UpdateCursor(self_union, 'OID@') as cursor:
        visited = set()
        for row in cursor:
            feat_seq = identical_shapes_dict[row[0]]
            if feat_seq in visited:
                cursor.deleteRow()
            visited.add(feat_seq)

    DM.DeleteField(self_union, fid1)
    DM.DeleteField(unflat_table, fid1)

    # save outputs
    output_fc = DM.CopyFeatures(self_union, output_fc)
    output_table = DM.CopyRows(unflat_table, output_table)

    # cleanup in_memory intermediates and restore the caller's workspace
    for item in [self_union, identical_shapes, unflat_table]:
        DM.Delete(item)
    arcpy.env.workspace = orig_env

    return output_fc
Beispiel #14
0
    drought = OS.path.splitext(shp_name)[0]
    DM.MakeFeatureLayer(OS.path.join(working_dir, shp_name), drought)

    #### Add Winery Data ####
    beerWinePath = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                "BeerWine")
    intermediate_output = OS.path.join(working_dir, "BeerWine", "BeerWine.gdb",
                                       "BeerWineDrought")
    wine = "BeerWine"
    wine_drought = "Wine_Drought"
    DM.MakeFeatureLayer(beerWinePath, wine)
    DM.SelectLayerByAttribute(wine, "NEW_SELECTION", "Type = 'Winery'")
    ANALYSIS.SpatialJoin(drought, wine, intermediate_output, "JOIN_ONE_TO_ONE",
                         "KEEP_ALL")
    try:
        DM.DeleteField(intermediate_output, "NAME")
    except:
        pass
    final_wine_drought = "Wine_Drought_Summary"
    DM.MakeFeatureLayer(intermediate_output, final_wine_drought)

    lf = DM.SaveToLayerFile(
        final_wine_drought,
        OS.path.join(working_dir, '{}.lyr'.format(final_wine_drought)))
    DM.ApplySymbologyFromLayer(lf, lyr_template)

    pw = "test"  #GETPASS.getpass("Enter AGOL password:"******"Drought_and_Wine"

    agol = AGOLHandler("analytics", pw, service_name)
Beispiel #15
0
 classLevel = hypZ[i][-1:]
 curZo = wrk + "/zon_C" + classLevel
 # cycle through each edm
 for j in range(len(rasL)):
     if j == 0:
         inRas = inPath + "/" + rasL[j] + "_c.tif"
         curZoT_out = wrk + "/zonTab_C" + str(i) + "_" + str(j)
         print(".. zoning " + rasL[j])
         curZoT = ZonalStatisticsAsTable(hypZ[i], "Value", inRas,
                                         curZoT_out, "DATA", "MAXIMUM")
         man.CopyRaster(hypZ[i], curZo)
         man.AddField(curZo, "spp0", "TEXT", "", "", 251)
         man.JoinField(curZo, "Value", curZoT, "VALUE", ["MAX"])
         expr = "str( !MAX! )"
         man.CalculateField(curZo, "spp0", expr, "PYTHON")
         man.DeleteField(curZo, "MAX")
         man.Delete(curZoT_out)
     else:
         #jminus = j-1
         inRas = inPath + "/" + rasL[j] + "_c.tif"
         print(".. zoning " + rasL[j])
         curZoT_out = wrk + "/zonTab_C" + str(i) + "_" + str(j)
         curZoT = ZonalStatisticsAsTable(hypZ[i], "Value", inRas,
                                         curZoT_out, "DATA", "MAXIMUM")
         man.JoinField(curZo, "Value", curZoT, "VALUE", ["MAX"])
         expr = "str(!spp0!) + str(!MAX!)"
         man.CalculateField(curZo, "spp0", expr, "PYTHON")
         man.DeleteField(curZo, "MAX")
         man.Delete(curZoT_out)
 # expand information out to one col for each spp.
 print("adding columns...")
    def stats_area_table(zone_fc=zone_fc,
                         zone_field=zone_field,
                         in_value_raster=in_value_raster,
                         out_table=out_table,
                         is_thematic=is_thematic):
        """Summarize in_value_raster within zones and write a per-zone table.

        Rasterizes zone_fc if needed (aligned to the common grid), then runs
        ZonalStatisticsAsTable (continuous rasters) or TabulateArea (thematic
        rasters), computes per-class percentages and data-coverage metrics,
        and writes the refined result to out_table.

        Defaults bind the enclosing scope's variables; `debug_mode`,
        `orig_env`, and `cu` also come from the enclosing scope.

        Returns [stats_result, count_diff] where count_diff is the number of
        input zones that produced no output record.
        """
        def refine_zonal_output(t):
            """Makes a nicer output for this tool. Rename some fields, drop unwanted
                ones, calculate percentages using raster AREA before deleting that
                field."""
            if is_thematic:
                value_fields = arcpy.ListFields(t, "VALUE*")
                pct_fields = [
                    '{}_pct'.format(f.name) for f in value_fields
                ]  # VALUE_41_pct, etc. Field can't start with number.

                # add all the new fields needed
                for f, pct_field in zip(value_fields, pct_fields):
                    arcpy.AddField_management(t, pct_field, f.type)

                # calculate the percents
                cursor_fields = ['AREA'] + [f.name
                                            for f in value_fields] + pct_fields
                # context manager guarantees the cursor lock is released
                # even if updateRow raises
                with arcpy.da.UpdateCursor(t, cursor_fields) as uCursor:
                    for uRow in uCursor:
                        # unpacks area + the value fields, no matter how many there are
                        vf_i_end = len(value_fields) + 1
                        area, value_values = uRow[0], uRow[1:vf_i_end]
                        new_pct_values = [
                            100 * vv / area for vv in value_values
                        ]
                        new_row = [area] + value_values + new_pct_values
                        uCursor.updateRow(new_row)

                for vf in value_fields:
                    arcpy.DeleteField_management(t, vf.name)

            arcpy.AlterField_management(t, 'COUNT', 'CELL_COUNT')
            drop_fields = ['ZONE_CODE', 'COUNT', 'AREA']
            if not debug_mode:
                for df in drop_fields:
                    try:
                        arcpy.DeleteField_management(t, df)
                    except Exception:
                        # best-effort: field may not exist on this table
                        continue

        # Set up environments for alignment between zone raster and theme raster
        if isinstance(zone_fc, arcpy.Result):
            zone_fc = zone_fc.getOutput(0)
        this_files_dir = os.path.dirname(os.path.abspath(__file__))
        os.chdir(this_files_dir)
        common_grid = os.path.abspath('../common_grid.tif')
        env.snapRaster = common_grid
        env.cellSize = common_grid
        env.extent = zone_fc

        zone_desc = arcpy.Describe(zone_fc)
        zone_raster = 'convertraster'
        if zone_desc.dataType not in ['RasterDataset', 'RasterLayer']:
            zone_raster = arcpy.PolygonToRaster_conversion(
                zone_fc,
                zone_field,
                zone_raster,
                'CELL_CENTER',
                cellsize=env.cellSize)
            print('cell size is {}'.format(env.cellSize))
            zone_size = int(env.cellSize)
        else:
            zone_raster = zone_fc
            zone_size = min(
                arcpy.Describe(zone_raster).meanCellHeight,
                arcpy.Describe(zone_raster).meanCellWidth)
            raster_size = min(
                arcpy.Describe(in_value_raster).meanCellHeight,
                arcpy.Describe(in_value_raster).meanCellWidth)
            env.cellSize = min([zone_size, raster_size])
            print('cell size is {}'.format(env.cellSize))

        # I tested and there is no need to resample the raster being summarized. It will be resampled correctly
        # internally in the following tool given that the necessary environments are set above (cell size, snap).
        # # in_value_raster = arcpy.Resample_management(in_value_raster, 'in_value_raster_resampled', CELL_SIZE)
        if not is_thematic:
            arcpy.AddMessage("Calculating Zonal Statistics...")
            temp_entire_table = arcpy.sa.ZonalStatisticsAsTable(
                zone_raster, zone_field, in_value_raster, 'temp_zonal_table',
                'DATA', 'MEAN')

        if is_thematic:
            # for some reason env.cellSize doesn't work
            # calculate/doit
            arcpy.AddMessage("Tabulating areas...")
            temp_entire_table = arcpy.sa.TabulateArea(
                zone_raster,
                zone_field,
                in_value_raster,
                'Value',
                'temp_area_table',
                processing_cell_size=env.cellSize)
            # TabulateArea capitalizes the zone for some annoying reason and ArcGIS is case-insensitive to field names
            # so we have this work-around:
            zone_field_t = '{}_t'.format(zone_field)
            DM.AddField(temp_entire_table,
                        zone_field_t,
                        'TEXT',
                        field_length=20)
            expr = '!{}!'.format(zone_field.upper())
            DM.CalculateField(temp_entire_table, zone_field_t, expr, 'PYTHON')
            DM.DeleteField(temp_entire_table, zone_field.upper())
            DM.AlterField(temp_entire_table,
                          zone_field_t,
                          zone_field,
                          clear_field_alias=True)

            # replaces join to Zonal Stats in previous versions of tool
            # no joining, just calculate the area/count from what's produced by TabulateArea
            arcpy.AddField_management(temp_entire_table, 'AREA', 'DOUBLE')
            arcpy.AddField_management(temp_entire_table, 'COUNT', 'DOUBLE')

            cursor_fields = ['AREA', 'COUNT']
            value_fields = [
                f.name for f in arcpy.ListFields(temp_entire_table, 'VALUE*')
            ]
            cursor_fields.extend(value_fields)
            with arcpy.da.UpdateCursor(temp_entire_table,
                                       cursor_fields) as uCursor:
                for uRow in uCursor:
                    # cell_values renamed from value_fields to avoid
                    # shadowing the field-name list built above
                    area, count, cell_values = uRow[0], uRow[1], uRow[2:]
                    area = sum(cell_values)
                    count = round(
                        area / (int(env.cellSize) * int(env.cellSize)), 0)
                    new_row = [area, count] + cell_values
                    uCursor.updateRow(new_row)

        arcpy.AddMessage("Refining output table...")

        arcpy.AddField_management(temp_entire_table, 'datacoveragepct',
                                  'DOUBLE')
        arcpy.AddField_management(temp_entire_table, 'ORIGINAL_COUNT', 'LONG')

        # calculate datacoveragepct by comparing to original areas in zone raster
        # alternative to using JoinField, which is prohibitively slow if zones exceed hu12 count
        zone_raster_dict = {
            row[0]: row[1]
            for row in arcpy.da.SearchCursor(zone_raster,
                                             [zone_field, 'Count'])
        }
        temp_entire_table_dict = {
            row[0]: row[1]
            for row in arcpy.da.SearchCursor(temp_entire_table,
                                             [zone_field, 'COUNT'])
        }

        sum_cell_area = float(env.cellSize) * float(env.cellSize)
        orig_cell_area = zone_size * zone_size

        with arcpy.da.UpdateCursor(
                temp_entire_table,
            [zone_field, 'datacoveragepct', 'ORIGINAL_COUNT']) as cursor:
            for uRow in cursor:
                key_value, data_pct, count_orig = uRow
                count_orig = zone_raster_dict[key_value]
                if key_value in temp_entire_table_dict:
                    count_summarized = temp_entire_table_dict[key_value]
                    data_pct = 100 * float((count_summarized * sum_cell_area) /
                                           (count_orig * orig_cell_area))
                else:
                    data_pct = None
                cursor.updateRow((key_value, data_pct, count_orig))

        # Refine the output
        refine_zonal_output(temp_entire_table)

        # in order to add vector capabilities back, need to do something with this
        # right now we just can't fill in polygon zones that didn't convert to raster in our system
        stats_result = cu.one_in_one_out(temp_entire_table, zone_fc,
                                         zone_field, out_table)

        # Convert "datacoveragepct" and "ORIGINAL_COUNT" values to 0 for zones with no metrics calculated
        with arcpy.da.UpdateCursor(
                out_table,
            [zone_field, 'datacoveragepct', 'ORIGINAL_COUNT', 'CELL_COUNT'
             ]) as u_cursor:
            for row in u_cursor:
                # data_coverage pct to 0
                if row[1] is None:
                    row[1] = 0
                # original count filled in if a) zone outside raster bounds or b) zone too small to be rasterized
                if row[2] is None:
                    if row[0] in zone_raster_dict:
                        row[2] = zone_raster_dict[row[0]]
                    else:
                        row[2] = 0
                # cell count set to 0
                if row[3] is None:
                    row[3] = 0
                u_cursor.updateRow(row)

        # count whether all zones got an output record or not)
        out_count = int(
            arcpy.GetCount_management(temp_entire_table).getOutput(0))
        in_count = int(arcpy.GetCount_management(zone_fc).getOutput(0))
        count_diff = in_count - out_count

        # cleanup
        if not debug_mode:
            for item in [
                    'temp_zonal_table', temp_entire_table, 'convertraster'
            ]:  # don't add zone_raster, orig
                arcpy.Delete_management(item)
        arcpy.ResetEnvironments()
        env.workspace = orig_env  # hope this prevents problems using list of FCs from workspace as batch
        arcpy.CheckInExtension("Spatial")

        return [stats_result, count_diff]