def main(huc_input, elev_input, strm_input, strm_seg, bf_input, outFGB, upstream_bool):
    # check data characteristics of input stream network for tool requirements
    check_req(strm_input, elev_input)

    # set environment parameters
    arcpy.AddMessage("Setting processing environment parameters...")
    arcpy.CheckOutExtension("Spatial")
    arcpy.env.overwriteOutput = True
    arcpy.env.workspace = outFGB
    arcpy.env.outputCoordinateSystem = elev_input
    arcpy.env.extent = elev_input
    arcpy.env.snapRaster = elev_input
    arcpy.env.cellSize = elev_input
    arcpy.env.mask = elev_input
    cellSizeResult = arcpy.GetRasterProperties_management(elev_input, "CELLSIZEX")
    cellSize = float(cellSizeResult.getOutput(0))
    snap_dist = cellSize * 2

    # metadata
    mWriter = Metadata.MetadataWriter("Delineate Catchments", "0.3")
    mWriter.createRun()
    # input parameters for metadata file
    mWriter.currentRun.addParameter("Watershed polygon feature class", huc_input)
    mWriter.currentRun.addParameter("DEM raster", elev_input)
    mWriter.currentRun.addParameter("Stream network polyline feature class", strm_input)
    mWriter.currentRun.addParameter("Segmented stream network feature class", strm_seg)
    mWriter.currentRun.addParameter("Bankfull (or stream area) polygon feature class", bf_input)
    mWriter.currentRun.addParameter("Output file geodatabase", outFGB)
    mWriter.currentRun.addParameter("Upstream (i.e. overlapping) catchments?", upstream_bool)

    # check segmented stream network for LineOID field, and add it if it's missing
    seg_oid = arcpy.Describe(strm_seg).OIDFieldName
    list_field = arcpy.ListFields(strm_seg, "LineOID")
    strm_seg_lyr = "strm_seg_lyr"
    arcpy.MakeFeatureLayer_management(strm_seg, strm_seg_lyr)
    if len(list_field) == 0:
        arcpy.AddField_management(strm_seg_lyr, "LineOID", "LONG")
        arcpy.CalculateField_management(strm_seg_lyr, "LineOID", "!" + seg_oid + "!", "PYTHON_9.3")

    # convert stream network to raster
    arcpy.AddMessage("Converting stream network to raster format...")
    strm_ras = sR.convert_ras(huc_input, bf_input, strm_input)

    # recondition DEM using stream network
    arcpy.AddMessage("Reconditioning DEM with stream network...")
    dem_rec = dR.dem_recnd(elev_input, huc_input, strm_ras, outFGB)

    # calculate flow direction and flow accumulation
    arcpy.AddMessage("Calculating flow direction and accumulation...")
    fd = FlowDirection(dem_rec, "NORMAL")
    fa = FlowAccumulation(fd, "", "FLOAT")

    # plot segment endpoints as pour points
    arcpy.AddMessage("...plotting pour points.")
    seg_endpoints = end.main(strm_input, strm_seg, cellSize)
    arcpy.FeatureClassToFeatureClass_conversion(seg_endpoints, outFGB, "endpoints")

    # create blank polygon feature class to store catchments
    # (the coordinate system of the seg_endpoints template is applied to the new feature class)
    arcpy.AddMessage("Creating blank polygon feature class to store catchments...")
    arcpy.CreateFeatureclass_management(outFGB, "catch_ply", "POLYGON", seg_endpoints)
    arcpy.MakeFeatureLayer_management(outFGB + "\\catch_ply", "catch_ply_lyr")
    arcpy.MakeFeatureLayer_management(seg_endpoints, "seg_endpoints_lyr")

    # create field mappings for pour points
    fm = arcpy.FieldMappings()
    fm.addTable(seg_endpoints)

    # set up counters
    total_cnt = arcpy.GetCount_management("seg_endpoints_lyr")

    # if the upstream option is selected, iterate through each point
    arcpy.AddMessage("Delineating catchments for each pour point...")
    if upstream_bool == 'true':
        with arcpy.da.SearchCursor(seg_endpoints, ["SHAPE@", "LineOID"]) as cursor:
            for row in cursor:
                try:
                    arcpy.FeatureClassToFeatureClass_conversion(row[0], "in_memory", "pnt_tmp", "#", fm)
                    arcpy.MakeFeatureLayer_management("in_memory\\pnt_tmp", "pnt_tmp_lyr")
                    arcpy.CalculateField_management("pnt_tmp_lyr", "LineOID", row[1], "PYTHON_9.3")
                    pnt_snap = SnapPourPoint("pnt_tmp_lyr", fa, snap_dist, "LineOID")
                    wshd_ras = Watershed(fd, pnt_snap, "VALUE")
                    arcpy.RasterToPolygon_conversion(wshd_ras, "in_memory\\wshd_ply", "NO_SIMPLIFY", "VALUE")
                    arcpy.MakeFeatureLayer_management("in_memory\\wshd_ply", "wshd_ply_lyr")
                    arcpy.AddField_management("wshd_ply_lyr", "LineOID", "LONG")
                    arcpy.CalculateField_management("wshd_ply_lyr", "LineOID", row[1], "PYTHON_9.3")
                    arcpy.Append_management("wshd_ply_lyr", "catch_ply_lyr", "NO_TEST")
                    arcpy.AddMessage("Catchment delineated for " + str(round(row[1])) + " of "
                                     + str(total_cnt) + " records...")
                except:
                    arcpy.AddMessage("\nError delineating catchments for point #" + str(row[1])
                                     + ": " + arcpy.GetMessages(2))
                    arcpy.AddMessage("Moving to next pour point...")
                    # raise Exception
                    continue
    # otherwise, create "RCA-style" polygons
    else:
        arcpy.MakeFeatureLayer_management(seg_endpoints, "pnt_tmp_lyr")
        snap_dist = cellSize * 2
        pnt_snap = SnapPourPoint("pnt_tmp_lyr", fa, snap_dist, "LineOID")
        wshd_ras = Watershed(fd, pnt_snap, "Value")
        arcpy.RasterToPolygon_conversion(wshd_ras, "in_memory\\wshd_ply", "NO_SIMPLIFY", "Value")
        arcpy.MakeFeatureLayer_management("in_memory\\wshd_ply", "wshd_ply_lyr")
        arcpy.AddField_management("wshd_ply_lyr", "LineOID", "LONG")
        arcpy.CalculateField_management("wshd_ply_lyr", "LineOID", "!gridcode!", "PYTHON_9.3")
        arcpy.Append_management("wshd_ply_lyr", "catch_ply_lyr", "NO_TEST")

    # final clean up of upstream catchment polygons
    arcpy.AddMessage("Removing slivers and dissolving watershed polygons...")
    arcpy.AddMessage("...repairing geometry.")
    arcpy.RepairGeometry_management("catch_ply_lyr")
    arcpy.AddField_management("catch_ply_lyr", "sqkm", "DOUBLE")
    arcpy.AddMessage("...calculating area of catchment polygons.")
    arcpy.CalculateField_management("catch_ply_lyr", "sqkm", "!SHAPE.AREA@SQUAREKILOMETERS!", "PYTHON_9.3")
    arcpy.AddMessage("...selecting sliver polygons.")
    arcpy.SelectLayerByAttribute_management("catch_ply_lyr", "NEW_SELECTION", """"sqkm" <= 0.0001""")
    arcpy.AddMessage("...merging sliver polygons with largest neighbors.")
    arcpy.Eliminate_management("catch_ply_lyr", "in_memory\\catch_eliminate", "LENGTH")
    arcpy.MakeFeatureLayer_management("in_memory\\catch_eliminate", "catch_elim_lyr")
    arcpy.AddMessage("...dissolving catchment polygons based on LineOID value.")
    arcpy.Dissolve_management("catch_elim_lyr", outFGB + "\\catch_final", "LineOID")
    arcpy.MakeFeatureLayer_management(outFGB + "\\catch_final", "catch_final_lyr")
    arcpy.Delete_management(outFGB + "\\catch_ply")

    # find errors
    arcpy.AddMessage("Adding error_code field...")  ### TEMP
    arcpy.AddField_management("catch_final_lyr", "error_code", "SHORT")
    # error_code = 1; polygons are "too small"
    arcpy.AddField_management("catch_final_lyr", "sqkm", "DOUBLE")
    arcpy.CalculateField_management("catch_final_lyr", "sqkm", "!SHAPE.AREA@SQUAREKILOMETERS!", "PYTHON_9.3")
    arcpy.SelectLayerByAttribute_management("catch_final_lyr", "NEW_SELECTION", """"sqkm" <= 0.02""")
    arcpy.CalculateField_management("catch_final_lyr", "error_code", "1", "PYTHON_9.3")
    arcpy.SelectLayerByAttribute_management("catch_final_lyr", "CLEAR_SELECTION")
    # error_code = 2; polygons are "too thin"
    arcpy.AddField_management("catch_final_lyr", "thinness", "DOUBLE")
    arcpy.CalculateField_management("catch_final_lyr", "thinness",
                                    """(4*3.14*!SHAPE.AREA!)/(math.pow(!SHAPE.LENGTH!,2))""", "PYTHON_9.3")
    arcpy.SelectLayerByAttribute_management("catch_final_lyr", "NEW_SELECTION", """"thinness" < 0.090""")
    arcpy.CalculateField_management("catch_final_lyr", "error_code", "2", "PYTHON_9.3")
    arcpy.SelectLayerByAttribute_management("catch_final_lyr", "CLEAR_SELECTION")
    arcpy.DeleteField_management("catch_final_lyr", "thinness")

    # outputs and stop processing clock for metadata
    mWriter.currentRun.addOutput("Output catchment area polygons", outFGB + r"\catch_final")
    mWriter.currentRun.addOutput("Output endpoints", outFGB + r"\endpoints")
    mWriter.currentRun.addOutput("Output reconditioned DEM", outFGB + r"\dem_recond")
    mWriter.finalizeRun()

    # write the metadata file
    d = datetime.datetime.now()
    outPath = os.path.dirname(outFGB)
    metadataFile = "{0}{1}{2}{3}{4}{5}{6}{7}".format(outPath, r"\metadata_", d.year, d.month,
                                                     d.day, d.hour, d.minute, ".xml")
    mWriter.writeMetadataFile(metadataFile)
    return
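
# --- Hypothetical invocation of the catchment-delineation tool above ---
# A minimal sketch only: the geodatabase paths and the 'true' string passed for
# upstream_bool are illustrative assumptions, not values from the original script.
if __name__ == '__main__':
    main(huc_input=r"C:\data\inputs.gdb\watershed_poly",    # HUC / watershed polygon
         elev_input=r"C:\data\inputs.gdb\dem_10m",          # DEM raster
         strm_input=r"C:\data\inputs.gdb\streams",          # stream network polylines
         strm_seg=r"C:\data\inputs.gdb\streams_segmented",  # segmented stream network
         bf_input=r"C:\data\inputs.gdb\bankfull_poly",      # bankfull polygons
         outFGB=r"C:\data\outputs.gdb",                     # output file geodatabase
         upstream_bool='true')                              # overlapping upstream catchments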
def grouppolygons(layer):
    # Splits a polygon layer into groups (blocks). Within a group the polygons share
    # at least one common point.
    # Output is [[GroupNo1, [houseNo1, houseNo2, ...]], [GroupNo2, [...]], ...]
    temp_buff = "in_memory\\TMPBuff"
    temp_diss = "in_memory\\TMPDiss"
    temp_join = "in_memory\\TMPJoin"

    # create a small buffer around every polygon
    arcpy.Buffer_analysis(in_features=layer,
                          out_feature_class=temp_buff,
                          buffer_distance_or_field='0.5 Meters')
    arcpy.RepairGeometry_management(in_features=temp_buff, delete_null='DELETE_NULL')

    # dissolve the buffers with no attributes (the buffer makes polygons that only
    # touch at a single point overlap, so they dissolve into one group feature)
    arcpy.Dissolve_management(in_features=temp_buff,
                              out_feature_class=temp_diss,
                              dissolve_field='',
                              statistics_fields='',
                              multi_part='SINGLE_PART')

    fieldmapping = arcpy.FieldMappings()
    fm = arcpy.FieldMap()
    fm.addInputField(layer, 'OBJECTID')
    fm_name = fm.outputField
    fm_name.name = 'ELEMENTID'
    fm_name.alias = 'ELEMENTID'
    fm_name.type = 'LONG'
    fm.outputField = fm_name
    fieldmapping.addFieldMap(fm)
    del fm

    fm = arcpy.FieldMap()
    fm.addInputField(temp_diss, 'OBJECTID')
    fm_name = fm.outputField
    fm_name.name = 'GROUPID'
    fm_name.alias = 'GROUPID'
    fm_name.type = 'LONG'
    fm.outputField = fm_name
    fieldmapping.addFieldMap(fm)
    del fm

    # join every polygon to its group
    arcpy.SpatialJoin_analysis(target_features=layer,
                               join_features=temp_diss,
                               out_feature_class=temp_join,
                               join_operation='JOIN_ONE_TO_ONE',
                               join_type='KEEP_COMMON',
                               field_mapping=fieldmapping,
                               match_option='INTERSECT')

    # raw_groups - a list of [[GROUPID, ELEMENTID], ...]
    raw_groups = [[row[0], row[1]]
                  for row in arcpy.da.SearchCursor(temp_join, ['GROUPID', 'ELEMENTID'])]
    raw_groups.sort(key=lambda x: x[0], reverse=False)

    groups = [[raw_groups[0][0], []]]
    prev = raw_groups[0][0]
    for g in raw_groups:
        if g[0] != prev:
            groups += [[g[0], [g[1]]]]
            prev = g[0]
        else:
            groups[-1][1] += [g[1]]

    arcpy.Delete_management(temp_buff)
    arcpy.Delete_management(temp_diss)
    arcpy.Delete_management(temp_join)
    return groups
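
# --- Hypothetical use of grouppolygons() ---
# A sketch only; "buildings" is an assumed feature layer name. The returned structure
# pairs each dissolved-group OBJECTID with the OBJECTIDs of its member polygons,
# e.g. [[1, [4, 7, 9]], [2, [12]], ...].
blocks = grouppolygons("buildings")
for group_id, element_ids in blocks:
    print("Block {0}: {1} polygon(s) {2}".format(group_id, len(element_ids), element_ids))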
def insert_from_path(dataset_path, insert_dataset_path, field_names=None, **kwargs):
    """Insert features into dataset from another dataset.

    Args:
        dataset_path (str): Path of the dataset.
        insert_dataset_path (str): Path of dataset to insert features from.
        field_names (iter): Collection of field names to insert. Listed fields must be
            present in both datasets. If field_names is None, all fields will be inserted.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        insert_where_sql (str): SQL where-clause for insert-dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        collections.Counter: Counts for each feature action.
    """
    kwargs.setdefault('insert_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log("Start: Insert features into %s from %s.", dataset_path, insert_dataset_path)
    meta = {
        'dataset': arcobj.dataset_metadata(dataset_path),
        'insert': arcobj.dataset_metadata(insert_dataset_path),
    }
    if field_names is None:
        keys = set.intersection(*(
            set(name.lower() for name in _meta['field_names_tokenized'])
            for _meta in meta.values()
        ))
    else:
        keys = set(name.lower() for name in contain(field_names))
    # OIDs & area/length "fields" have no business being part of an insert.
    # Geometry itself is handled separately in append function.
    for _meta in meta.values():
        for key in chain(*_meta['field_token'].items()):
            keys.discard(key)
    append_kwargs = {
        'inputs': unique_name('view'),
        'target': dataset_path,
        'schema_type': 'no_test',
        'field_mapping': arcpy.FieldMappings(),
    }
    # Create field maps.
    # ArcGIS Pro's no-test append is case-sensitive (verified 1.0-1.1.1).
    # Avoid this problem by using field mapping.
    # BUG-000090970 - ArcGIS Pro 'No test' field mapping in Append tool does not
    # auto-map to the same field name if naming convention differs.
    for key in keys:
        field_map = arcpy.FieldMap()
        field_map.addInputField(insert_dataset_path, key)
        append_kwargs['field_mapping'].addFieldMap(field_map)
    view = arcobj.DatasetView(
        insert_dataset_path,
        kwargs['insert_where_sql'],
        view_name=append_kwargs['inputs'],
        # Must be nonspatial to append to nonspatial table.
        force_nonspatial=(not meta['dataset']['is_spatial']),
    )
    session = arcobj.Editor(meta['dataset']['workspace_path'], kwargs['use_edit_session'])
    with view, session:
        arcpy.management.Append(**append_kwargs)
        feature_count = Counter({'inserted': view.count})
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
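
# --- Hypothetical call to insert_from_path ---
# A sketch only, assuming the module's own helpers (arcobj, leveled_logger, unique_name,
# contain) are importable; the geodatabase paths, field names, and where-clause are
# illustrative assumptions.
counts = insert_from_path(
    dataset_path=r"C:\data\prod.gdb\parcels",
    insert_dataset_path=r"C:\data\staging.gdb\parcels_new",
    field_names=["parcel_id", "owner", "acreage"],
    insert_where_sql="acreage > 0",
    use_edit_session=True,
)
print(counts["inserted"])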
temp_df = updates.loc[updates['Jurisdiction'] == jurisdiction]
print(temp_df.head())
row[1] = temp_df.iloc[0]['Cases']
row[2] = row[1]
row[3] = temp_df.iloc[0]['Hospitalizations']
row[4] = dt.datetime.now()
row[6] = (row[2] / row[5]) * 100000.
row[7] = temp_df.iloc[0]['Deaths']
count += 1
ucursor.updateRow(row)

print(f'Total count of COVID Case Count updates is: {count}')

# 2) APPEND MOST RECENT CASE COUNTS TO COUNTS BY DAY TABLE
# Build Field Map for all fields from counts_service into counts_by_day
fms = arcpy.FieldMappings()

# Old Field Mapping
# fm_dict = {'DISTNAME': 'DISTNAME',
#            'COVID_Cases_Utah_Resident': 'COVID19_Cases_in_Utah_Residents',
#            'COVID_Cases_Non_Utah_Resident': 'COVID19_Cases_in_Non_Utah_Residents',
#            'COVID_Cases_Total': 'Total_COVID19_Cases_in_Utah',
#            'Date_Updated': 'Day',
#            'Hospitalizations': 'Hospitalizations',
#            'Population': 'Population',
#            'Cases_per_100k': 'Case_Rate_Per_100_000'}

# New Field Mapping
fm_dict = {'DISTNAME': 'DISTNAME',
           'COVID_Cases_Utah_Resident': 'COVID_Cases_Utah_Resident',
           'COVID_Cases_Non_Utah_Resident': 'COVID_Cases_Non_Utah_Resident',
def SpatialJoinLargestOverlap(target_features, join_features, out_fc, keep_all, spatial_rel):
    if spatial_rel == "largest_overlap":
        # Calculate intersection between Target Features and Join Features
        intersect = arcpy.analysis.Intersect([target_features, join_features],
                                             "in_memory/intersect", "ONLY_FID")
        # Save a copy of the intersect output for debugging
        arcpy.CopyFeatures_management(
            intersect,
            "C:/Users/IanAvery/Desktop/highTidePrototype/santaClaraCountyAnalysis.gdb/intersect"
        )
        # Find which Join Feature has the largest overlap with each Target Feature.
        # Need to know the Target Features' shape type to know whether to read the
        # SHAPE_AREA or SHAPE_LENGTH property.
        geom = "AREA" if (arcpy.Describe(target_features).shapeType.lower() == "polygon"
                          and arcpy.Describe(join_features).shapeType.lower() == "polygon") else "LENGTH"
        fields = [
            "FID_{0}".format(os.path.splitext(os.path.basename(target_features))[0]),
            "FID_{0}".format(os.path.splitext(os.path.basename(join_features))[0]),
            "SHAPE@{0}".format(geom)
        ]
        overlap_dict = {}
        with arcpy.da.SearchCursor(intersect, fields) as scur:
            for row in scur:
                try:
                    if row[2] > overlap_dict[row[0]][1]:
                        overlap_dict[row[0]] = [row[1], row[2]]
                except KeyError:
                    overlap_dict[row[0]] = [row[1], row[2]]

        # Copy the target features and write the largest-overlap join feature ID to each record.
        # Set up all fields from the target features + ORIG_FID
        fieldmappings = arcpy.FieldMappings()
        fieldmappings.addTable(target_features)
        fieldmap = arcpy.FieldMap()
        fieldmap.addInputField(target_features, arcpy.Describe(target_features).OIDFieldName)
        fld = fieldmap.outputField
        fld.type, fld.name, fld.aliasName = "LONG", "ORIG_FID", "ORIG_FID"
        fieldmap.outputField = fld
        fieldmappings.addFieldMap(fieldmap)
        # Perform the copy
        arcpy.conversion.FeatureClassToFeatureClass(target_features, os.path.dirname(out_fc),
                                                    os.path.basename(out_fc), "", fieldmappings)
        # Add a new field JOIN_FID to contain the FID of the join feature with the largest overlap
        arcpy.management.AddField(out_fc, "JOIN_FID", "LONG")
        # Calculate the JOIN_FID field
        with arcpy.da.UpdateCursor(out_fc, ["ORIG_FID", "JOIN_FID"]) as ucur:
            for row in ucur:
                try:
                    row[1] = overlap_dict[row[0]][0]
                    ucur.updateRow(row)
                except KeyError:
                    if not keep_all:
                        ucur.deleteRow()
        # Join all attributes from the join features to the output
        joinfields = [x.name for x in arcpy.ListFields(join_features) if not x.required]
        arcpy.management.JoinField(out_fc, "JOIN_FID", join_features,
                                   arcpy.Describe(join_features).OIDFieldName, joinfields)
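
# --- Hypothetical call to SpatialJoinLargestOverlap ---
# A sketch only; the geodatabase paths are placeholder assumptions. Each parcel is
# assigned the flood-zone polygon it overlaps most; keep_all=True keeps parcels that
# have no overlapping join feature.
SpatialJoinLargestOverlap(
    target_features=r"C:\data\analysis.gdb\parcels",
    join_features=r"C:\data\analysis.gdb\flood_zones",
    out_fc=r"C:\data\analysis.gdb\parcels_flood_join",
    keep_all=True,
    spatial_rel="largest_overlap")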
def output2NewFC(self, outputFC, candidateFields, appendFields=[], fieldOrder=[]):
    """Creates a new feature class with the same shape characteristics as the
    source input feature class and appends data to it.

    INPUTS:
    outputFC (str): catalogue path to output feature class
    candidateFields (dict): fieldName = instance of CandidateField
    appendFields {list, []}: field names in the order you want appended
    fieldOrder {list, []}: the order with which to write fields
    """
    #### Initial Progressor Bar ####
    ARCPY.overwriteOutput = True
    ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84006))

    #### Validate Output Workspace ####
    ERROR.checkOutputPath(outputFC)

    #### Create Path for Output FC ####
    outPath, outName = OS.path.split(outputFC)

    #### Get Output Name for SDE if Necessary ####
    baseType = UTILS.getBaseWorkspaceType(outPath)
    if baseType.upper() == 'REMOTEDATABASE':
        outName = outName.split(".")[-1]
    self.outputFC = OS.path.join(outPath, outName)

    #### Assess Whether to Honor Original Field Nullable Flag ####
    setNullable = UTILS.setToNullable(self.catPath, self.outputFC)

    #### Add Null Value Flag ####
    outIsShapeFile = UTILS.isShapeFile(self.outputFC)

    #### Create Output Field Names to be Appended From Input ####
    inputFieldNames = ["SHAPE@", self.masterField]
    appendFieldNames = []
    masterIsOID = self.masterField == self.oidName
    if masterIsOID:
        appendFieldNames.append("SOURCE_ID")
    else:
        master = self.allFields[self.masterField.upper()]
        returnName = UTILS.returnOutputFieldName(master)
        appendFieldNames.append(returnName)

    for fieldName in appendFields:
        field = self.allFields[fieldName.upper()]
        returnName = UTILS.returnOutputFieldName(field)
        inputFieldNames.append(fieldName)
        appendFieldNames.append(returnName)
    appendFieldNames = UTILS.createAppendFieldNames(appendFieldNames, outPath)
    masterOutName = appendFieldNames[0]

    #### Create Field Mappings for Visible Fields ####
    outputFieldMaps = ARCPY.FieldMappings()

    #### Add Input Fields to Output ####
    for ind, fieldName in enumerate(appendFieldNames):
        if ind == 0:
            #### Master Field ####
            sourceFieldName = self.masterField
            if masterIsOID:
                fieldType = "LONG"
                alias = fieldName
                setOutNullable = False
                fieldLength = None
                fieldPrecision = None
            else:
                masterOutField = self.allFields[self.masterField.upper()]
                fieldType = masterOutField.type
                alias = masterOutField.baseName
                setOutNullable = setNullable
                fieldLength = masterOutField.length
                fieldPrecision = masterOutField.precision
        else:
            #### Append Fields ####
            sourceFieldName = appendFields[ind - 1]
            outField = self.allFields[sourceFieldName]
            fieldType = outField.type
            alias = outField.baseName
            setOutNullable = setNullable
            fieldLength = outField.length
            fieldPrecision = outField.precision

        #### Create Candidate Field ####
        outCandidate = CandidateField(fieldName, fieldType, None, alias=alias,
                                      precision=fieldPrecision, length=fieldLength)

        #### Create Output Field Map ####
        outFieldMap = UTILS.createOutputFieldMap(self.inputFC, sourceFieldName,
                                                 outFieldCandidate=outCandidate,
                                                 setNullable=setOutNullable)

        #### Add Output Field Map to New Field Mapping ####
        outputFieldMaps.addFieldMap(outFieldMap)

    #### Do FC2FC Without Extent Env Var ####
    FC2FC = UTILS.clearExtent(CONV.FeatureClassToFeatureClass)
    try:
        FC2FC(self.inputFC, outPath, outName, "", outputFieldMaps)
    except:
        ARCPY.AddIDMessage("ERROR", 210, self.outputFC)
        raise SystemExit()

    #### Create/Verify Result Field Order ####
    fieldKeys = sorted(candidateFields.keys())
    if len(fieldOrder) == len(fieldKeys):
        fKeySet = set(fieldKeys)
        fieldOrderSet = set(fieldOrder)
        if fieldOrderSet == fKeySet:
            fieldKeys = fieldOrder
        del fKeySet, fieldOrderSet

    #### Add Empty Output Analysis Fields ####
    outputFieldNames = [masterOutName]
    for fieldInd, fieldName in enumerate(fieldKeys):
        field = candidateFields[fieldName]
        field.copy2FC(outputFC)
        outputFieldNames.append(fieldName)

        #### Replace NaNs for Shapefiles ####
        if outIsShapeFile:
            if field.type != "TEXT":
                isNaN = NUM.isnan(field.data)
                if NUM.any(isNaN):
                    field.data[isNaN] = UTILS.shpFileNull[field.type]

    #### Populate Output Feature Class with Values ####
    ARCPY.SetProgressor("step", ARCPY.GetIDMessage(84003), 0, self.numObs, 1)
    outRows = DA.UpdateCursor(self.outputFC, outputFieldNames)
    for row in outRows:
        masterID = row[0]
        if masterID in self.master2Order:
            order = self.master2Order[masterID]

            #### Create Output Row from Input ####
            resultValues = [masterID]

            #### Add Result Values ####
            for fieldName in fieldKeys:
                field = candidateFields[fieldName]
                fieldValue = field.data.item(order)
                resultValues.append(fieldValue)

            #### Insert Values into Output ####
            outRows.updateRow(resultValues)
        else:
            #### Bad Record ####
            outRows.deleteRow()
        ARCPY.SetProgressorPosition()

    #### Clean Up ####
    del outRows
def geocode():
    try:
        # Local variables:
        transformed_xlsx = "I:\\GIS\\OASIS\\Geocoder\\transformed.xlsx"
        transfomed = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\transfomed"
        AddressLocator_Master_Address_Database = "I:\\GIS\\OASIS\\AddressLocators\\AddressLocator_Master_Address_Database"
        geocoded_addresses = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\geocoded_addresses"
        geocoder_gdb = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb"
        geocoded_addresses_failed = "geocoded_addresses_failed"
        unmatched_xls = "I:\\GIS\\OASIS\\Geocoder\\unmatched.xls"
        unmatched = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\unmatched"
        unmatched__3_ = unmatched
        AddressLocator_Street_Centerlines__2_ = "I:\\GIS\\OASIS\\AddressLocators\\AddressLocator_Street_Centerlines"
        geocoded_street_centerlines = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\geocoded_street_centerlines"
        geocoded_street_centerlines_successful = "geocoded_street_centerlines_successful"
        geocoded_street_centerlines_successful__2_ = geocoded_street_centerlines_successful
        geocoded_street_centerlines_successful__3_ = geocoded_street_centerlines_successful__2_
        geocoder_gdb__2_ = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb"
        geocoded_master_successful = "geocoded_master_successful"
        geocoded_master_successful__2_ = geocoded_master_successful
        geocoded_master_successful__4_ = geocoded_master_successful__2_
        geocoder_eas = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\geocoder_eas"
        final = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\final"

        # Process: Excel To Table
        try:
            arcpy.ExcelToTable_conversion(transformed_xlsx, transfomed)
        except Exception as e:
            print(e)

        # Process: Geocode Addresses
        try:
            arcpy.GeocodeAddresses_geocoding(transfomed, AddressLocator_Master_Address_Database,
                                             "Key transformed_address VISIBLE NONE",
                                             geocoded_addresses, "STATIC", "", "")
        except Exception as e:
            print(e)

        # Process: Make Feature Layer
        try:
            arcpy.MakeFeatureLayer_management(geocoded_addresses, geocoded_addresses_failed, "Status = 'U'", geocoder_gdb,
                                              "ObjectID OBJECTID VISIBLE NONE;Shape Shape VISIBLE NONE;Status Status VISIBLE NONE;Score Score VISIBLE NONE;Match_type Match_type VISIBLE NONE;Match_addr Match_addr VISIBLE NONE;X X VISIBLE NONE;Y Y VISIBLE NONE;Xmin Xmin VISIBLE NONE;Xmax Xmax VISIBLE NONE;Ymin Ymin VISIBLE NONE;Ymax Ymax VISIBLE NONE;Addr_type Addr_type VISIBLE NONE;ARC_Single_Line_Input ARC_Single_Line_Input VISIBLE NONE")
        except Exception as e:
            print(e)

        # Process: Table To Excel
        try:
            arcpy.TableToExcel_conversion(geocoded_addresses_failed, unmatched_xls, "NAME", "CODE")
        except Exception as e:
            print(e)

        # Process: Excel To Table (2)
        arcpy.ExcelToTable_conversion(unmatched_xls, unmatched, "")

        # Process: Delete Field
        arcpy.DeleteField_management(unmatched, "OBJECTID_1;Status;Score;Match_type;Match_addr;X;Y;Xmin;Xmax;Ymin;Ymax;Addr_type;ARC_Single_Line_Input;ARC_SingleKey")

        # Process: Geocode Addresses (2)
        arcpy.GeocodeAddresses_geocoding(unmatched__3_, AddressLocator_Street_Centerlines__2_,
                                         "'Full Address' transformed_address VISIBLE NONE",
                                         geocoded_street_centerlines, "STATIC", "", "")

        # Process: Make Feature Layer (3)
        arcpy.MakeFeatureLayer_management(geocoded_street_centerlines, geocoded_street_centerlines_successful, "", "",
                                          "ObjectID ObjectID VISIBLE NONE;Shape Shape VISIBLE NONE;Status Status VISIBLE NONE;Score Score VISIBLE NONE;Match_type Match_type VISIBLE NONE;Match_addr Match_addr VISIBLE NONE;Side Side VISIBLE NONE;Ref_ID Ref_ID VISIBLE NONE;User_fld User_fld VISIBLE NONE;Addr_type Addr_type VISIBLE NONE;ARC_Single_Line_Input ARC_Single_Line_Input VISIBLE NONE")

        # Process: Add Field (2)
        arcpy.AddField_management(geocoded_street_centerlines_successful, "geocoder", "TEXT",
                                  "", "", "", "", "NULLABLE", "NON_REQUIRED", "")

        # Process: Calculate Field (2)
        arcpy.CalculateField_management(geocoded_street_centerlines_successful__2_, "geocoder",
                                        "classifyGeocoder(!Status!)", "PYTHON",
                                        "def classifyGeocoder(Status):\\n if Status == \"M\" or Status == \"T\":\\n return \"SC\"\\n else:\\n return \"U\"")

        # Process: Make Feature Layer (2)
        arcpy.MakeFeatureLayer_management(geocoded_addresses, geocoded_master_successful, "Status = 'M' OR Status = 'T'", geocoder_gdb__2_,
                                          "ObjectID OBJECTID VISIBLE NONE;Shape Shape VISIBLE NONE;Status Status VISIBLE NONE;Score Score VISIBLE NONE;Match_type Match_type VISIBLE NONE;Match_addr Match_addr VISIBLE NONE;X X VISIBLE NONE;Y Y VISIBLE NONE;Xmin Xmin VISIBLE NONE;Xmax Xmax VISIBLE NONE;Ymin Ymin VISIBLE NONE;Ymax Ymax VISIBLE NONE;Addr_type Addr_type VISIBLE NONE;ARC_Single_Line_Input ARC_Single_Line_Input VISIBLE NONE")

        # Process: Add Field
        arcpy.AddField_management(geocoded_master_successful, "geocoder", "TEXT",
                                  "", "", "20", "", "NULLABLE", "NON_REQUIRED", "")

        # Process: Calculate Field
        arcpy.CalculateField_management(geocoded_master_successful__2_, "geocoder", "\"EAS\"", "PYTHON", "")

        # Process: Copy Features
        arcpy.CopyFeatures_management(geocoded_master_successful__4_, geocoder_eas, "", "0", "0", "0")

        # Process: Merge
        print("SUCCEEDED")
        fieldmappings = arcpy.FieldMappings()
        fieldmappings.addTable(transfomed)
        arcpy.Merge_management("I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\geocoder_eas; geocoded_street_centerlines_successful",
                               final, fieldmappings)
        print("GEOCODING SUCCESSFUL")
    except Exception as e:
        print("ERROR")
        print(e)
gde_wetlands = r"K:\GIS3\Projects\GDE\Geospatial\NV_iGDE_050919.gdb\Wetlands"


# Map fields to the GDE Wetlands layer and copy polygon features there:
def mapFields(inlayer, infield, mapfield_name, mapfield_alias, mapfield_type):  # mapFields function
    fldMap = arcpy.FieldMap()
    fldMap.addInputField(inlayer, infield)
    mapOut = fldMap.outputField
    mapOut.name, mapOut.alias, mapOut.type = mapfield_name, mapfield_alias, mapfield_type
    fldMap.outputField = mapOut
    return fldMap


# Field mapping for Wetlands layer
wet_type_map = mapFields(wet_copy, "WETLAND_TYPE", "WET_TYPE", "Wetland Type", "TEXT")
wet_subtype_map = mapFields(wet_copy, "WETLAND_SUBTYPE", "WET_SUBTYPE", "Wetland Subtype", "TEXT")
source_map = mapFields(wet_copy, "SOURCECODE", "SOURCE_CODE", "Source Code", "TEXT")
fldMap_list = [wet_type_map, wet_subtype_map, source_map]
wetFldMappings = arcpy.FieldMappings()
for fm in fldMap_list:
    wetFldMappings.addFieldMap(fm)

# Append to GDE database Wetlands layer
arcpy.Append_management(wet_copy, gde_wetlands, "NO_TEST", wetFldMappings)
# END
# dissolve the union based on geography, genzone, genswr fields
# dissolve above shapefile based on geoid, genzone, and swr
arcpy.Dissolve_management(
    diss_union + "diss_cbg_gen_sew_" + county.name + '.shp',
    diss_output + "diss_un_cgb_gen_sew" + "_" + county.name,
    ["GEOGRAPHY", "GENZONE", "GENZ_SWR"])
arcpy.AddMessage("Dissolve of Union of CGB and Dissolved Gen and Sewer Done")

# spatial join selected points to above dissolved union
targetFeatures = diss_output + "diss_un_cgb_gen_sew" + "_" + county.name + '.shp'
joinFeatures = county.name
outfc = outputPre + county.name + outputPostZS

# field mapping
fieldmappings = arcpy.FieldMappings()   # create FieldMappings object
fieldmappings.addTable(targetFeatures)  # then add tables that are to be combined
fieldmappings.addTable(joinFeatures)

# get ACCTID from parcel shapefile
acctidFieldIndex = fieldmappings.findFieldMapIndex("ACCTID")
fieldmap = fieldmappings.getFieldMap(acctidFieldIndex)

# set merge rule Count
fieldmap.mergeRule = "count"

# replace field map
fieldmappings.replaceFieldMap(acctidFieldIndex, fieldmap)

# spatial join points to above shapefile
# matching
# Add Phreatophyte Group to the attribute table of TNC layer (generalizes phreatophyte types in the public database)
phrea_lut = r"K:\GIS3\Projects\GDE\Tables\GDE_Phreatophyte_NameCodeGroup.csv"
phrea_tbl = arcpy.TableToTable_conversion(phrea_lut, path, "gde_phreatophyte_lut")
arcpy.JoinField_management(tnc_veg, "SYS_CODE", phrea_tbl, "SYS_CODE", ["SYS_GROUP"])

# Append TNC Phreatophytes to GDE Phreatophytes layer
veg_type_map = mapFields(tnc_veg, "SYS_NAME", "PHR_TYPE", "Phreatophyte Type", "TEXT")
veg_group_map = mapFields(tnc_veg, "SYS_GROUP", "PHR_GROUP", "Phreatophyte Group", "TEXT")
source_map = mapFields(tnc_veg, "SOURCECODE", "SOURCE_CODE", "Source Code", "TEXT")
fldMap_list = [veg_type_map, veg_group_map, source_map]
allFldMappings = arcpy.FieldMappings()
for fm in fldMap_list:
    allFldMappings.addFieldMap(fm)
arcpy.Append_management(tnc_veg, gde_phr, "NO_TEST", allFldMappings)

# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Process phreatophytes from Landfire BpS
gde_phr = r"K:\GIS3\Projects\GDE\Geospatial\NV_iGDE_050919.gdb\Phreatophytes"
env.workspace = path

# Load LANDFIRE BpS
lf_bps = r"K:\GIS3\States\NV\Landfire_BPS_NV\US_140BPS_20180618\grid\us_140bps"

# Read in lookup table with codes and names for identified non-wetland GDE systems in Landfire
lf_code_csv = r"K:\GIS3\Projects\GDE\Tables\Landfire_TNC_GDE_lut.csv"
def process_years_to_trend(
    years,
    tables,
    long_features,
    diff_features,
    base_year=None,
    snapshot_year=None,
    out_gdb_name=None,
):
    """
    Utilizing a base and snapshot year, trend data are generated for the associated
    time period.
    Procedure:
        1) creates a blank output workspace with necessary feature dataset categories,
           uniquely named
        2) generates tables long on year for all tabular data and summary areas
        3) generates difference tables for all tabular data and summary features
           (Summary Areas, Census Blocks, MAZ, and TAZ)
        4) upon completion, replaces the existing copy of the Trend/NearTerm gdb with
           the newly processed version
    """
    # TODO: add a try/except to delete any intermediate data created
    # Validation
    if base_year is None:
        base_year = years[0]
    if snapshot_year is None:
        snapshot_year = years[-1]
    if base_year not in years or snapshot_year not in years:
        raise ValueError("Base year and snapshot year must be in years list")
    if out_gdb_name is None:
        out_gdb_name = "Trend"

    # Set criteria
    table_criteria = [spec["table"] for spec in tables]
    diff_criteria = [spec["table"][0] for spec in diff_features]
    long_criteria = [spec["table"][0] for spec in long_features]

    # make a blank geodatabase
    out_path = PMT.validate_directory(BUILD)
    out_gdb = b_help.make_trend_template(out_path)

    # Get snapshot data
    for yi, year in enumerate(years):
        process_year = year
        if year == snapshot_year:
            if year == "NearTerm":
                process_year = snapshot_year = "NearTerm"
            else:
                process_year = snapshot_year = "Current"
        in_gdb = PMT.validate_geodatabase(
            gdb_path=PMT.make_path(BUILD, f"Snapshot_{process_year}.gdb"),
            overwrite=False,
        )
        # Make every table extra long on year
        year_tables = PMT._list_table_paths(gdb=in_gdb, criteria=table_criteria)
        year_fcs = PMT._list_fc_paths(gdb=in_gdb, fds_criteria="*", fc_criteria=long_criteria)
        elongate = year_tables + year_fcs
        for elong_table in elongate:
            elong_out_name = os.path.split(elong_table)[1] + "_byYear"
            if yi == 0:
                # Initialize the output table
                print(f"Creating long table {elong_out_name}")
                arcpy.TableToTable_conversion(in_rows=elong_table,
                                              out_path=out_gdb,
                                              out_name=elong_out_name)
            else:
                # Append to the output table
                print(f"Appending to long table {elong_out_name} ({process_year})")
                out_table = PMT.make_path(out_gdb, elong_out_name)
                arcpy.Append_management(inputs=elong_table,
                                        target=out_table,
                                        schema_type="NO_TEST")
        # Get snapshot and base year params
        if process_year == base_year:
            base_tables = year_tables[:]
            base_fcs = PMT._list_fc_paths(gdb=in_gdb, fds_criteria="*", fc_criteria=diff_criteria)
        elif process_year == snapshot_year:
            snap_tables = year_tables[:]
            snap_fcs = PMT._list_fc_paths(gdb=in_gdb, fds_criteria="*", fc_criteria=diff_criteria)

    # Make difference tables (snapshot - base)
    for base_table, snap_table, specs in zip(base_tables, snap_tables, tables):
        out_name = os.path.split(base_table)[1] + "_diff"
        out_table = PMT.make_path(out_gdb, out_name)
        idx_cols = specs["index_cols"]
        diff_df = PMT.table_difference(this_table=snap_table,
                                       base_table=base_table,
                                       idx_cols=idx_cols)
        print(f"Creating table {out_name}")
        PMT.df_to_table(df=diff_df, out_table=out_table, overwrite=True)

    # Make difference fcs (snapshot - base)
    for base_fc, snap_fc, spec in zip(base_fcs, snap_fcs, diff_features):
        # TODO: will raise if not all diff features are found, but maybe that's good?
        # Get specs
        fc_name, fc_id, fc_fds = spec["table"]
        idx_cols = spec["index_cols"]
        if isinstance(idx_cols, string_types):
            idx_cols = [idx_cols]
        if fc_id not in idx_cols:
            idx_cols.append(fc_id)
        out_fds = PMT.make_path(out_gdb, fc_fds)
        out_name = fc_name + "_diff"
        out_table = PMT.make_path(out_fds, out_name)

        # Field mappings
        field_mappings = arcpy.FieldMappings()
        for idx_col in idx_cols:
            fm = arcpy.FieldMap()
            fm.addInputField(base_fc, idx_col)
            field_mappings.addFieldMap(fm)

        # Copy geoms
        print(f"Creating feature class {out_name}")
        arcpy.FeatureClassToFeatureClass_conversion(
            in_features=base_fc,
            out_path=out_fds,
            out_name=out_name,
            field_mapping=field_mappings,
        )

        # Get table difference
        diff_df = PMT.table_difference(this_table=snap_fc,
                                       base_table=base_fc,
                                       idx_cols=idx_cols)

        # Extend attribute table
        drop_cols = [c for c in diff_df.columns if c in idx_cols and c != fc_id]
        diff_df.drop(columns=drop_cols, inplace=True)
        print("... adding difference columns")
        PMT.extend_table_df(
            in_table=out_table,
            table_match_field=fc_id,
            df=diff_df,
            df_match_field=fc_id,
        )

    # TODO: calculate percent change in value over base for summary areas
    print("Finalizing the trend")
    final_gdb = PMT.make_path(BUILD, f"{out_gdb_name}.gdb")
    b_help.finalize_output(intermediate_gdb=out_gdb, final_gdb=final_gdb)
def Wanderungssalden_schaetzen(self):
    parameters = self.par
    projektname = self.projectname

    cursor = self.parent_tbx.query_table(table_name='Chronik_Nutzung',
                                         columns=['Arbeitsschritt', 'Letzte_Nutzung'],
                                         workspace='FGDB_Einnahmen.gdb')
    salden_berechnet = True
    for row in cursor:
        if row[0] == "Wanderung Einwohner" and row[1] is None:
            salden_berechnet = False

    if self.par.aktualisieren.value == True or salden_berechnet == False:
        lib_einnahmen.create_gemeindebilanzen(self, projektname)

        workspace_projekt_definition = self.folders.get_db('FGDB_Definition_Projekt.gdb', projektname)
        workspace_projekt_einnahmen = self.folders.get_db('FGDB_Einnahmen.gdb', projektname)
        wanderungssalden = os.path.join(workspace_projekt_einnahmen, "Gemeindebilanzen")
        projektflaechen = join(workspace_projekt_definition, 'Teilflaechen_Plangebiet')

        if arcpy.Exists("projektflaechen_lyr"):
            arcpy.Delete_management("projektflaechen_lyr")
        arcpy.MakeFeatureLayer_management(projektflaechen, "projektflaechen_lyr")

        fields = ["Einw_Zuzug", "Einw_Fortzug", "Einw_Saldo", "Wanderungsanteil_Ew",
                  "Gewichtete_Ew", "SvB_Zuzug", "SvB_Fortzug", "SvB_Saldo"]
        cursor = arcpy.da.UpdateCursor(wanderungssalden, fields)
        for gemeinde in cursor:
            gemeinde[0] = 0
            gemeinde[1] = 0
            gemeinde[2] = 0
            gemeinde[3] = 0
            gemeinde[4] = 0
            gemeinde[5] = 0
            gemeinde[6] = 0
            gemeinde[7] = 0
            cursor.updateRow(gemeinde)

        # read weighting factors
        Wichtungen_Gewerbe = {}
        Wichtungen_Wohnen = {}
        path_distanzen = self.folders.get_base_table("FGDB_Einnahmen_Tool.gdb",
                                                     "Wanderung_Entfernungswichtung")
        cursor = arcpy.da.SearchCursor(path_distanzen,
                                       ["Distance", "Wichtung_Wohnen", "Wichtung_Gewerbe"])
        for distanz in cursor:
            Wichtungen_Wohnen[str(int(distanz[0]))] = distanz[1]
            Wichtungen_Gewerbe[str(int(distanz[0]))] = distanz[2]
        # arcpy.AddMessage(Wichtungen_Wohnen)

        # read migration marginal totals (shares inside/outside 25 km and new business formations [commercial only])
        Randsummen_Gewerbe = {}
        Randsummen_Wohnen = {}
        path_randsummen = self.folders.get_base_table("FGDB_Einnahmen_Tool.gdb",
                                                      "Wanderung_Randsummen")
        cursor = arcpy.da.SearchCursor(path_randsummen,
                                       ["IDWanderungstyp", "Anteil_Wohnen", "Anteil_Gewerbe"])
        for randsumme in cursor:
            Randsummen_Wohnen[randsumme[0]] = randsumme[1]
            Randsummen_Gewerbe[randsumme[0]] = randsumme[2]
        # arcpy.AddMessage(Randsummen_Wohnen)

        # determine the origin municipalities' shares of inhabitants
        pfad_buffer = os.path.join(workspace_projekt_einnahmen, "buffer_25km")
        if arcpy.Exists(pfad_buffer):
            arcpy.Delete_management(pfad_buffer)
        pfad_Rasterausschnitt = os.path.join(workspace_projekt_einnahmen, "Rasterausschnitt")
        if arcpy.Exists(pfad_Rasterausschnitt):
            arcpy.Delete_management(pfad_Rasterausschnitt)
        pfad_Rasterausschnitt_25km = os.path.join(workspace_projekt_einnahmen, "Rasterausschnitt_25km")
        if arcpy.Exists(pfad_Rasterausschnitt_25km):
            arcpy.Delete_management(pfad_Rasterausschnitt_25km)
        pfad_Punktlayer_25km_posWerte = os.path.join(workspace_projekt_einnahmen, "Punktlayer_25km_posWerte")
        if arcpy.Exists(pfad_Punktlayer_25km_posWerte):
            arcpy.Delete_management(pfad_Punktlayer_25km_posWerte)
        pfad_Entfernungsringe = os.path.join(workspace_projekt_einnahmen, "Entfernungsringe")
        if arcpy.Exists(pfad_Entfernungsringe):
            arcpy.Delete_management(pfad_Entfernungsringe)
        pfad_Herkunftsraeume = os.path.join(workspace_projekt_einnahmen, "Herkunftsgebiete")
        if arcpy.Exists(pfad_Herkunftsraeume):
            arcpy.Delete_management(pfad_Herkunftsraeume)
        pfad_Herkunftsraeume_mit_Ew = os.path.join(workspace_projekt_einnahmen, "Herkunftsgebiete_mit_Ew")
        if arcpy.Exists(pfad_Herkunftsraeume_mit_Ew):
            arcpy.Delete_management(pfad_Herkunftsraeume_mit_Ew)

        pfad_zensusgrid = self.folders.ZENSUS_RASTER_FILE
        pfad_bkggemeinden = self.folders.get_base_table("FGDB_Basisdaten_deutschland.gdb",
                                                        "bkg_gemeinden")

        # 25 km buffer around the project area
        arcpy.Buffer_analysis(projektflaechen, pfad_buffer, "25000 Meters",
                              "FULL", "ROUND", "ALL", "", "PLANAR")

        # read the extent of the buffer
        desc = arcpy.Describe(pfad_buffer)
        xmin = desc.extent.XMin
        xmax = desc.extent.XMax
        ymin = desc.extent.YMin
        ymax = desc.extent.YMax

        # intersect with the census raster
        arcpy.Clip_management(pfad_zensusgrid, "%s %s %s %s" % (xmin, ymin, xmax, ymax),
                              pfad_Rasterausschnitt, pfad_buffer, "2147483647",
                              "ClippingGeometry", "NO_MAINTAIN_EXTENT")

        # convert the raster clip to a point layer
        arcpy.RasterToPoint_conversion(pfad_Rasterausschnitt, pfad_Rasterausschnitt_25km, "Value")

        # remove empty settlement cells
        arcpy.Select_analysis(pfad_Rasterausschnitt_25km, pfad_Punktlayer_25km_posWerte,
                              '"grid_code" > 0')

        # create multiple ring buffers around the project area
        arcpy.MultipleRingBuffer_analysis(projektflaechen, pfad_Entfernungsringe,
                                          "1500;2500;3500;4500;6500;8500;11500;14500;18500;25000",
                                          "Meters", "distance", "ALL", "FULL")

        # intersect the ring buffers with the municipalities
        arcpy.Intersect_analysis([pfad_bkggemeinden, pfad_Entfernungsringe],
                                 pfad_Herkunftsraeume, "NO_FID", "", "INPUT")

        # join the origin areas with the sums of the respective point features
        fieldmappings = arcpy.FieldMappings()
        fieldmappings.addTable(pfad_Herkunftsraeume)
        fieldmappings.addTable(pfad_Punktlayer_25km_posWerte)
        gridcode_FieldIndex = fieldmappings.findFieldMapIndex("grid_code")
        fieldmap = fieldmappings.getFieldMap(gridcode_FieldIndex)
        field = fieldmap.outputField
        field.name = "Summe_Ew"
        field.aliasName = "Summe_Ew"
        fieldmap.outputField = field
        fieldmap.mergeRule = "sum"
        fieldmappings.replaceFieldMap(gridcode_FieldIndex, fieldmap)
        arcpy.SpatialJoin_analysis(pfad_Herkunftsraeume, pfad_Punktlayer_25km_posWerte,
                                   pfad_Herkunftsraeume_mit_Ew, "JOIN_ONE_TO_ONE",
                                   "KEEP_ALL", fieldmappings)

        # SvB_je_EW (employees per inhabitant)
        Summe_Wichtungsfaktoren_Gesamtraum_Wohnen = 0
        Summe_Wichtungsfaktoren_Gesamtraum_Gewerbe = 0
        Summe_Wichtungsfaktoren_Gemeinde_Wohnen = 0
        Summe_Wichtungsfaktoren_Gemeinde_Gewerbe = 0
        SvB_je_EW = 0
        herkunftsraeume = []

        cursor_gemeindebilanz = arcpy.da.SearchCursor(wanderungssalden, ["AGS", "SvB_pro_Ew"])
        for gemeinde in cursor_gemeindebilanz:
            where = '"AGS"' + "='" + gemeinde[0] + "'"
            cursor_Summe_Ew = arcpy.da.SearchCursor(pfad_Herkunftsraeume_mit_Ew,
                                                    ["AGS", "Summe_Ew", "distance", "Shape_Area"],
                                                    where)
            for gemeindeteil in cursor_Summe_Ew:
                if gemeindeteil[1] >= 1:
                    Wichtungsfaktor_Wohnen = (gemeindeteil[1] * gemeindeteil[3]
                                              * Wichtungen_Wohnen[str(int(gemeindeteil[2]))])
                    Wichtungsfaktor_Gewerbe = (gemeindeteil[1] * gemeinde[1] * gemeindeteil[3]
                                               * Wichtungen_Gewerbe[str(int(gemeindeteil[2]))])
                    herkunftsraeume.append([gemeindeteil[0],
                                            Wichtungsfaktor_Wohnen,
                                            Wichtungsfaktor_Gewerbe])
                    Summe_Wichtungsfaktoren_Gesamtraum_Wohnen += Wichtungsfaktor_Wohnen
                    Summe_Wichtungsfaktoren_Gesamtraum_Gewerbe += Wichtungsfaktor_Gewerbe

        ap = 0
        bewohner = 0
        teilflaechen = self.folders.get_table("Teilflaechen_Plangebiet", "FGDB_Definition_Projekt.gdb")
        rahmendaten = self.folders.get_table("Projektrahmendaten", "FGDB_Definition_Projekt.gdb")
        cursor = arcpy.da.SearchCursor(rahmendaten, ["AGS"])
        for row in cursor:
            ags_projekt = row[0]
        cursor = arcpy.da.SearchCursor(teilflaechen, ["ew", "AP_gesamt"])
        for flaeche in cursor:
            ap += flaeche[1]
            bewohner += flaeche[0]

        # allocate out-migration (and in-migration [project municipality only])
        cursor_gemeindebilanz = arcpy.da.UpdateCursor(wanderungssalden, [
            "AGS", "SvB_pro_Ew", "Einw_Fortzug", "SvB_Fortzug", "Einw_Zuzug",
            "SvB_Zuzug", "SvB_Saldo", "Einw_Saldo", "Wanderungsanteil_Ew",
            "Wanderungsanteil_SvB"
        ])
        for gemeinde in cursor_gemeindebilanz:
            Summe_Wichtungsfaktoren_Gemeinde_Wohnen = 0
            Summe_Wichtungsfaktoren_Gemeinde_Gewerbe = 0
            for raum in herkunftsraeume:
                if raum[0] == gemeinde[0]:
                    Summe_Wichtungsfaktoren_Gemeinde_Wohnen += raum[1]
                    Summe_Wichtungsfaktoren_Gemeinde_Gewerbe += raum[2]
            Einw_Fortzug_neu = -1 * (bewohner * Randsummen_Wohnen[1]
                                     * Summe_Wichtungsfaktoren_Gemeinde_Wohnen
                                     / Summe_Wichtungsfaktoren_Gesamtraum_Wohnen)
            if Einw_Fortzug_neu != 0:
                gemeinde[2] = Einw_Fortzug_neu  # - 0.5
            else:
                gemeinde[2] = Einw_Fortzug_neu
            SvB_Fortzug_neu = -1 * (ap * Randsummen_Gewerbe[1]
                                    * Summe_Wichtungsfaktoren_Gemeinde_Gewerbe
                                    / Summe_Wichtungsfaktoren_Gesamtraum_Gewerbe)
            if SvB_Fortzug_neu != 0:
                gemeinde[3] = SvB_Fortzug_neu  # - 0.5
            else:
                gemeinde[3] = SvB_Fortzug_neu
            if gemeinde[0] == ags_projekt:
                gemeinde[4] = bewohner
                gemeinde[5] = ap
            else:
                gemeinde[8] = (Summe_Wichtungsfaktoren_Gemeinde_Wohnen
                               / Summe_Wichtungsfaktoren_Gesamtraum_Wohnen)
                gemeinde[9] = (Summe_Wichtungsfaktoren_Gemeinde_Gewerbe
                               / Summe_Wichtungsfaktoren_Gesamtraum_Gewerbe)
            gemeinde[6] = gemeinde[3] + gemeinde[5]
            gemeinde[7] = gemeinde[4] + gemeinde[2]
            cursor_gemeindebilanz.updateRow(gemeinde)

        # sum of assigned out-migrations (within 25 km) and in-migrations [each incl. rounding effects]
        Summe_Zugeordneter_Fortzug_Ew = 0.0
        Summe_Zugeordneter_Zuzug_Ew = 0.0
        Summe_Zugeordneter_Fortzug_AP = 0.0
        Summe_Zugeordneter_Zuzug_AP = 0.0
        cursor_summe = arcpy.da.SearchCursor(wanderungssalden, [
            "AGS", "SvB_pro_Ew", "Einw_Fortzug", "SvB_Fortzug", "Einw_Zuzug",
            "SvB_Zuzug", "SvB_Saldo", "Einw_Saldo"
        ])
        for gemeinde in cursor_summe:
            Summe_Zugeordneter_Fortzug_Ew += gemeinde[2]
            Summe_Zugeordneter_Zuzug_Ew += gemeinde[4]
            Summe_Zugeordneter_Fortzug_AP += gemeinde[3]
            Summe_Zugeordneter_Zuzug_AP += gemeinde[5]
        Differenz_Ew = Summe_Zugeordneter_Zuzug_Ew + Summe_Zugeordneter_Fortzug_Ew
        Differenz_AP = Summe_Zugeordneter_Zuzug_AP + Summe_Zugeordneter_Fortzug_AP

        # new business formations (commercial only)
        Neugruendungen_AP = ap * Randsummen_Gewerbe[3] + 0.5

        # out-migration to the rest of the federal territory / abroad
        # (rounding losses are also accounted for here)
        Fortzuege_restliches_Bundesgebiet_und_Welt_Ew = -1 * Differenz_Ew
        Fortzuege_restliches_Bundesgebiet_und_Welt_AP = -1 * (Differenz_AP - Neugruendungen_AP)
        # arcpy.AddMessage("Wohnen: Fortzüge restliches Bundesgebiet = {0}".format(Fortzuege_restliches_Bundesgebiet_und_Welt_Ew))
        # arcpy.AddMessage("Gewerbe: Fortzüge restliches Bundesgebiet = {0}".format(Fortzuege_restliches_Bundesgebiet_und_Welt_AP))
        # arcpy.AddMessage("Gewerbe: Neugründungen = {0}".format(Neugruendungen_AP))

        self.parent_tbx.delete_rows_in_table("Zuzugsstatistik_Ew")
        column_values = {
            "Kategorie": [u"Projektgemeinde/Region",
                          u"Restliches Bundesgebiet/Ausland"],
            "Anzahl": [int(round(Summe_Zugeordneter_Fortzug_Ew * -1)),
                       int(round(Differenz_Ew))]
        }
        self.parent_tbx.insert_rows_in_table("Zuzugsstatistik_Ew", column_values)

        self.parent_tbx.delete_rows_in_table("Zuzugsstatistik_SvB")
        column_values = {
            "Kategorie": [u"Projektgemeinde/Region",
                          u"Restliches Bundesgebiet/Ausland",
                          u"Neugründungen"],
            "Anzahl": [int(round(Summe_Zugeordneter_Fortzug_AP * -1)),
                       int(round((Differenz_AP - Neugruendungen_AP))),
                       int(round(Neugruendungen_AP))]
        }
        self.parent_tbx.insert_rows_in_table("Zuzugsstatistik_SvB", column_values)

        c.set_chronicle("Wanderung Einwohner",
                        self.folders.get_table(tablename='Chronik_Nutzung',
                                               workspace="FGDB_Einnahmen.gdb",
                                               project=projektname))
        c.set_chronicle("Wanderung Beschaeftigte",
                        self.folders.get_table(tablename='Chronik_Nutzung',
                                               workspace="FGDB_Einnahmen.gdb",
                                               project=projektname))
def main():
    scratch_datasets = []
    new_fields = [
        'Crash_Count', 'Crash_Count_Weight', 'Crash_Frequency', 'Crash_Rate',
        'Weighted_Crash_Frequency', 'Weighted_Crash_Rate'
    ]
    try:
        streets_intersection = arcpy.GetParameterAsText(0)
        crashes = arcpy.GetParameterAsText(1)
        time_interval, time_unit = arcpy.GetParameterAsText(2).split(' ')
        time_interval = float(time_interval)
        if time_unit == 'Years':
            time_interval = time_interval * 365
        elif time_unit == 'Weeks':
            time_interval = time_interval * 7
        snap_distance = arcpy.GetParameterAsText(3)
        weight_field = arcpy.GetParameterAsText(4)
        weight_table = arcpy.GetParameter(5)
        adt_field = arcpy.GetParameterAsText(6)
        output_crash_rates = arcpy.GetParameterAsText(7)
        params = arcpy.GetParameterInfo()
        shape_type = arcpy.Describe(streets_intersection).shapeType

        weight_provided = False
        if weight_field is not None and weight_field != '':
            weight_provided = True
        adt_provided = False
        if adt_field is not None and adt_field != '':
            adt_provided = True

        arcpy.SetProgressorLabel("Creating Temporary Crash Layer...")
        arcpy.MakeFeatureLayer_management(crashes, "Crash Layer")
        crashes_snap = os.path.join(arcpy.env.scratchGDB, "Crash_Snap")
        if arcpy.Exists(crashes_snap):
            arcpy.Delete_management(crashes_snap)
        arcpy.CopyFeatures_management("Crash Layer", crashes_snap)
        scratch_datasets.append(crashes_snap)

        crash_count_field = new_fields[0]
        crash_count_weight_field = new_fields[1]
        arcpy.AddField_management(crashes_snap, crash_count_field, "Double",
                                  field_alias="Crash Count")
        fields = [crash_count_field]
        if weight_provided:
            arcpy.AddField_management(crashes_snap, crash_count_weight_field, "Double",
                                      field_alias="Crash Count Weight")
            fields.append(crash_count_weight_field)
            fields.append(weight_field)
            for field in arcpy.Describe(crashes).fields:
                if field.name == weight_field:
                    if field.domain is not None and field.domain != '':
                        database = get_workspace(crashes)
                        if database is not None:
                            for domain in arcpy.da.ListDomains(database):
                                if domain.name == field.domain:
                                    if domain.domainType == 'CodedValue':
                                        for key, value in domain.codedValues.items():
                                            for i in range(0, weight_table.rowCount):
                                                if weight_table.getValue(i, 0) == value:
                                                    weight_table.setValue(i, 0, str(key))
                                                    break

        with arcpy.da.UpdateCursor(crashes_snap, fields) as cursor:
            for row in cursor:
                row[0] = 1.0
                if len(fields) == 3:
                    value = str(row[2])
                    for i in range(0, weight_table.rowCount):
                        if value == weight_table.getValue(i, 0):
                            row[1] = weight_table.getValue(i, 1)
                            break
                cursor.updateRow(row)

        if (shape_type == "Polyline"):
            arcpy.SetProgressorLabel("Snapping Crashes to Nearest Street...")
        else:
            arcpy.SetProgressorLabel("Snapping Crashes to Nearest Intersection...")
        snapEnv = [streets_intersection, "EDGE", snap_distance]
        arcpy.Snap_edit(crashes_snap, [snapEnv])

        fms = arcpy.FieldMappings()
        desc = arcpy.Describe(streets_intersection)
        for field in desc.fields:
            if field.type == 'Geometry' or field.type == 'OID' or field.name in new_fields:
                continue
            if shape_type == "Polyline" and field.name == desc.AreaFieldName:
                continue
            fm = arcpy.FieldMap()
            fm.addInputField(streets_intersection, field.name)
            fms.addFieldMap(fm)
        fm = arcpy.FieldMap()
        fm.addInputField(crashes_snap, crash_count_field)
        fm.mergeRule = 'Sum'
        fms.addFieldMap(fm)
        if weight_provided:
            fm = arcpy.FieldMap()
            fm.addInputField(crashes_snap, crash_count_weight_field)
            fm.mergeRule = 'Sum'
            fms.addFieldMap(fm)

        crashes_join = os.path.join(arcpy.env.scratchGDB, "Crash")
        if arcpy.Exists(crashes_join):
            arcpy.Delete_management(crashes_join)
        arcpy.SpatialJoin_analysis(streets_intersection, crashes_snap, crashes_join,
                                   "JOIN_ONE_TO_ONE", "KEEP_ALL", fms, "Intersect", "0 Feet")
        scratch_datasets.append(crashes_join)

        if weight_provided:
            with arcpy.da.UpdateCursor(crashes_join, [crash_count_weight_field]) as cursor:
                for row in cursor:
                    if row[0] == 0:
                        row[0] = None
                    cursor.updateRow(row)

        arcpy.SetProgressorLabel("Calculating Crash Statistics")
        templateDir = os.path.dirname(__file__)
        crash_frequency_field = new_fields[2]
        crash_rate_field = new_fields[3]
        weighted_crash_frequency_field = new_fields[4]
        weighted_crash_rate_field = new_fields[5]

        add_fields = []
        fields = [crash_count_field]
        if (shape_type == "Polyline"):
            fields.append('SHAPE@')
            add_fields = [[crash_frequency_field, "Crashes Per Mile Per Year"],
                          [crash_rate_field, "Crashes Per Million Vehicle Miles"],
                          [weighted_crash_frequency_field, "Weighted Crashes Per Mile Per Year"],
                          [weighted_crash_rate_field, "Weighted Crashes Per Million Vehicle Miles"]]
        else:
            add_fields = [[crash_frequency_field, "Crashes Per Year"],
                          [crash_rate_field, "Crashes Per Million Entering Vehicles"],
                          [weighted_crash_frequency_field, "Weighted Crashes Per Year"],
                          [weighted_crash_rate_field, "Weighted Crashes Per Million Entering Vehicles"]]

        arcpy.AddField_management(crashes_join, add_fields[0][0], "Double",
                                  field_alias=add_fields[0][1])
        fields.append(add_fields[0][0])
        if adt_provided:
            arcpy.AddField_management(crashes_join, add_fields[1][0], "Double",
                                      field_alias=add_fields[1][1])
            fields.append(add_fields[1][0])
            fields.append(adt_field)
        if weight_provided:
            fields.append(crash_count_weight_field)
            arcpy.AddField_management(crashes_join, add_fields[2][0], "Double",
                                      field_alias=add_fields[2][1])
            fields.append(add_fields[2][0])
            if adt_provided:
                arcpy.AddField_management(crashes_join, add_fields[3][0], "Double",
                                          field_alias=add_fields[3][1])
                fields.append(add_fields[3][0])

        with arcpy.da.UpdateCursor(crashes_join, fields) as cursor:
            for row in cursor:
                if row[cursor.fields.index(crash_count_field)] is None:
                    continue
                miles = 1.0
                if 'SHAPE@' in cursor.fields:
                    miles = row[cursor.fields.index('SHAPE@')].getLength('GEODESIC', 'MILES')
                row[cursor.fields.index(crash_frequency_field)] = \
                    row[cursor.fields.index(crash_count_field)] / ((time_interval / 365) * miles)
                if crash_count_weight_field in cursor.fields and \
                        row[cursor.fields.index(crash_count_weight_field)] is not None:
                    row[cursor.fields.index(weighted_crash_frequency_field)] = \
                        row[cursor.fields.index(crash_count_weight_field)] / ((time_interval / 365) * miles)
                if adt_field in cursor.fields and row[cursor.fields.index(adt_field)] is not None:
                    row[cursor.fields.index(crash_rate_field)] = \
                        (row[cursor.fields.index(crash_count_field)] * 1000000) / \
                        (time_interval * row[cursor.fields.index(adt_field)] * miles)
                    if crash_count_weight_field in cursor.fields and \
                            row[cursor.fields.index(crash_count_weight_field)] is not None:
                        row[cursor.fields.index(weighted_crash_rate_field)] = \
                            (row[cursor.fields.index(crash_count_weight_field)] * 1000000) / \
                            (time_interval * row[cursor.fields.index(adt_field)] * miles)
                cursor.updateRow(row)

        arcpy.SetProgressorLabel("Creating Crash Rate Layer...")
        field_info = ""
        fields_to_hide = ['Join_Count', 'TARGET_FID', new_fields[0]]
        if weight_provided:
            fields_to_hide.append(new_fields[1])
        field_list = arcpy.ListFields(crashes_join)
        for field in field_list:
            if field.name in fields_to_hide:
                field_info = "{0}{1} {1} HIDDEN;".format(field_info, field.name)
            else:
                field_info = "{0}{1} {1} VISIBLE;".format(field_info, field.name)
        arcpy.MakeFeatureLayer_management(crashes_join, "Output Crash Layer",
                                          field_info=field_info[:-1])
        arcpy.SelectLayerByAttribute_management("Output Crash Layer", "NEW_SELECTION",
                                                '{0} IS NOT NULL'.format(new_fields[2]))
        arcpy.CopyFeatures_management("Output Crash Layer", output_crash_rates)

        lyrx_json = _CRASH_RATE_POINT
        if (shape_type == "Polyline"):
            lyrx_json = _CRASH_RATE_POLYLINE
        with tempfile.NamedTemporaryFile(delete=False) as temp_lyrx:
            temp_lyrx.write(lyrx_json.encode())
        lyrx_path = "{0}.lyrx".format(temp_lyrx.name)
        os.rename(temp_lyrx.name, lyrx_path)
        params[7].symbology = lyrx_path
    finally:
        for dataset in scratch_datasets:
            if arcpy.Exists(dataset):
                arcpy.Delete_management(dataset)
spans_dif_D = (list(set(fieldnames_Spans) - set(SPAN_FIELDS)))
for add_field in spans_dif_A:
    arcpy.AddMessage("Adding Required Field >{0}< ... ".format(add_field))
    arcpy.AddField_management(inSpans, add_field, "TEXT", "", "", 100)
for del_field in spans_dif_D:
    if del_field not in ['Shape', 'OBJECTID', 'OBJECTID_1']:
        arcpy.AddMessage("Deleting Unnecessary Field %s" % del_field)
        arcpy.DeleteField_management(inSpans, del_field)

# SPANS FIELD MAP
fieldMappings_spans = arcpy.FieldMappings()

# 1. BST TAG
fldMap_BST_TAG_spans = arcpy.FieldMap()
fldMap_BST_TAG_spans.addInputField(inSpans, "BST_TAG")
BST_TAG_spans_field = fldMap_BST_TAG_spans.outputField
BST_TAG_spans_field.name = "BST_TAG"
fldMap_BST_TAG_spans.outputField = BST_TAG_spans_field
fieldMappings_spans.addFieldMap(fldMap_BST_TAG_spans)

# 2. AST TAG
fldMap_AST_TAG_spans = arcpy.FieldMap()
fldMap_AST_TAG_spans.addInputField(inSpans, "AST_TAG")
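
# --- Loop-based alternative to the repeated FieldMap blocks above ---
# A sketch only: this helper and the two-field list are assumptions based on the
# fragment (only BST_TAG and AST_TAG are visible); it builds the same kind of
# mapping without repeating one block per field.
def build_field_mappings(source, field_names):
    mappings = arcpy.FieldMappings()
    for name in field_names:
        fld_map = arcpy.FieldMap()
        fld_map.addInputField(source, name)
        out_field = fld_map.outputField
        out_field.name = name          # keep the output field name unchanged
        fld_map.outputField = out_field
        mappings.addFieldMap(fld_map)
    return mappings

fieldMappings_spans_alt = build_field_mappings(inSpans, ["BST_TAG", "AST_TAG"])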
def elementType():
    '''function that assigns element type to all features based on name of
    shapefile within which the feature is contained'''
    for ins, output in zip(input_features, elementTables):
        feature = os.path.join(elementGDB, ins)
        fieldmappings = arcpy.FieldMappings()
        fieldmappings.addTable(feature)
        # fields to be kept after spatial join
        keepFields = ["OID", "dm_stat", "elem_type", "created_date"]
        # remove all fields not in keep fields from field map
        for field in fieldmappings.fields:
            if field.name not in keepFields:
                fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(field.name))
        arcpy.TableToTable_conversion(os.path.join(elementGDB, ins), env.workspace,
                                      output, "", fieldmappings)

    for table in elementTables:
        arcpy.AddField_management(table, "Feature_Class", "TEXT", field_length=50,
                                  field_alias="Taxa")
        arcpy.CalculateField_management(table, "Feature_Class", "'" + table + "'", "PYTHON_9.3")

    for table in elementTables[0:3]:
        with arcpy.da.UpdateCursor(table, ['elem_type', 'Feature_Class']) as cursor:
            for row in cursor:
                if row[0] == 0 or row[0] == 1:
                    row[1] = 'Lepidoptera and Other Insects'
                    cursor.updateRow(row)
                elif row[0] == 2:
                    row[1] = 'Other Invertebrates'
                    cursor.updateRow(row)
                elif row[0] == 3:
                    row[1] = 'Plants'
                    cursor.updateRow(row)
                elif row[0] == 4:
                    row[1] = 'Vertebrate Animals'
                    cursor.updateRow(row)
        arcpy.DeleteField_management(table, "elem_type")

    elementRecords = arcpy.CreateTable_management(env.workspace, "elementRecords", "element_point")
    arcpy.Append_management(elementTables, elementRecords)

    with arcpy.da.UpdateCursor(elementRecords, ["dm_stat", "Feature_Class"]) as cursor:
        for row in cursor:
            if row[0] == "dr":
                row[0] = "Draft"
                cursor.updateRow(row)
            if row[0] == "idrev":
                row[0] = "Ready for ID Review"
                cursor.updateRow(row)
            if row[0] == "dmproc":
                row[0] = "DM Processed"
                cursor.updateRow(row)
            if row[0] == "dmready":
                row[0] = "Ready for DM"
                cursor.updateRow(row)
            if row[0] == "dmpend":
                row[0] = "DM Pending"
                cursor.updateRow(row)
            if row[1] == "community_poly" or row[1] == "community_point":
                row[1] = "Communities"
                cursor.updateRow(row)
            if row[1] == "survey_poly":
                row[1] = "Survey Sites"
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(elementRecords, "created_date") as cursor:
        for row in cursor:
            if row[0] > datetime.datetime(int(year), int(q), 1, 0, 0, 0, 0):
                cursor.deleteRow()
orig_input_facilities = facilities_obj.inputFeatures

# Make a layer so we can use AddJoin
orig_input_facilities_lyr = "OrigInputFacilitiesLayer"
arcpy.management.MakeFeatureLayer(orig_input_facilities, orig_input_facilities_lyr)

# Make a join based on FacilityOID from routes sublayer and OID from orig_input_facilities
arcpy.management.AddJoin(orig_input_facilities_lyr, orig_input_facilities_oid,
                         routes_sub_layer, "FacilityOID")
where_clause = "CFRoutes.FacilityOID IS NOT NULL"
arcpy.management.SelectLayerByAttribute(orig_input_facilities_lyr, "NEW_SELECTION", where_clause)
arcpy.management.RemoveJoin(orig_input_facilities_lyr, "CFRoutes")

# Transfer all attributes and the OID as ORIG_ID.
# If ORIG_ID already exists, get a unique field name such as ORIG_ID_1
fac_fms = arcpy.FieldMappings()
fac_fms.addTable(orig_input_facilities_lyr)
fac_fm = arcpy.FieldMap()
fac_fm.addInputField(orig_input_facilities_lyr, orig_input_facilities_oid)
out_fld = fac_fm.outputField
unique_fld_name = nau.get_unique_field_name("ORIG_FID", orig_input_facilities_fld_names)
out_fld.name = unique_fld_name
out_fld.aliasName = unique_fld_name
fac_fm.outputField = out_fld
fac_fms.addFieldMap(fac_fm)
arcpy.conversion.FeatureClassToFeatureClass(orig_input_facilities_lyr, out_gdb_workspace,
                                            out_facilities_name, field_mapping=fac_fms)
# arcpy.management.CopyFeatures(orig_input_facilities_lyr, out_facilities_fc)
from GTFSdownloader import GTFSdownloader

table = GTFSdownloader.get_table(Config.site)

# For unit testing, I just want to process the local data!
aoi = [
    "Benton Area Transit",
    "Sunset Empire Transportation District - The Bus",
    "NorthWest POINT",
    "Lincoln County Transit"
]

gtfs = GTFSimport(workspace)

all_routes = []
all_stops = []
all_agencies = []

routes_fm = arcpy.FieldMappings()
stops_fm = arcpy.FieldMappings()
agencies_fm = arcpy.FieldMappings()

for item in table:
    agency = item['Transit service']
    if agency in aoi:
        print("IMPORTING \"%s\"..." % agency)
        fc = gtfs.do_import(Config.sourcedir, agency, overwrite=False)
        all_routes.append(fc[0])
        all_stops.append(fc[1])
        all_agencies.append(fc[2])
        routes_fm.addTable(fc[0])
        stops_fm.addTable(fc[1])
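# A minimal sketch (an assumption, not part of the original snippet) of how the
# per-agency layers collected above might be combined once the loop finishes:
# Merge_management accepts a list of inputs and a FieldMappings object, so the
# accumulated routes_fm/stops_fm can reconcile the schemas. The output names
# "routes_merged" and "stops_merged" are made up for illustration.
if all_routes:
    arcpy.Merge_management(all_routes, workspace + "/routes_merged", routes_fm)
if all_stops:
    arcpy.Merge_management(all_stops, workspace + "/stops_merged", stops_fm)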
def simplify(self): try: # Init WorkSpase # arcpy.env.overwriteOutput = 1 duongDanNguon = "C:/Generalize_25_50/50K_Process.gdb" duongDanDich = "C:/Generalize_25_50/50K_Final.gdb" urlFile = '/ConfigSimplify.json' _algorithm = "BEND_SIMPLIFY" _tolerance = "50 Meters" _error_option = "NO_CHECK" _collapsed_point_option = "NO_KEEP" _checkExitLayer = False if arcpy.Exists(duongDanNguon + "/ThuyHe/SongSuoiL_KenhMuongL_SnapPBM") and arcpy.Exists(duongDanNguon + "/PhuBeMat/PhuBeMat_Full"): #arcpy.CopyFeatures_management(duongDanNguon + "/PhuBeMat/PhuBeMat_LocMatNuoc", duongDanNguon + "/PhuBeMat/PhuBeMat") _checkExitLayer = True #Doc file config s1 = inspect.getfile(inspect.currentframe()) s2 = os.path.dirname(s1) urlFile = s2 + urlFile arcpy.AddMessage("\n# Doc file cau hinh: \"{0}\"".format(urlFile)) if os.path.exists(urlFile): fileConfig = open(urlFile) listLayerConfig = json.load(fileConfig) fileConfig.close() ############################### Simplify Polygon ######################################## arcpy.Integrate_management([[duongDanNguon + "/PhuBeMat/PhuBeMat", 1]], "2 Meters") arcpy.AddMessage("\n# Bat dau Simplify Polygon") listPolygon = [] fieldMappings = arcpy.FieldMappings() enableFields = [] inputsMerge = [] for objConfig in listLayerConfig: if objConfig["LayerType"] == "Polygon" and objConfig["RunStatus"] == "True": if not(_checkExitLayer == False and objConfig["LayerName"] == "PhuBeMat_Full"): temp = { "LayerType": objConfig["LayerType"], "DatasetName": objConfig["DatasetName"], "LayerName": objConfig["LayerName"], "featureLayer": "in_memory\\" + objConfig["LayerName"] + "_Layer", "featureCopy": "in_memory\\" + objConfig["LayerName"] + "_Copy", "featureCopyLayer": "in_memory\\" + objConfig["LayerName"] + "_Copy_Layer", "FID_XXX": "FID_" + objConfig["LayerName"] } listPolygon.append(temp) elif objConfig["LayerType"] == "Polyline" and objConfig["RunStatus"] == "True" and objConfig["LayerName"] <> "DuongBinhDo": if not(_checkExitLayer == False and objConfig["LayerName"] == "SongSuoiL_KenhMuongL_SnapPBM"): arcpy.AddMessage("\n# Buffer lop: \"{0}\"".format(objConfig["LayerName"])) layerPath = duongDanNguon + "/" + objConfig["DatasetName"] + "/" + objConfig["LayerName"] arcpy.Buffer_analysis(in_features = layerPath, out_feature_class = layerPath + "_Buffer", buffer_distance_or_field = "0.1 Meters", line_side = "RIGHT") temp = { "LayerType": objConfig["LayerType"], "DatasetName": objConfig["DatasetName"], "LayerName": objConfig["LayerName"] + "_Buffer", "featureLayer": "in_memory\\" + objConfig["LayerName"] + "_Buffer_Layer", "featureCopy": "in_memory\\" + objConfig["LayerName"] + "_Buffer_Copy", "featureCopyLayer": "in_memory\\" + objConfig["LayerName"] + "_Buffer_Copy_Layer", "FID_XXX": "FID_" + objConfig["LayerName"] } listPolygon.append(temp) for element in listPolygon: arcpy.AddMessage("\n# Xu ly lop: {0}".format(element["LayerName"])) layerPath = duongDanNguon + "/" + element["DatasetName"] + "/" + element["LayerName"] arcpy.MakeFeatureLayer_management(layerPath, element["featureLayer"]) arcpy.AddField_management(element["featureLayer"], element["FID_XXX"], "LONG") with arcpy.da.UpdateCursor(element["featureLayer"], ["OID@", element["FID_XXX"]]) as cursor: for row in cursor: row[1] = row[0] cursor.updateRow(row) arcpy.CopyFeatures_management(layerPath, element["featureCopy"]) arcpy.MakeFeatureLayer_management(element["featureCopy"], element["featureCopyLayer"]) ## Field Mappings ## enableFields.append(element["FID_XXX"]) fieldMappings.addTable(element["featureCopyLayer"]) 
inputsMerge.append(element["featureCopyLayer"]) for field in fieldMappings.fields: if field.name not in enableFields: fieldMappings.removeFieldMap(fieldMappings.findFieldMapIndex(field.name)) ## Merge ## arcpy.AddMessage("\n# Merge Polygon...") outPathMerge = "in_memory\\outPathMergeTemp" #outPathMerge = "C:/Generalize_25_50/50K_Process.gdb/DanCuCoSoHaTang/outPathMergeTemp" arcpy.Merge_management (inputsMerge, outPathMerge, fieldMappings) ## Simplify Polygon ## arcpy.AddMessage("\n# Simplify Polygon...") outPathSimplify = "in_memory\\outPathSimplifyTemp" #outPathSimplify = "C:/Generalize_25_50/50K_Process.gdb/DanCuCoSoHaTang/outPathSimplifyTemp" arcpy.SimplifyPolygon_cartography(in_features = outPathMerge, out_feature_class = outPathSimplify, algorithm = _algorithm, tolerance = _tolerance, minimum_area = "0 SquareMeters", error_option = _error_option, collapsed_point_option = _collapsed_point_option) ## MakeLayerFeature ## outPathSimplifyLayer = "in_memory\\outPathSimplifyTempLayer" arcpy.MakeFeatureLayer_management(outPathSimplify, outPathSimplifyLayer) ## Update Shape Feature Class ## arcpy.AddMessage("\n# Update Shape Feature Class:") for element in listPolygon: arcpy.AddMessage("\n\t# Update {0}...".format(element["LayerName"])) ### MakeLayerFeature ### layerPath = duongDanNguon + "/" + element["DatasetName"] + "/" + element["LayerName"] arcpy.MakeFeatureLayer_management(layerPath, element["featureLayer"]) ### Select ### strQuery = element["FID_XXX"] + " IS NOT NULL" arcpy.SelectLayerByAttribute_management(outPathSimplifyLayer, "NEW_SELECTION", strQuery) ### Copy To Table Temp ### outTableTemp = "in_memory\\outTableTemp" arcpy.CopyFeatures_management(outPathSimplifyLayer, outTableTemp) ### ... ### with arcpy.da.UpdateCursor(element["featureLayer"], ["OID@", "SHAPE@"]) as cursor: for row in cursor: found = False with arcpy.da.UpdateCursor(outTableTemp, [element["FID_XXX"], "SHAPE@"]) as cursorSub: for rowSub in cursorSub: if row[0] == rowSub[0]: found = True row[1] = rowSub[1] cursor.updateRow(row) cursorSub.deleteRow() break if found == False: cursor.deleteRow() arcpy.AddMessage("\n# Hoan thanh Simplify Polygon!!!") ############################################## Simplify Line ############################# arcpy.AddMessage("\n# Bat dau Simplify Line") listPolyLine = [] fieldMappingLine = arcpy.FieldMappings() enableFieldLine = [] inputsMergeLine = [] for objConfig in listLayerConfig: if objConfig["LayerType"] == "Polyline" and objConfig["RunStatus"] == "True": if not(_checkExitLayer == False and objConfig["LayerName"] == "SongSuoiL_KenhMuongL_SnapPBM"): temp = { "LayerType": objConfig["LayerType"], "DatasetName": objConfig["DatasetName"], "LayerName": objConfig["LayerName"], "featureLayer": "in_memory\\" + objConfig["LayerName"] + "_Layer", "featureCopy": "in_memory\\" + objConfig["LayerName"] + "_Copy", "featureCopyLayer": "in_memory\\" + objConfig["LayerName"] + "_Copy_Layer", "FID_XXX": "FID_" + objConfig["LayerName"] } listPolyLine.append(temp) for element in listPolyLine: arcpy.AddMessage("\n# Xu ly lop: {0}".format(element["LayerName"])) layerPath = duongDanNguon + "/" + element["DatasetName"] + "/" + element["LayerName"] if element["LayerName"] == "DuongBinhDo": arcpy.AddField_management(layerPath, "OLD_OBJECTID", "LONG", None, None, None,"OLD_OBJECTID", "NULLABLE") arcpy.CalculateField_management(layerPath, "OLD_OBJECTID", "!OBJECTID!", "PYTHON_9.3") arcpy.MakeFeatureLayer_management(layerPath, element["featureLayer"]) arcpy.AddField_management(element["featureLayer"], 
element["FID_XXX"], "LONG") with arcpy.da.UpdateCursor(element["featureLayer"], ["OID@", element["FID_XXX"]]) as cursor: for row in cursor: row[1] = row[0] cursor.updateRow(row) arcpy.CopyFeatures_management(layerPath, element["featureCopy"]) arcpy.MakeFeatureLayer_management(element["featureCopy"], element["featureCopyLayer"]) ## Field Mappings ## enableFieldLine.append(element["FID_XXX"]) fieldMappingLine.addTable(element["featureCopyLayer"]) inputsMergeLine.append(element["featureCopyLayer"]) for field in fieldMappingLine.fields: if field.name not in enableFieldLine: fieldMappingLine.removeFieldMap(fieldMappingLine.findFieldMapIndex(field.name)) ## Merge ## arcpy.AddMessage("\n# Merge Polyline...") outPathMerge = "in_memory\\outPathMergeTemp" arcpy.Merge_management (inputsMergeLine, outPathMerge, fieldMappingLine) ## Simplify Polyline ## arcpy.AddMessage("\n# Simplify Polyline...") outPathSimplify = "in_memory\\outPathSimplifyTemp" ''' arcpy.MakeFeatureLayer_management(duongDanNguon + "/ThuyHe/SongSuoiA", "ThuyHe_SongSuoiA_Lyr") arcpy.MakeFeatureLayer_management(duongDanNguon + "/ThuyHe/MatNuocTinh", "ThuyHe_MatNuocTinh_Lyr") arcpy.MakeFeatureLayer_management(duongDanNguon + "/ThuyHe/KenhMuongA", "ThuyHe_KenhMuongA_Lyr") in_barriers_Line = ["ThuyHe_SongSuoiA_Lyr", "ThuyHe_MatNuocTinh_Lyr", "ThuyHe_KenhMuongA_Lyr"] ''' arcpy.SimplifyLine_cartography(in_features = outPathMerge, out_feature_class = outPathSimplify, algorithm = _algorithm, tolerance = _tolerance, collapsed_point_option = _collapsed_point_option) ## MakeLayerFeature ## outPathSimplifyLayer = "in_memory\\outPathSimplifyTempLayer" arcpy.MakeFeatureLayer_management(outPathSimplify, outPathSimplifyLayer) ## Update Shape Feature Class ## arcpy.AddMessage("\n# Update Shape Feature Class:") for element in listPolyLine: if element["LayerType"] == "Polyline": arcpy.AddMessage("\n\t# Update {0}...".format(element["LayerName"])) ### MakeLayerFeature ### layerPath = duongDanNguon + "/" + element["DatasetName"] + "/" + element["LayerName"] arcpy.MakeFeatureLayer_management(layerPath, element["featureLayer"]) ### Select ### strQuery = element["FID_XXX"] + " IS NOT NULL" arcpy.SelectLayerByAttribute_management(outPathSimplifyLayer, "NEW_SELECTION", strQuery) ### Copy To Table Temp ### outTableTemp = "in_memory\\outTableTemp" arcpy.CopyFeatures_management(outPathSimplifyLayer, outTableTemp) ### ... 
### with arcpy.da.UpdateCursor(element["featureLayer"], ["OID@", "SHAPE@"]) as cursor: for row in cursor: found = False with arcpy.da.UpdateCursor(outTableTemp, [element["FID_XXX"], "SHAPE@"]) as cursorSub: for rowSub in cursorSub: if row[0] == rowSub[0]: found = True row[1] = rowSub[1] cursor.updateRow(row) cursorSub.deleteRow() break if found == False: cursor.deleteRow() arcpy.AddMessage("\n# Hoan thanh Simplify Polyline!!!") ############################################## Snap Line to Polygon ############################# arcpy.AddMessage("\n# Bat dau Snap") for elementPolygon in listPolygon: if elementPolygon["LayerType"] == "Polyline": lineLayerName = elementPolygon["LayerName"][:elementPolygon["LayerName"].find('_Buffer')] if (lineLayerName <> "DuongBinhDo"): arcpy.AddMessage("\n\t# Snap: {0}".format(lineLayerName)) layerBufferPath = duongDanNguon + "/" + elementPolygon["DatasetName"] + "/" + elementPolygon["LayerName"] layerLinePath = duongDanNguon + "/" + elementPolygon["DatasetName"] + "/" + lineLayerName arcpy.Snap_edit(layerLinePath, [[layerBufferPath, "EDGE", self.snap_distance]]) ############## Snap Other if _checkExitLayer: arcpy.AddMessage("\n\t# Snap other: {0}".format("PhuBeMat")) arcpy.Integrate_management([[duongDanNguon + "/PhuBeMat/PhuBeMat", 1]], "2 Meters") arcpy.Densify_edit(duongDanNguon + "/PhuBeMat/PhuBeMat", "DISTANCE","2 Meters",None ,None) arcpy.Snap_edit(duongDanNguon + "/PhuBeMat/PhuBeMat", [[duongDanNguon + "/ThuyHe/SongSuoiL_KenhMuongL_SnapPBM", "EDGE", "35 Meters"]]) arcpy.Integrate_management([[duongDanNguon + "/ThuyHe/SongSuoiL_KenhMuongL_SnapPBM", 1],[duongDanNguon + "/PhuBeMat/PhuBeMat", 2]], "2 Meters") arcpy.Erase_analysis(in_features = duongDanNguon + "/PhuBeMat/PhuBeMat_Full", erase_features = duongDanNguon + "/PhuBeMat/PhuBeMat", out_feature_class = duongDanNguon + "/PhuBeMat/PhuBeMat_Lo") arcpy.CalculateField_management(duongDanNguon + "/PhuBeMat/PhuBeMat_Lo", "maNhanDang", '"temp123"', "PYTHON_9.3") arcpy.Append_management([duongDanNguon + "/PhuBeMat/PhuBeMat_Lo"], duongDanNguon + "/PhuBeMat/PhuBeMat", "NO_TEST",None,None) arcpy.MultipartToSinglepart_management(duongDanNguon + "/PhuBeMat/PhuBeMat", duongDanNguon + "/PhuBeMat/PhuBeMat2") arcpy.MakeFeatureLayer_management(duongDanNguon + "/PhuBeMat/PhuBeMat2", "PhuBeMat_Temp_Lyr") arcpy.SelectLayerByAttribute_management("PhuBeMat_Temp_Lyr", "NEW_SELECTION", "maNhanDang = 'temp123'") arcpy.Eliminate_management(in_features = "PhuBeMat_Temp_Lyr", out_feature_class = duongDanNguon + "/PhuBeMat/PhuBeMat3", selection = "LENGTH") arcpy.Densify_edit(duongDanNguon + "/ThuyHe/SongSuoiL", "DISTANCE","2 Meters",None ,None) arcpy.Snap_edit(duongDanNguon + "/ThuyHe/SongSuoiL", [[duongDanNguon + "/ThuyHe/SongSuoiL_KenhMuongL_SnapPBM", "EDGE", "2 Meters"]]) arcpy.CopyFeatures_management(duongDanNguon + "/PhuBeMat/PhuBeMat3", duongDanNguon + "/PhuBeMat/PhuBeMat") ############################################## Copy to final ############################# for element in listPolygon: if element["LayerType"] == "Polygon": if element["LayerName"] <> "PhuBeMat_Full": layerPath = duongDanNguon + "/" + element["DatasetName"] + "/" + element["LayerName"] layerFinalPath = duongDanDich + "/" + element["DatasetName"] + "/" + element["LayerName"] arcpy.DeleteField_management(layerPath, [element["FID_XXX"]]) arcpy.CopyFeatures_management(layerPath, layerFinalPath) for element in listPolyLine: if element["LayerType"] == "Polyline": if element["LayerName"] <> "SongSuoiL_KenhMuongL_SnapPBM": layerPath = duongDanNguon + "/" + 
element["DatasetName"] + "/" + element["LayerName"] layerFinalPath = duongDanDich + "/" + element["DatasetName"] + "/" + element["LayerName"] arcpy.DeleteField_management(layerPath, [element["FID_XXX"]]) arcpy.CopyFeatures_management(layerPath, layerFinalPath) #arcpy.AddMessage("\n# Hoan thanh!!!") else: arcpy.AddMessage("\n# Khong tim thay file cau hinh: \"{0}\"".format(urlFile)) except OSError as error: arcpy.AddMessage("Error" + error.message) except ValueError as error: arcpy.AddMessage("Error" + error.message) except arcpy.ExecuteError as error: arcpy.AddMessage("Error" + error.message) finally: arcpy.Delete_management("in_memory")
def dumpTable(fc, outName, isSpatial, outputDir, logfile, isOpen, fcName): dumpString = ' Dumping {}...'.format(outName) if isSpatial: dumpString = ' ' + dumpString addMsgAndPrint(dumpString) if isSpatial: logfile.write(' feature class {} dumped to shapefile {}\n'.format( fc, outName)) else: logfile.write(' table {} dumped to table\n'.format(fc, outName)) logfile.write(' field name remapping: \n') longFields = [] fieldmappings = arcpy.FieldMappings() fields = arcpy.ListFields(fc) for field in fields: #get the name string and chop off the joined table name if necessary fName = field.name for prefix in ('DescriptionOfMapUnits', 'DataSources', 'Glossary', fcName): if fc != prefix and fName.find( prefix) == 0 and fName != fcName + '_ID': fName = fName[len(prefix) + 1:] if not fName.lower() in forget: #make the FieldMap object based on this field fieldmap = arcpy.FieldMap() fieldmap.addInputField(fc, field.name) out_field = fieldmap.outputField #go back to the FieldMap object and set up the output field name out_field.name = remapFieldName(fName) fieldmap.outputField = out_field #logfile.write(' '+field.name+' > '+out_field.name+'\n') #save the FieldMap in the FieldMappings fieldmappings.addFieldMap(fieldmap) if field.length > 254: longFields.append(fName) check_unique(fieldmappings) for fm in fieldmappings: logfile.write(' {} > {}\n'.format(fm.getInputFieldName(0), fm.outputField.name)) if isSpatial: if debug: addMsgAndPrint('dumping ', fc, outputDir, outName) try: arcpy.FeatureClassToFeatureClass_conversion( fc, outputDir, outName, field_mapping=fieldmappings) except: addMsgAndPrint('failed to translate table ' + fc) else: arcpy.TableToTable_conversion(fc, outputDir, outName, field_mapping=fieldmappings) if isOpen: # if any field lengths > 254, write .txt file if len(longFields) > 0: outText = outName[0:-4] + '.txt' logfile.write(' table ' + fc + ' has long fields, thus dumped to file ' + outText + '\n') csv_path = os.path.join(outputDir, outText) csvFile = open(csv_path, 'w') fields = arcpy.ListFields(fc) f_names = [ f.name for f in fields if f.type not in ['Blob', 'Geometry', 'Raster'] ] col_names = "|".join(f_names) csvFile.write("{}\n|".format(col_names)) #addMsgAndPrint("FC name: "+ fc) with arcpy.da.SearchCursor(fc, f_names) as cursor: for row in cursor: rowString = str(row[0]) for i in range(1, len(row)): #use enumeration here? #if debug: addMsgAndPrint("Index: "+str(i)) #if debug: addMsgAndPrint("Current row is: " + str(row[i])) if row[i] != None: xString = str(row[i]) # if isinstance(row[i],Number) or isinstance(row[i], datetime.datetime): # xString = str(row[i]) # else: # #if debug: addMsgAndPrint("Current row type is: " + str(type(row[i]))) # xString = row[i].encode('ascii','xmlcharrefreplace') rowString = rowString + '|' + xString else: rowString = rowString + '|' csvFile.write(rowString + '\n') csvFile.close() addMsgAndPrint(' Finished dump\n')
def CreateDuongDiaGioi(self): try: arcpy.env.overwriteOutput = 1 duongDanNguon = "C:/Generalize_25_50/50K_Process.gdb" duongDanDich = "C:/Generalize_25_50/50K_Final.gdb" arcpy.env.workspace = duongDanNguon + "/BienGioiDiaGioi" DiaPhan_Name = "DiaPhan" DiaPhan_Lyr = "DiaPhan_Lyr" DiaPhan_Path = duongDanNguon + "/BienGioiDiaGioi/" + DiaPhan_Name DiaPhan_Path_Final = duongDanDich + "/BienGioiDiaGioi/" + DiaPhan_Name DiaPhan_Xa_Path = DiaPhan_Path + "_Xa" DiaPhan_Huyen_Path = DiaPhan_Path + "_Huyen" DiaPhan_Tinh_Path = DiaPhan_Path + "_Tinh" intersect_Xa_Path = duongDanNguon + "/BienGioiDiaGioi/DuongDiaGioi_Xa" intersect_Huyen_Path = duongDanNguon + "/BienGioiDiaGioi/DuongDiaGioi_Huyen" intersect_Tinh_Path = duongDanNguon + "/BienGioiDiaGioi/DuongDiaGioi_Tinh" joint_Xa_Path = duongDanNguon + "/BienGioiDiaGioi/DuongDiaGioi_Xa_Join" joint_Huyen_Path = duongDanNguon + "/BienGioiDiaGioi/DuongDiaGioi_Huyen_Join" joint_Tinh_Path = duongDanNguon + "/BienGioiDiaGioi/DuongDiaGioi_Tinh_Join" DuongDiaGioi_Name = "DuongDiaGioi" DuongDiaGioi_Path = duongDanNguon + "/BienGioiDiaGioi/" + DuongDiaGioi_Name DuongDiaGioi_Dich_Path = duongDanDich + "/BienGioiDiaGioi/" + DuongDiaGioi_Name songSuoiL_Path = duongDanNguon + "/ThuyHe/SongSuoiL" songSuoiL_Path_Final = duongDanDich + "/ThuyHe/SongSuoiL" doanTimDuongBo_Path = duongDanNguon + "/GiaoThong/DoanTimDuongBo" doanTimDuongBo_Path_Final = duongDanDich + "/GiaoThong/DoanTimDuongBo" #arcpy.Integrate_management([[DiaPhan_Path, 1], [songSuoiL_Path, 2], [doanTimDuongBo_Path, 3]], "5 Meters") arcpy.Integrate_management([[DiaPhan_Path, 1]], "1 Meters") arcpy.Snap_edit( DiaPhan_Path, [[duongDanNguon + "/ThuyHe/SongSuoiL", "VERTEX", "25 Meters"], [duongDanNguon + "/ThuyHe/SongSuoiL", "EDGE", "25 Meters"]]) arcpy.Snap_edit(DiaPhan_Path, [[ duongDanNguon + "/GiaoThong/DoanTimDuongBo", "VERTEX", "5 Meters" ], [ duongDanNguon + "/GiaoThong/DoanTimDuongBo", "EDGE", "5 Meters" ]]) #Xa arcpy.MakeFeatureLayer_management(DiaPhan_Path, DiaPhan_Lyr) arcpy.SelectLayerByAttribute_management(DiaPhan_Lyr, "NEW_SELECTION", "doiTuong = 3") arcpy.CopyFeatures_management(DiaPhan_Lyr, DiaPhan_Xa_Path) arcpy.Intersect_analysis([[DiaPhan_Xa_Path, 1]], intersect_Xa_Path, "ALL", None, "LINE") arcpy.DeleteIdentical_management(intersect_Xa_Path, ["Shape"], None, None) arcpy.AddField_management(intersect_Xa_Path, "loaiHienTrangPhapLy", "SHORT", None, None, None, "loaiHienTrangPhapLy", "NULLABLE") arcpy.AddField_management(intersect_Xa_Path, "donViHanhChinhLienKeTrai", "TEXT", None, None, None, "donViHanhChinhLienKeTrai", "NULLABLE") arcpy.AddField_management(intersect_Xa_Path, "donViHanhChinhLienKePhai", "TEXT", None, None, None, "donViHanhChinhLienKePhai", "NULLABLE") arcpy.AddField_management(intersect_Xa_Path, "chieuDai", "DOUBLE", None, None, None, "chieuDai", "NULLABLE") fieldMappings = arcpy.FieldMappings() fieldMappings.addTable(DiaPhan_Xa_Path) for field in fieldMappings.fields: if field.name not in ["doiTuong", "danhTuChung", "diaDanh"]: fieldMappings.removeFieldMap( fieldMappings.findFieldMapIndex(field.name)) arcpy.SpatialJoin_analysis(target_features=intersect_Xa_Path, join_features=DiaPhan_Xa_Path, out_feature_class=joint_Xa_Path, join_operation="JOIN_ONE_TO_MANY", join_type="KEEP_ALL", field_mapping=fieldMappings, match_option="WITHIN") with arcpy.da.UpdateCursor(intersect_Xa_Path, [ "OID@", "FID_DiaPhan_Xa", "loaiHienTrangPhapLy", "donViHanhChinhLienKeTrai", "donViHanhChinhLienKePhai", "chieuDai", "Shape_Length", "doiTuong" ]) as uCur: for uRow in uCur: with 
arcpy.da.SearchCursor(joint_Xa_Path, [ "TARGET_FID", "JOIN_FID", "doiTuong", "danhTuChung", "diaDanh" ]) as sCur: for sRow in sCur: if uRow[0] == sRow[0] and sRow[2] == 3: if uRow[1] == sRow[1]: uRow[2] = 1 uRow[5] = uRow[6] uRow[3] = sRow[3] + " " + sRow[4] uRow[7] = sRow[2] uCur.updateRow(uRow) else: uRow[2] = 1 uRow[5] = uRow[6] uRow[4] = sRow[3] + " " + sRow[4] uRow[7] = sRow[2] uCur.updateRow(uRow) #Huyen arcpy.SelectLayerByAttribute_management(DiaPhan_Lyr, "NEW_SELECTION", "doiTuong = 2") arcpy.CopyFeatures_management(DiaPhan_Lyr, DiaPhan_Huyen_Path) arcpy.Intersect_analysis([[DiaPhan_Huyen_Path, 1]], intersect_Huyen_Path, "ALL", None, "LINE") arcpy.DeleteIdentical_management(intersect_Huyen_Path, ["Shape"], None, None) arcpy.AddField_management(intersect_Huyen_Path, "loaiHienTrangPhapLy", "SHORT", None, None, None, "loaiHienTrangPhapLy", "NULLABLE") arcpy.AddField_management(intersect_Huyen_Path, "donViHanhChinhLienKeTrai", "TEXT", None, None, None, "donViHanhChinhLienKeTrai", "NULLABLE") arcpy.AddField_management(intersect_Huyen_Path, "donViHanhChinhLienKePhai", "TEXT", None, None, None, "donViHanhChinhLienKePhai", "NULLABLE") arcpy.AddField_management(intersect_Huyen_Path, "chieuDai", "DOUBLE", None, None, None, "chieuDai", "NULLABLE") fieldMappings = arcpy.FieldMappings() fieldMappings.addTable(DiaPhan_Huyen_Path) for field in fieldMappings.fields: if field.name not in ["doiTuong", "danhTuChung", "diaDanh"]: fieldMappings.removeFieldMap( fieldMappings.findFieldMapIndex(field.name)) arcpy.SpatialJoin_analysis(target_features=intersect_Huyen_Path, join_features=DiaPhan_Huyen_Path, out_feature_class=joint_Huyen_Path, join_operation="JOIN_ONE_TO_MANY", join_type="KEEP_ALL", field_mapping=fieldMappings, match_option="WITHIN") with arcpy.da.UpdateCursor(intersect_Huyen_Path, [ "OID@", "FID_DiaPhan_Huyen", "loaiHienTrangPhapLy", "donViHanhChinhLienKeTrai", "donViHanhChinhLienKePhai", "chieuDai", "Shape_Length", "doiTuong" ]) as uCur: for uRow in uCur: with arcpy.da.SearchCursor(joint_Huyen_Path, [ "TARGET_FID", "JOIN_FID", "doiTuong", "danhTuChung", "diaDanh" ]) as sCur: for sRow in sCur: if uRow[0] == sRow[0] and sRow[2] == 2: if uRow[1] == sRow[1]: uRow[2] = 1 uRow[5] = uRow[6] uRow[3] = sRow[3] + " " + sRow[4] uRow[7] = sRow[2] uCur.updateRow(uRow) else: uRow[2] = 1 uRow[5] = uRow[6] uRow[4] = sRow[3] + " " + sRow[4] uRow[7] = sRow[2] uCur.updateRow(uRow) #Tinh arcpy.SelectLayerByAttribute_management(DiaPhan_Lyr, "NEW_SELECTION", "doiTuong = 1") arcpy.CopyFeatures_management(DiaPhan_Lyr, DiaPhan_Tinh_Path) arcpy.Intersect_analysis([[DiaPhan_Tinh_Path, 1]], intersect_Tinh_Path, "ALL", None, "LINE") arcpy.DeleteIdentical_management(intersect_Tinh_Path, ["Shape"], None, None) arcpy.AddField_management(intersect_Tinh_Path, "loaiHienTrangPhapLy", "SHORT", None, None, None, "loaiHienTrangPhapLy", "NULLABLE") arcpy.AddField_management(intersect_Tinh_Path, "donViHanhChinhLienKeTrai", "TEXT", None, None, None, "donViHanhChinhLienKeTrai", "NULLABLE") arcpy.AddField_management(intersect_Tinh_Path, "donViHanhChinhLienKePhai", "TEXT", None, None, None, "donViHanhChinhLienKePhai", "NULLABLE") arcpy.AddField_management(intersect_Tinh_Path, "chieuDai", "DOUBLE", None, None, None, "chieuDai", "NULLABLE") fieldMappings = arcpy.FieldMappings() fieldMappings.addTable(DiaPhan_Tinh_Path) for field in fieldMappings.fields: if field.name not in ["doiTuong", "danhTuChung", "diaDanh"]: fieldMappings.removeFieldMap( fieldMappings.findFieldMapIndex(field.name)) 
arcpy.SpatialJoin_analysis(target_features=intersect_Tinh_Path, join_features=DiaPhan_Tinh_Path, out_feature_class=joint_Tinh_Path, join_operation="JOIN_ONE_TO_MANY", join_type="KEEP_ALL", field_mapping=fieldMappings, match_option="WITHIN") with arcpy.da.UpdateCursor(intersect_Tinh_Path, [ "OID@", "FID_DiaPhan_Tinh", "loaiHienTrangPhapLy", "donViHanhChinhLienKeTrai", "donViHanhChinhLienKePhai", "chieuDai", "Shape_Length", "doiTuong" ]) as uCur: for uRow in uCur: with arcpy.da.SearchCursor(joint_Tinh_Path, [ "TARGET_FID", "JOIN_FID", "doiTuong", "danhTuChung", "diaDanh" ]) as sCur: for sRow in sCur: if uRow[0] == sRow[0] and sRow[2] == 1: if uRow[1] == sRow[1]: uRow[2] = 1 uRow[5] = uRow[6] uRow[3] = sRow[3] + " " + sRow[4] uRow[7] = sRow[2] uCur.updateRow(uRow) else: uRow[2] = 1 uRow[5] = uRow[6] uRow[4] = sRow[3] + " " + sRow[4] uRow[7] = sRow[2] uCur.updateRow(uRow) #Xoa Xa bi trung arcpy.MakeFeatureLayer_management(intersect_Xa_Path, "DuongDiaGioi_Xa_Lyr") arcpy.MakeFeatureLayer_management(intersect_Huyen_Path, "DuongDiaGioi_Huyen_Lyr") arcpy.SelectLayerByLocation_management( in_layer="DuongDiaGioi_Xa_Lyr", overlap_type="WITHIN", select_features="DuongDiaGioi_Huyen_Lyr", selection_type="NEW_SELECTION") if int( arcpy.GetCount_management("DuongDiaGioi_Xa_Lyr").getOutput( 0)) > 0: arcpy.DeleteFeatures_management("DuongDiaGioi_Xa_Lyr") #Xoa Huyen bi trung arcpy.MakeFeatureLayer_management(intersect_Tinh_Path, "DuongDiaGioi_Tinh_Lyr") arcpy.SelectLayerByLocation_management( in_layer="DuongDiaGioi_Huyen_Lyr", overlap_type="WITHIN", select_features="DuongDiaGioi_Tinh_Lyr", selection_type="NEW_SELECTION") if int( arcpy.GetCount_management( "DuongDiaGioi_Huyen_Lyr").getOutput(0)) > 0: arcpy.DeleteFeatures_management("DuongDiaGioi_Huyen_Lyr") #Copy dữ liệu vào lớp DuongDiaGioi if int(arcpy.GetCount_management(DuongDiaGioi_Path).getOutput( 0)) > 0: arcpy.DeleteFeatures_management(DuongDiaGioi_Path) duongDiaGioiFields = [ "SHAPE@", "maNhanDang", "ngayThuNhan", "ngayCapNhat", "maDoiTuong", "loaiHienTrangPhapLy", "donViHanhChinhLienKeTrai", "donViHanhChinhLienKePhai", "chieuDai", "doiTuong", "maTrinhBay", "tenManh", "soPhienHieuManhBanDo" ] duongDiaGioiFields2 = [ "SHAPE@", "maNhanDang", "ngayThuNhan", "ngayCapNhat", "maDoiTuong", "loaiHienTrangPhapLy", "donViHanhChinhLienKeTrai", "donViHanhChinhLienKePhai", "chieuDai", "doiTuong", "maTrinhBay", "tenManh", "soPhienHieuManhBanDo", "DuongDiaGioi_Rep_ID", "RuleID" ] with arcpy.da.SearchCursor(intersect_Xa_Path, duongDiaGioiFields) as sCur: with arcpy.da.InsertCursor(DuongDiaGioi_Path, duongDiaGioiFields2) as iCur: for sRow in sCur: iCur.insertRow([ sRow[0], sRow[1], sRow[2], sRow[3], sRow[4], 1, sRow[6], sRow[7], sRow[8], sRow[9], sRow[10], sRow[11], sRow[12], 5, 1 ]) with arcpy.da.SearchCursor(intersect_Huyen_Path, duongDiaGioiFields) as sCur: with arcpy.da.InsertCursor(DuongDiaGioi_Path, duongDiaGioiFields2) as iCur: for sRow in sCur: iCur.insertRow([ sRow[0], sRow[1], sRow[2], sRow[3], sRow[4], 1, sRow[6], sRow[7], sRow[8], sRow[9], sRow[10], sRow[11], sRow[12], 3, 3 ]) with arcpy.da.SearchCursor(intersect_Tinh_Path, duongDiaGioiFields) as sCur: with arcpy.da.InsertCursor(DuongDiaGioi_Path, duongDiaGioiFields2) as iCur: for sRow in sCur: iCur.insertRow([ sRow[0], sRow[1], sRow[2], sRow[3], sRow[4], 1, sRow[6], sRow[7], sRow[8], sRow[9], sRow[10], sRow[11], sRow[12], 1, 2 ]) ############################################### Snap Other ############################ ''' arcpy.AddMessage("\n#Snap DoanTimDuongBo") arcpy.Densify_edit(duongDanNguon + 
"/GiaoThong/DoanTimDuongBo", "DISTANCE","10 Meters",None ,None) arcpy.Snap_edit(duongDanNguon + "/GiaoThong/DoanTimDuongBo", [[DuongDiaGioi_Path, "VERTEX", "10 Meters"], [DuongDiaGioi_Path, "EDGE", "10 Meters"]]) arcpy.AddMessage("\n#Snap SongSuoiL") arcpy.Densify_edit(duongDanNguon + "/ThuyHe/SongSuoiL", "DISTANCE","10 Meters",None ,None) arcpy.Snap_edit(duongDanNguon + "/ThuyHe/SongSuoiL", [[DuongDiaGioi_Path, "VERTEX", "10 Meters"], [DuongDiaGioi_Path, "EDGE", "10 Meters"]]) ''' ''' arcpy.AddMessage("\n#Snap DoanTimDuongBo") arcpy.Densify_edit(DuongDiaGioi_Path, "DISTANCE","10 Meters",None ,None) arcpy.Snap_edit(DuongDiaGioi_Path, [[duongDanNguon + "/GiaoThong/DoanTimDuongBo", "VERTEX", "10 Meters"], [duongDanNguon + "/GiaoThong/DoanTimDuongBo", "EDGE", "10 Meters"]]) arcpy.AddMessage("\n#Snap SongSuoiL") arcpy.Snap_edit(DuongDiaGioi_Path, [[duongDanNguon + "/ThuyHe/SongSuoiL", "VERTEX", "10 Meters"], [duongDanNguon + "/ThuyHe/SongSuoiL", "EDGE", "10 Meters"]]) ''' arcpy.CopyFeatures_management(DuongDiaGioi_Path, DuongDiaGioi_Dich_Path) arcpy.CopyFeatures_management(songSuoiL_Path, songSuoiL_Path_Final) arcpy.CopyFeatures_management(DiaPhan_Path, DiaPhan_Path_Final) arcpy.CopyFeatures_management(doanTimDuongBo_Path, doanTimDuongBo_Path_Final) except OSError as error: arcpy.AddMessage("Error" + error.message) except ValueError as error: arcpy.AddMessage("Error" + error.message) except arcpy.ExecuteError as error: arcpy.AddMessage("Error" + error.message) finally: arcpy.Delete_management("in_memory")
try:
    ap.Delete_management(grid_Manning)
except:
    print('Warning: Unable to delete original grid_Manning')

ap.CopyFeatures_management(grid_raw, int_manning)

# ----------------------- majority in ZonalStatisticsAsTable is not working
ap.FeatureToPoint_management(int_manning, grid_points, "CENTROID")

# find majority landcover type in each grid cell, convert it to a point
ccap_maj = ap.sa.ZonalStatistics(int_manning, "Zone", CCAP, "Majority", "DATA")
ap.sa.ExtractValuesToPoints(grid_points, ccap_maj, ccap_points, "NONE", "VALUE_ONLY")

# create list of desired fields to pass to final object
fieldmappings = ap.FieldMappings()
fieldmappings.addTable(int_manning)
keepfields = [field.name for field in ap.ListFields(int_manning)]
keepfields.append("RASTERVALU")
keepfields.append('Shape_Area')
keepfields.append('Shape_Length')

# join all fields by location of points to grid cells
ap.SpatialJoin_analysis(int_manning, ccap_points, grid_Manning, "JOIN_ONE_TO_ONE", "KEEP_ALL")

# clean up fields in attribute table
fieldmappings.addTable(grid_Manning)
unwanted_fields = [field.name for field in fieldmappings.fields if field.name not in keepfields]
ap.DeleteField_management(grid_Manning, unwanted_fields)
ap.AlterField_management(grid_Manning, "RASTERVALU", "Majority", "Majority")
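# An alternative sketch (an assumption, not the author's code): the same field
# cleanup can be done up front by building a mapping from both inputs, removing
# the unwanted field maps, and passing the mapping straight to SpatialJoin_analysis,
# which avoids the DeleteField step afterwards.
fm = ap.FieldMappings()
fm.addTable(int_manning)
fm.addTable(ccap_points)
for field in fm.fields:
    if field.name not in keepfields:
        fm.removeFieldMap(fm.findFieldMapIndex(field.name))
ap.SpatialJoin_analysis(int_manning, ccap_points, grid_Manning,
                        "JOIN_ONE_TO_ONE", "KEEP_ALL", fm)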
join_features = 'Buildings'
out_feature_class = 'Address_with_building'
join_operation = 'JOIN_ONE_TO_ONE'
join_type = 'KEEP_ALL'
match_option = 'CLOSEST'
search_radius = '30 Meters'

arcpy.analysis.SpatialJoin(target_features=target_features,
                           join_features=join_features,
                           out_feature_class=out_feature_class,
                           join_operation=join_operation,
                           join_type=join_type,
                           match_option=match_option,
                           search_radius=search_radius)

field_mapping = arcpy.FieldMappings()
name = arcpy.FieldMap()
total_val = arcpy.FieldMap()

name.addInputField('City', 'Name')
total_val.addInputField('Buildings', 'Assessed_Value')
total_val.mergeRule = 'SUM'

name_out = name.outputField
name_out.name = 'Name'
name_out.aliasName = 'Name'
name.outputField = name_out

total_val_out = total_val.outputField
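# The snippet above stops after grabbing total_val's output field. A minimal sketch
# (an assumption, not from the original) of how this pattern usually finishes:
# rename the summed output field, register both FieldMap objects on the
# FieldMappings, and run a second spatial join that applies the mapping. The
# output name 'City_with_building_totals' and the 'Total_Assessed_Value' field
# name are made up for illustration.
total_val_out.name = 'Total_Assessed_Value'
total_val_out.aliasName = 'Total_Assessed_Value'
total_val.outputField = total_val_out

field_mapping.addFieldMap(name)
field_mapping.addFieldMap(total_val)

arcpy.analysis.SpatialJoin(target_features='City',
                           join_features='Buildings',
                           out_feature_class='City_with_building_totals',
                           join_operation='JOIN_ONE_TO_ONE',
                           join_type='KEEP_ALL',
                           field_mapping=field_mapping,
                           match_option='CLOSEST',
                           search_radius='30 Meters')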
def createPolygonValueCountTable(inPolygonFeature, inPolygonIdField, inValueDataset, inValueField, outTable, metricConst, index, cleanupList): """Transfer a value count from an specified geodataset to input polygon features, using simple areal weighting. **Description:** This function uses Tabulate Intersection to construct a table with a field containing the area weighted value count (e.g., POPULATION) for each input polygon unit. The value field is renamed from the metric constants entry. Returns the created output table and the generated output value count field name. **Arguments:** * *inPolygonFeature* - input Polygon feature class with full path. * *inPolygonIdField* - the name of the field in the reporting unit feature class containing a unique identifier * *inValueDataset* - input value feature class or raster with full path * *inValueField* - the name of the field in the value feature class containing count values. Will be empty if the inValueDataset is a raster * *outTable* - the output table that will contain calculated population values * *metricConst* - an ATtILA2 object containing constant values to match documentation * *index* - if this function is going to be run multiple times, this index is used to keep track of intermediate outputs and field names. * *cleanupList* - object containing commands and parameters to perform at cleanup time. **Returns:** * table (type unknown - string representation?) * string - the generated output value count field name """ from arcpy import env from .. import errors from . import files tempEnvironment0 = env.snapRaster tempEnvironment1 = env.cellSize try: desc = arcpy.Describe(inValueDataset) if desc.datasetType == "RasterDataset": # set the raster environments so the raster operations align with the census grid cell boundaries env.snapRaster = inValueDataset env.cellSize = desc.meanCellWidth # calculate the population for the polygon features using zonal statistics as table arcpy.sa.ZonalStatisticsAsTable(inPolygonFeature, inPolygonIdField, inValueDataset, outTable, "DATA", "SUM") # Rename the population count field. outValueField = metricConst.valueCountFieldNames[index] arcpy.AlterField_management(outTable, "SUM", outValueField, outValueField) else: # census features are polygons # Create a copy of the census feature class that we can add new fields to for calculations. fieldMappings = arcpy.FieldMappings() fieldMappings.addTable(inValueDataset) [ fieldMappings.removeFieldMap( fieldMappings.findFieldMapIndex(aFld.name)) for aFld in fieldMappings.fields if aFld.name != inValueField ] tempName = "%s_%s" % (metricConst.shortName, desc.baseName) tempCensusFeature = files.nameIntermediateFile( [tempName + "_Work", "FeatureClass"], cleanupList) inValueDataset = arcpy.FeatureClassToFeatureClass_conversion( inValueDataset, env.workspace, os.path.basename(tempCensusFeature), "", fieldMappings) # Add a dummy field to the copied census feature class and calculate it to a value of 1. classField = "tmpClass" arcpy.AddField_management(inValueDataset, classField, "SHORT") arcpy.CalculateField_management(inValueDataset, classField, 1) # Construct a table with a field containing the area weighted value count for each input polygon unit arcpy.TabulateIntersection_analysis(inPolygonFeature, [inPolygonIdField], inValueDataset, outTable, [classField], [inValueField]) # Rename the population count field. 
outValueField = metricConst.valueCountFieldNames[index] arcpy.AlterField_management(outTable, inValueField, outValueField, outValueField) return outTable, outValueField except Exception as e: errors.standardErrorHandling(e) finally: env.snapRaster = tempEnvironment0 env.cellSize = tempEnvironment1
soilSaShpDict = dict()
featPointShpDict = dict()
featLineShpDict = dict()

# lists containing SSURGO layer paths sorted according to the survey center key
# This list will be passed over to the Merge command
soilShpList = list()
muLineShpList = list()
muPointShpList = list()
soilSaShpList = list()
featPointShpList = list()
featLineShpList = list()

# Create FieldMappings objects that will contain all of the fields from each survey
# (fieldmap). FMs will be used to remove every field but AREASYMBOL, FEATSYM, MUSYM
soilsFM = arcpy.FieldMappings()
muLineFM = arcpy.FieldMappings()
muPointFM = arcpy.FieldMappings()
soilSaFM = arcpy.FieldMappings()
featPointFM = arcpy.FieldMappings()
featLineFM = arcpy.FieldMappings()

# list containing the (Xcenter * Ycenter) for every SSURGO soil layer
extentList = list()

# ------------------------------------------------------------------------------------- Populate Dictionaries, lists and Fieldmappings
for SSA in ssurgoDatasetDict:
    # Paths to individual SSURGO layers
    soilShpPath = os.path.join(os.path.join(ssurgoDatasetDict[SSA], "spatial"), "soilmu_a_" + SSA.lower() + ".shp")
    muLineShpPath = os.path.join(os.path.join(ssurgoDatasetDict[SSA], "spatial"), "soilmu_l_" + SSA.lower() + ".shp")
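# A minimal sketch (an assumed continuation, not from the original) of how the
# FieldMappings declared above are typically used for a SSURGO merge: the loop is
# expected to append each survey's soilmu_a shapefile to soilShpList and add it to
# soilsFM via soilsFM.addTable(soilShpPath); afterwards every field except
# AREASYMBOL and MUSYM is dropped from the mapping and the collected paths are
# merged. "outputWS" and the "MUPOLYGON" output name are assumptions.
keepSoilFields = ("AREASYMBOL", "MUSYM")
for field in soilsFM.fields:
    if field.name.upper() not in keepSoilFields:
        soilsFM.removeFieldMap(soilsFM.findFieldMapIndex(field.name))
arcpy.Merge_management(soilShpList, os.path.join(outputWS, "MUPOLYGON"), soilsFM)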
def PopCountDensity(inFeature, featureField, inCensus, censusField, outFC, index): try: ### Initialization # Start the timer startDateTime1 = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") arcpy.AddMessage(startDateTime1 + " setting up environment variables.") #Begin calculating population density #Create an index value to keep track of intermediate outputs and fieldnames arcpy.AddMessage("Calculating population density...") #Perform population density caculation for first (only?) population feature class #If the user specified an index, add an underscore as prefix if index != "": index = "_" + index #Create a copy of the census feature class that we can add new fields to for calculations. #This is more appropriate than altering the user's input data. fieldMappings = arcpy.FieldMappings() fieldMappings.addTable(inCensus) [ fieldMappings.removeFieldMap( fieldMappings.findFieldMapIndex(aFld.name)) for aFld in fieldMappings.fields if aFld.name != censusField ] desc = arcpy.Describe(inCensus) tempName = "CensusFeature_%s%s" % (desc.baseName, index) inCensus = arcpy.FeatureClassToFeatureClass_conversion( inCensus, arcpy.env.scratchGDB, tempName, "", fieldMappings) #Add and populate the area field to the Census Feature arcpy.AddField_management(inCensus, "popArea", "DOUBLE") with arcpy.da.UpdateCursor(inCensus, ['SHAPE@AREA', 'popArea']) as cursor: for row in cursor: row[1] = row[0] / 1E6 cursor.updateRow(row) #Calculate the population density arcpy.AddField_management(inCensus, "popDens" + index, "DOUBLE") with arcpy.da.UpdateCursor( inCensus, ('popDens' + index, censusField, 'popArea')) as cursor: for row in cursor: row[0] = row[1] / row[2] cursor.updateRow(row) #Intersect the reporting units with the population features. tempName = "IntersectTEST2_%s%s" % (desc.baseName, index) intersectOutput = arcpy.Intersect_analysis([inFeature, inCensus], os.path.join( arcpy.env.scratchGDB, tempName)) #Add and populate the area field of the intersected polygons arcpy.AddField_management(intersectOutput, "intArea", "DOUBLE") with arcpy.da.UpdateCursor(intersectOutput, ['SHAPE@AREA', 'intArea']) as cursor: for row in cursor: row[1] = row[0] / 1E6 cursor.updateRow(row) #Calculate the population of the itersected areas by multiplying population density by intersected area #Set up a calculation expression for the density calculation pd_name = "popDens" + index #Calculate the population density for the intersected areas arcpy.AddField_management(intersectOutput, "popCount", "DOUBLE") with arcpy.da.UpdateCursor(intersectOutput, ('popCount', pd_name, 'intArea')) as cursor: for row in cursor: row[0] = row[1] * row[2] cursor.updateRow(row) #Dissolve the feature by the featureField (specified by the user) tempName = "Dissolve%s" % (index) arcpy.AddMessage("Dissolving the Feature Class by featureField.") dissolve = arcpy.Dissolve_management( intersectOutput, os.path.join(arcpy.env.scratchGDB, tempName), featureField, "intArea SUM;popCount SUM", "MULTI_PART") #Rename the fields that hold information on population and feature area #Create variable to hold population information pc_name = "popCount" + index fieldList = arcpy.ListFields(dissolve) for field in fieldList: if "sum_pop" in field.name.lower(): popVar = field.name popVar.encode('ascii', 'ignore') arcpy.AddField_management( os.path.join(arcpy.env.scratchGDB, tempName), pc_name, "DOUBLE") with arcpy.da.UpdateCursor( os.path.join(arcpy.env.scratchGDB, tempName), (pc_name, popVar)) as cursor: for row in cursor: row[0] = row[1] cursor.updateRow(row) 
arcpy.DeleteField_management( os.path.join(arcpy.env.scratchGDB, tempName), popVar) #Create variable to hold area information area_name = "AREAKM2" for field in fieldList: if "sum_int" in field.name.lower(): areaVar = field.name areaVar.encode('ascii', 'ignore') arcpy.AddField_management( os.path.join(arcpy.env.scratchGDB, tempName), area_name, "DOUBLE") with arcpy.da.UpdateCursor( os.path.join(arcpy.env.scratchGDB, tempName), (area_name, areaVar)) as cursor: for row in cursor: row[0] = row[1] cursor.updateRow(row) arcpy.DeleteField_management( os.path.join(arcpy.env.scratchGDB, tempName), areaVar) #Calculate the population density arcpy.AddMessage("Calculating population density...") popDens = "popDens" + index arcpy.AddField_management(os.path.join(arcpy.env.scratchGDB, tempName), popDens, "DOUBLE") with arcpy.da.UpdateCursor( os.path.join(arcpy.env.scratchGDB, tempName), (popDens, pc_name, area_name)) as cursor: for row in cursor: row[0] = row[1] / row[2] cursor.updateRow(row) #Remove all but the desired fields retainList = [pc_name, area_name, popDens] fieldMappings = arcpy.FieldMappings() fieldMappings.addTable(os.path.join(arcpy.env.scratchGDB, tempName)) for fld in fieldMappings.fields: if fld.name not in retainList: fieldMappings.removeFieldMap( fieldMappings.findFieldMapIndex(fld.name)) #If there is only one time point (ie, the POPCHG box in the GUI is not checked), run the next block of code #and output the feature class as the final shapefile. if index == "": #Output written to disk result = arcpy.FeatureClassToFeatureClass_conversion( os.path.join(arcpy.env.scratchGDB, tempName), os.path.dirname(outFC), os.path.basename(outFC), "", fieldMappings) #Layer feature is created so that the result automatically displays in viewer once tool successfully runs makeLayer = arcpy.MakeFeatureLayer_management( result, os.path.join(arcpy.env.scratchGDB, "FC_layer")) lyr = makeLayer.getOutput(0) mxd = arcpy.mapping.MapDocument("CURRENT") dataframes = arcpy.mapping.ListDataFrames(mxd)[0] arcpy.mapping.AddLayer(dataframes, lyr, "TOP") #If there are two time points (ie, the POPCHG box in the GUI is checked), run this elif statement #The elif statement will save timepoint 1 to the scratch geodatabase. elif index == "_1": arcpy.FeatureClassToFeatureClass_conversion( os.path.join(arcpy.env.scratchGDB, tempName), os.path.join(arcpy.env.scratchGDB), os.path.join(tempName_1), "", fieldMappings) arcpy.AddMessage("Beginning the second time point.") #If there are two time points (ie, the POPCHG box in the GUI is checked), run this else statement #The first step in the else statement is to find a field that will be used to join the two time points. 
else: tempName_2 = "PopDens_2" arcpy.FeatureClassToFeatureClass_conversion( os.path.join(arcpy.env.scratchGDB, tempName), os.path.join(arcpy.env.scratchGDB), os.path.join(tempName_2), "", fieldMappings) #Look to see if a field of OID type exists for the first time point feature class type_1 = arcpy.ListFields( os.path.join(arcpy.env.scratchGDB, tempName_1)) for field in type_1: if field.type == "OID": OID_1 = field.name break else: OID_1 = "" #Look tosee if a field of OID type exists for the second time point feature class type_2 = arcpy.ListFields( os.path.join(arcpy.env.scratchGDB, tempName_2)) for field in type_2: if field.type == "OID": OID_2 = field.name break else: OID_2 = "" #If both time points contain a field of OID type, then join the two feature classes using this field if OID_1 != "" and OID_2 != "": arcpy.AddMessage("Joining first and second time point.") arcpy.JoinField_management( os.path.join(arcpy.env.scratchGDB, tempName_1), OID_1, os.path.join(arcpy.env.scratchGDB, tempName_2), OID_2, retainList) #Calculate the change in population between the two time points arcpy.AddField_management( os.path.join(arcpy.env.scratchGDB, tempName_1), "PopGrowth", "DOUBLE") with arcpy.da.UpdateCursor( os.path.join(arcpy.env.scratchGDB, tempName_1), ("PopGrowth", "popCount_1", "popCount_2")) as cursor: for row in cursor: row[0] = ((row[2] - row[1]) / row[1]) * 100 cursor.updateRow(row) #Remove all but the desired fields retainList = [ "popCount_1", "AREAKM2", "popDens_1", "popCount_2", "popDens_2", "PopGrowth" ] fieldMappings = arcpy.FieldMappings() fieldMappings.addTable( os.path.join(arcpy.env.scratchGDB, tempName_1)) for fld in fieldMappings.fields: if fld.name not in retainList: fieldMappings.removeFieldMap( fieldMappings.findFieldMapIndex(fld.name)) #Output written to disk result = arcpy.FeatureClassToFeatureClass_conversion( os.path.join(arcpy.env.scratchGDB, tempName_1), os.path.dirname(outFC), os.path.basename(outFC), "", fieldMappings) #Layer feature is created so that the result automatically displays in viewer once tool successfully runs makeLayer = arcpy.MakeFeatureLayer_management( result, os.path.join(arcpy.env.scratchGDB, "FC_layer")) lyr = makeLayer.getOutput(0) mxd = arcpy.mapping.MapDocument("CURRENT") dataframes = arcpy.mapping.ListDataFrames(mxd)[0] arcpy.mapping.AddLayer(dataframes, lyr, "TOP") #If at least one of the time points did not contain a field of OID type, then a new field will be added to both time points. #This field will be used to join the two feature classes. else: iterateList = [ os.path.join(arcpy.env.scratchGDB, tempName_1), os.path.join(arcpy.env.scratchGDB, tempName_2) ] #Add a field of unique IDs to both time points. 
for i in iterateList: if i == iterateList[0]: arcpy.AddField_management(i, "join_field", "LONG") with arcpy.da.UpdateCursor(iterateList[0], "join_field") as cursor: for row in cursor: row[0] = autoIncrement() cursor.updateRow(row) del cursor else: arcpy.AddField_management(i, "join_field", "LONG") rec = 0 with arcpy.da.UpdateCursor(iterateList[-1], "join_field") as cursor: for row in cursor: row[0] = autoIncrement() cursor.updateRow(row) del cursor #Join the two time points using the newly created field arcpy.AddMessage("Joining first and second time point.") arcpy.JoinField_management( os.path.join(arcpy.env.scratchGDB, tempName_1), "join_field", os.path.join(arcpy.env.scratchGDB, tempName_2), "join_field") #Calculate the change in population between the two time points arcpy.AddField_management( os.path.join(arcpy.env.scratchGDB, tempName_1), "PopGrowth", "DOUBLE") with arcpy.da.UpdateCursor( os.path.join(arcpy.env.scratchGDB, tempName_1), ("PopGrowth", "popCount_1", "popCount_2")) as cursor: for row in cursor: row[0] = ((row[2] - row[1]) / row[1]) * 100 cursor.updateRow(row) #Remove all but te desired fields retainList = [ "popCount_1", "AREAKM2", "popDens_1", "popCount_2", "popDens_2", "PopGrowth" ] fieldMappings = arcpy.FieldMappings() fieldMappings.addTable( os.path.join(arcpy.env.scratchGDB, tempName_1)) for fld in fieldMappings.fields: if fld.name not in retainList: fieldMappings.removeFieldMap( fieldMappings.findFieldMapIndex(fld.name)) #Output written to disk result = arcpy.FeatureClassToFeatureClass_conversion( os.path.join(arcpy.env.scratchGDB, tempName_1), os.path.dirname(outFC), os.path.basename(outFC), "", fieldMappings) #Layer feature is created so that the result automatically displays in viewer once tool successfully runs makeLayer = arcpy.MakeFeatureLayer_management( result, os.path.join(arcpy.env.scratchGDB, "FC_layer")) lyr = makeLayer.getOutput(0) mxd = arcpy.mapping.MapDocument("CURRENT") dataframes = arcpy.mapping.ListDataFrames(mxd)[0] arcpy.mapping.AddLayer(dataframes, lyr, "TOP") except Exception: e = sys.exc_info()[1] arcpy.AddError('An error occurred: {}'.format(e.args[0]))
def crashReportLRS(GDBspot, csv, fatalwt, seriouswt, nonseriouswt, possiblewt, IntersectionThreshold, SegmentThreshold): # workspace= "Z:/fullerm/Safety Locations/Safety.gdb" # Input parameters # GCAT file/location GCATfile = csv # csv created after mapping fields with schemaCleaner # Intermediate file/location? # Intersection polygon file/location # IntersectionFeatures = arcpy.GetParameterAsText(1) # IntersectionFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/Fed_Aid_2010_LucWoo_Intersection_Buffer_Dissolve" IntersectionFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/LMW_intersection_250ft_buffer_5Jul2017_3857" psFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/CS_IP_Merge_copy_clip_16june2017_LMW_3857" psThreshold = str(0) countyFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/County_FLOWHHMS_Clipped_3857" # Segment polygon file/location # SegmentFeatures = arcpy.GetParameterAsText(2) SegmentFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/LMW_segments_70ft_buffer_5Jul2017_3857" # output file name/location for the spatial join # GDBspot = arcpy.GetParameterAsText(1) # user input location for gdb and result excel tables # psThreshold = arcpy.GetParameterAsText(8) # output file name/location for excel table # TableFolder = arcpy.GetParameterAsText(4) # rdinv = arcpy.GetParameterAsText(8) rdinv = "C:/Users/fullerm/Documents/ArcGIS/Projects/Safety Report Script/Safety Report Script.gdb/Road_Inventory_CopyFeatures" # create geodatabase TimeDate = datetime.now() TimeDateStr = "CrashLocations" + TimeDate.strftime('%Y%m%d%H%M') + "_LRS" outputGDB = arcpy.CreateFileGDB_management(GDBspot, TimeDateStr) arcpy.env.workspace = str(outputGDB).replace('/', '\\') # I kept getting errors because arcmap sets the field type based on first dozen features and some ids were numeric '''fldmppng = arcpy.FieldMappings() fldmppng.addTable(GCATfile) nameFI = fldmppng.findFieldMapIndex("LOCAL_REPORT_NUMBER_ID") fldmp = fldmppng.getFieldMap(nameFI) fld = fldmp.outputField fld.name = "LOCAL_REPORT_NUMBER_ID" fld.aliasName = "LOCAL_REPORT_NUMBER_ID" fld.type = "String" fldmp.outputField = fld fldmppng.replaceFieldMap(nameFI,fldmp)''' # convert GCAT txt file to gdb table and add to map NewTable = arcpy.TableToTable_conversion(GCATfile, outputGDB, "OHMI_data") arcpy.TableSelect_analysis(NewTable, "oh_table", '"NLF_COUNTY_CD" <> \'Monroe\' ') ohTable = arcpy.CopyRows_management("oh_table", "ohTable") arcpy.TableSelect_analysis(NewTable, "mi_table", '"NLF_COUNTY_CD" = \'Monroe\' ') miTable = arcpy.CopyRows_management('mi_table', 'miTable') # arcpy.SelectLayerByAttribute_management(NewTable,"CLEAR_SELECTION") rdlyr = arcpy.MakeFeatureLayer_management(rdinv, "rdlyr") rtloc = os.path.join( GDBspot, "Road_Inventory3456_CreateRoutes" + TimeDateStr + ".shp") lrs = arcpy.CreateRoutes_lr(rdlyr, "NLF_ID", rtloc, "TWO_FIELDS", "CTL_BEGIN", "CTL_END") event_props = "NLFID POINT COUNTY_LOG_NBR" PointFile = arcpy.MakeRouteEventLayer_lr(lrs, "NLF_ID", ohTable, event_props, "Crash_Events") # creating this extra feature class and working from it instead of the event layer # decreased script tool runtime from ~8 min to ~2 min arcpy.SelectLayerByAttribute_management(PointFile, "clear_selection") pointOH = arcpy.FeatureClassToFeatureClass_conversion( PointFile, outputGDB, "GCAT_LUCWOO_lrs_points_" + TimeDateStr) # pointOH = arcpy.CopyFeatures_management(PointFile, "LRS_Events_copy") mi_points = milocationsxy(miTable, outputGDB) pointcopy = 
arcpy.Merge_management([pointOH, mi_points], 'miohpointsmerge') dict = { 'fatalities_count': "ODPS_TOTAL_FATALITIES_NBR<>0", 'incapac_inj_count': "Incapac_injuries_NBR<>0 and ODPS_TOTAL_FATALITIES_NBR=0", 'non_incapac_inj_count': "non_incapac_injuries_NBR<>0 and ODPS_TOTAL_FATALITIES_NBR=0 and incapac_injuries_nbr=0", 'possible_inj_count': "possible_injuries_nbr<>0 and ODPS_TOTAL_FATALITIES_NBR=0 and non_incapac_injuries_nbr=0 and incapac_injuries_nbr=0" } fld_lst = [ 'SEVERITY_BY_TYPE_CD', 'fatalities_count', 'incapac_inj_count', 'non_incapac_inj_count', 'possible_inj_count' ] # add fields for point layer for key in dict: arcpy.AddField_management(pointcopy, key, "LONG") '''arcpy.SelectLayerByAttribute_management(PointFile, "NEW_SELECTION", dict[key]) arcpy.CalculateField_management(PointFile, key, 1) arcpy.SelectLayerByAttribute_management(PointFile, "Switch_selection") arcpy.CalculateField_management(PointFile, key, 0)''' # fillCountFields(pointcopy, fld_lst) with arcpy.da.UpdateCursor(pointcopy, fld_lst) as cursor: for row in cursor: if row[0] == 'Fatal Crashes': row[1] = 1 row[2] = 0 row[3] = 0 row[4] = 0 elif row[0] == 'Incapacitating Injury Crashes': row[1] = 0 row[2] = 1 row[3] = 0 row[4] = 0 elif row[0] == 'Non-Incapacitating Injury Crashes': row[1] = 0 row[2] = 0 row[3] = 1 row[4] = 0 elif row[0] == 'Possible Injury Crashes': row[1] = 0 row[2] = 0 row[3] = 0 row[4] = 1 else: row[1] = 0 row[2] = 0 row[3] = 0 row[4] = 0 cursor.updateRow(row) # Clear Selected Features arcpy.SelectLayerByAttribute_management(PointFile, "clear_selection") # PointFeatures2 = arcpy.CopyFeatures_management(PointFeatures,os.path.join(GDBspot, TimeDateStr + ".gdb\PointFeatures2")) PointFeatures = arcpy.FeatureClassToFeatureClass_conversion( pointcopy, outputGDB, "ohmi_points_copy" + TimeDateStr) ftype = { 'Intersection': [IntersectionThreshold, IntersectionFeatures], 'Segment': [SegmentThreshold, SegmentFeatures], 'Subdivision': [psThreshold, psFeatures] } # field map and merge rules attchmnt = [] writer = pandas.ExcelWriter(os.path.join(GDBspot, "Top_Locations.xlsx"), engine='xlsxwriter') for f in ftype: # Create a new fieldmappings and add the two input feature classes. fieldmappings = arcpy.FieldMappings() fieldmappings.addTable(ftype[f][1]) fieldmappings.addTable(PointFeatures) # First get the fieldmaps. POP1990 is a field in the cities feature class. # The output will have the states with the attributes of the cities. Setting the # field's merge rule to mean will aggregate the values for all of the cities for # each state into an average value. The field is also renamed to be more appropriate # for the output. 
addSumFlds(fieldmappings) # Run the Spatial Join tool, using the defaults for the join operation and join type loc = os.path.join(GDBspot, TimeDateStr + ".gdb\\" + f + "Join_LRS") Join = arcpy.SpatialJoin_analysis(ftype[f][1], PointFeatures, loc, "Join_one_to_one", "keep_all", fieldmappings) arcpy.AddField_management(Join, "PDO_", "LONG") arcpy.AddField_management(Join, "EPDO_Index", "DOUBLE") # CRLRS_EPDO_index(Join) CursorFlds = [ 'PDO_', 'EPDO_Index', 'Join_Count', 'sum_fatalities_count', 'sum_incapac_inj_count', 'sum_non_incapac_inj_count', 'sum_possible_inj_count' ] # determine PDO and EPDO Index/Rate with arcpy.da.UpdateCursor(Join, CursorFlds) as cursor: for row in cursor: try: row[0] = row[2] - int(row[3]) - int(row[4]) - int( row[5]) - int(row[6]) except: row[0] = 0 # null or divide by zero are the major exceptions we are handling here try: row[1] = (float(row[3]) * fatalwt + float(row[4]) * seriouswt + float(row[5]) * nonseriouswt + float(row[6]) * possiblewt + float(row[0])) / float(row[2]) except: row[1] = 0 # null or divide by zero are the major exceptions we are handling here cursor.updateRow(row) # delete unnecessary fields keepFlds = [ 'OBJECTID', 'Shape', 'Shape_Area', 'Shape_Length', 'Name', 'NAMELSAD', 'COUNTY', 'COUNTY_NME', 'Join_Count', 'sum_fatalities_count', 'sum_incapac_inj_count', 'sum_non_incapac_inj_count', 'sum_possible_inj_count', 'PDO_', 'EPDO_Index', 'Fed_Aid_Buffer_Segments_2_Name', 'Length_ft', 'County' ] # lstFlds = arcpy.ListFields(Join) dropFlds = [ x.name for x in arcpy.ListFields(Join) if x.name not in keepFlds ] # delete fields arcpy.DeleteField_management(Join, dropFlds) # select high crash locations JoinLayer = arcpy.MakeFeatureLayer_management( Join, os.path.join(GDBspot, TimeDateStr + ".gdb\\" + f + "JoinLayer")) arcpy.AddMessage("{}".format(type(JoinLayer))) # arcpy.SelectLayerByAttribute_management(JoinLayer, "NEW_SELECTION", "Join_Count >=" + ftype[f][0]) fld_nmes = [fld.name for fld in arcpy.ListFields(JoinLayer)] fld_nmes.remove( 'Shape' ) # I think this field kept causing an exception: Data must be 1 dimensional arcpy.AddMessage("{}".format(fld_nmes)) arcpy.AddMessage("{}".format( type( os.path.join(GDBspot, TimeDateStr + ".gdb\\" + f + "JoinLayer")))) # do this because political sud # fields can be list or tuple, list works when 'Shape' field removed n = arcpy.da.FeatureClassToNumPyArray(JoinLayer, fld_nmes, where_clause="Join_Count >=" + ftype[f][0], skip_nulls=False, null_value=0) df = pandas.DataFrame(n) CRLRS_excel_export(df, f, writer) writer.save() return os.path.join(GDBspot, "Top_Locations.xlsx")
outfield5 = "DataSourceID" outfield6 = "Symbol" outfield7 = "Notes" print (("Adding " + featureclassin + " field map to. . .")) # Create a fieldmappings object and two fieldmap objects # input1 = arcpy.FieldMap() input2 = arcpy.FieldMap() input3 = arcpy.FieldMap() input4 = arcpy.FieldMap() input5 = arcpy.FieldMap() input6 = arcpy.FieldMap() input7 = arcpy.FieldMap() fieldmappings = arcpy.FieldMappings() # Add input fields # to fieldmap object. # input1.addInputField(inFC,infield1) input2.addInputField(inFC,infield2) input3.addInputField(inFC,infield3) input4.addInputField(inFC,infield4) input5.addInputField(inFC,infield5) input6.addInputField(inFC,infield6) input7.addInputField(inFC,infield7) # Set the Name of the Field output from this field map. # output1 = input1.outputField
rename_fields(out_group_table, names_dict, clear_alias)

# Join to the PPA summary feature class created as a result of the Summarize Within step
join_args = {'in_layer_or_view': out_feature_class,
             'in_field': 'Join_ID',
             'join_table': out_group_table,
             'join_field': 'Join_ID',
             'join_type': 'KEEP_ALL'}
ppa_elig_join = arcpy.AddJoin_management(**join_args)

# Copy the joined table to a file geodatabase
join_output_location = r'\\Mac\Home\Documents\Planning\Growth_Framework_Analysis\Growth_Framework_Analysis_Areas.gdb'
join_output_table = r'Draft_Regional_PPA_2019_Eligibility_Analysis'
elg_analysis_field_mapping = arcpy.FieldMappings()
elg_analysis_final_fields = ['County', 'Jurisdiction', 'PPA_Name', 'Total_Criteria',
                             'Acres_Intersect', 'Percent_Intersect']
elg_analysis_field_mapping.addTable(ppa_elig_join)
issolate_fields(elg_analysis_final_fields, elg_analysis_field_mapping)
# arcpy.TableToTable_conversion(ppa_elig_join, join_output_location, join_output_table, None, elg_analysis_field_mapping)
fc_to_fc_args = {'in_features': ppa_elig_join,
                 'out_path': join_output_location,
                 'out_name': join_output_table,
                 'where_clause': None,
                 'field_mapping': elg_analysis_field_mapping}
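# -- Assumed continuation (not part of the original excerpt): unpack the keyword
# -- arguments built above into the conversion tool to write the joined,
# -- field-mapped records out to the file geodatabase.
arcpy.FeatureClassToFeatureClass_conversion(**fc_to_fc_args)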
        row[3] = 0
    cursor.updateRow(row)

# Spatial join between the buffers and the vugis points, where BSM_INDICE is summed.
# The list of features that need a spatial join
fc = ['in_memory\\buffer250', 'in_memory\\buffer150', 'in_memory\\buffer50']
x = 0
for feature in fc:
    # Create the necessary FieldMap and FieldMappings objects
    fm1 = arcpy.FieldMap()       # FieldMap for the input layer
    fm2 = arcpy.FieldMap()       # FieldMap for the joined layer
    fms = arcpy.FieldMappings()  # the FieldMappings object

    ## NOTE TO SELF: remember to suggest to ESRI's head of R&D that they stop
    ## slipping LSD into the engineers' coffee before 5 p.m.

    # Each field is added to the FieldMap fm1
    fm1.addInputField(feature, 'GENRE_ROUTE')
    fm1.addInputField(feature, 'NO_ACCIDENT')
    fm1.addInputField(feature, 'LOCALITE')

    # Add the BSM_INDICE field to the second FieldMap object
    fm2.addInputField(vugis_copy, "BSM_INDICE")

    # Set the merge rule to find the Sum of all fields in the FieldMap object fm2
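    # -- Assumed continuation (not part of the original excerpt): a minimal sketch of
    # -- how the Sum merge rule is typically applied and the join run. The output
    # -- field name and in_memory output path are illustrative assumptions.
    fm2.mergeRule = 'Sum'
    out_fld = fm2.outputField
    out_fld.name = 'SUM_BSM_INDICE'       # hypothetical output field name
    out_fld.aliasName = 'SUM_BSM_INDICE'
    fm2.outputField = out_fld

    # Register both field maps, then run a one-to-one spatial join so each buffer
    # receives the summed BSM_INDICE of the points that fall inside it.
    fms.addFieldMap(fm1)
    fms.addFieldMap(fm2)
    arcpy.SpatialJoin_analysis(feature, vugis_copy,
                               'in_memory\\buffer_join{0}'.format(x),  # hypothetical output name
                               'JOIN_ONE_TO_ONE', 'KEEP_ALL', fms)
    x += 1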
def __copyFeatures(self, out_workspace, output_fields=None, data_type="FEATURE_CLASS"):
    '''Copies inputs to a new feature class, preserving the ObjectIDs and output_fields
    if present on the input. This is used to make a copy of the input feature sets
    before modifying them. output_fields is a list of field names and should NOT
    contain the shape field. If output_fields is None, only the OID and Shape fields
    from the inputs are preserved, with the OID carried over as "ORIG_FID".
    data_type can be 'FEATURE_CLASS' or 'TABLE' and determines whether the copy is
    made as a feature class or a table.'''
    if self.count == 0:
        self.isCopy = False
        return
    copy_functions = {
        "FEATURE_CLASS": arcpy.conversion.FeatureClassToFeatureClass,
        "TABLE": arcpy.conversion.TableToTable
    }
    if data_type in copy_functions:
        copy_function = copy_functions[data_type]
    else:
        raise arcpy.ExecuteError(
            "unsupported data type {0} for copying inputs".format(data_type))
    input_oid_field_name = self.describeObject.oidFieldName
    # Convert input field names to upper case to perform a case-insensitive exists check
    input_field_names = [f.upper() for f in self.fieldNames]
    # Create a unique name for the inputs in the out_workspace
    unique_name = "TempInput"
    output_feature_class = arcpy.CreateUniqueName(unique_name, out_workspace)
    output_feature_class_name = os.path.basename(output_feature_class)
    # Create a field mappings object to only transfer output_fields present on input_features
    field_mappings = arcpy.FieldMappings()
    # Transfer the OID field from the input as ORIG_FID
    if self.describeObject.hasOID:
        oid_field_map = arcpy.FieldMap()
        try:
            oid_field_map.addInputField(self.inputFeatures, input_oid_field_name)
        except Exception:
            oid_field_map.addInputField(self.describeObject.catalogPath, input_oid_field_name)
        output_fld = oid_field_map.outputField
        output_fld.name = "ORIG_FID"
        output_fld.aliasName = "ORIG_FID"
        oid_field_map.outputField = output_fld
        field_mappings.addFieldMap(oid_field_map)
    # Guard against output_fields being None (the documented default)
    for fld in (output_fields or []):
        # Skip any field named SHAPE, since the copy transfers the shape field anyway
        ucase_fld_name = fld.upper()
        if ucase_fld_name == "SHAPE":
            continue
        if ucase_fld_name in input_field_names:
            field_map = arcpy.FieldMap()
            try:
                field_map.addInputField(self.inputFeatures, fld)
            except Exception:
                field_map.addInputField(self.describeObject.catalogPath, fld)
            field_mappings.addFieldMap(field_map)
    # Make sure we don't use arcpy.env.extent or arcpy.env.outputCoordinateSystem when making
    # a copy, as we want all features from the inputs in their original spatial reference.
    orig_extent = arcpy.env.extent
    orig_outsr = arcpy.env.outputCoordinateSystem
    arcpy.env.extent = None
    arcpy.env.outputCoordinateSystem = None
    try:
        copy_function(self.inputFeatures, out_workspace, output_feature_class_name,
                      field_mapping=field_mappings)
    finally:
        arcpy.env.extent = orig_extent
        arcpy.env.outputCoordinateSystem = orig_outsr
    # Update the describe object
    self.isCopy = True
    self.describeObject = arcpy.Describe(output_feature_class)