def construct_sql_queries_from_time_bin(nested_time_bin_pairs, dataSource,
                                        start_time_field, end_time_field=None):
    """Takes nested time bin pairs and constructs Esri file-format SQL
    queries to extract data between the two datetimes of each bin.
    Returns a list of SQL queries based on the time bins."""
    if end_time_field is None:
        end_time_field = start_time_field
    QueryList = []
    time_format = "%Y-%m-%d %H:%M:%S"
    prepended_sql_time = "date "
    start_field = arcpy.AddFieldDelimiters(dataSource, start_time_field)
    end_field = arcpy.AddFieldDelimiters(dataSource, end_time_field)
    for bin in nested_time_bin_pairs:
        start_time = bin[0]
        end_time = bin[1]
        start_string = start_time.strftime(time_format)
        end_string = end_time.strftime(time_format)
        SQLQuery = "{0} >= {1} '{2}' AND {3} < {4} '{5}'".format(
            start_field, prepended_sql_time, start_string,
            end_field, prepended_sql_time, end_string)
        QueryList.append(SQLQuery)
    return QueryList
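# A usage sketch for construct_sql_queries_from_time_bin, assuming arcpy is
# imported at module level and a file geodatabase feature class with a DATE
# field exists. The path "C:/data/gps.gdb/tracks" and the field "fix_time"
# are hypothetical placeholders, not from the original source.
def _demo_construct_sql_queries():
    import datetime
    bins = [(datetime.datetime(2020, 1, 1, 0, 0, 0),
             datetime.datetime(2020, 1, 1, 1, 0, 0)),
            (datetime.datetime(2020, 1, 1, 1, 0, 0),
             datetime.datetime(2020, 1, 1, 2, 0, 0))]
    for query in construct_sql_queries_from_time_bin(
            bins, r"C:/data/gps.gdb/tracks", "fix_time"):
        # e.g. "fix_time" >= date '2020-01-01 00:00:00' AND "fix_time" < date '2020-01-01 01:00:00'
        print(query)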
def addOrUpdateProject(self, project_Id, UID, project_AOI):
    # Where ProjectID = '<project_ID>'
    where_clause = "{} = '{}'".format(
        arcpy.AddFieldDelimiters(self.fclass, field_QC_ProjID), project_Id)
    row = [UID, project_Id, project_AOI]
    Utility.addOrUpdateRecord(in_table=self.fclass,
                              field_names=self.fields,
                              uidIndex=self.uid_index,
                              where_clause=where_clause,
                              rowValueList=row)
    return row
def BuildSQL(inputFC, field, value):
    # Add appropriate delimiters
    fieldDelim = arcpy.AddFieldDelimiters(inputFC, field)
    # Determine field type
    fieldType = arcpy.ListFields(inputFC, field)[0].type
    # Add single quotes for string types
    if str(fieldType) == 'String':
        value = "'{0}'".format(value)
    whereClause = "{0} = {1}".format(fieldDelim, value)
    return (True, whereClause)
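# A minimal sketch of calling BuildSQL, with a hypothetical shapefile path and
# field names. For a String field the value is single-quoted; other types are
# passed through unquoted.
def _demo_build_sql():
    ok, where = BuildSQL(r"C:/data/parcels.shp", "ZONING", "R1")
    # where -> "ZONING" = 'R1' (shapefiles get double-quote field delimiters)
    print(ok, where)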
def deleteParentAPN(table, apn):
    # tempList = []
    for i in range(len(apn)):
        # Delimit the APN field against the table actually being updated
        # (the original delimited against a module-level dataset instead)
        expression = "{}='{}'".format(
            arcpy.AddFieldDelimiters(table, 'APN'), apn[i])
        with arcpy.da.UpdateCursor(table, ["*"],
                                   where_clause=expression) as cursor:
            for delRow in cursor:
                # tempList.append(apn[1])
                print "Deleting:\n {}".format(delRow)
                # cursor.deleteRow()
        del cursor
def buildTableList(table, missingAPNs):
    field_names = [f.name for f in arcpy.ListFields(table)]
    fields = field_names[1:]
    tableList = []
    for missingValues in missingAPNs:
        with arcpy.da.SearchCursor(
                table, field_names=fields,
                where_clause="{} = '{}'".format(
                    arcpy.AddFieldDelimiters(table, 'pl_csa_apn'),
                    missingValues)) as cursor:
            for row in cursor:
                tableList.append(row)
    return tableList
def get_trib_confluences(fcInputPoints):
    """Finds and selects network nodes that are tributary confluences and
    separates these points out into a new point shapefile.
    :param fcInputPoints: point shapefile (nodes from stream network)
    :return: point shapefile representing only tributary confluences
    """
    test_field(fcInputPoints, "_nodetype_")
    sql = "{0} = 'TC' OR {0} = 'HC'".format(
        arcpy.AddFieldDelimiters(fcInputPoints, "_nodetype_"))
    arcpy.SelectLayerByAttribute_management(fcInputPoints, "NEW_SELECTION", sql)
    nodesTC = "in_memory\\nodes_tc"
    arcpy.CopyFeatures_management(fcInputPoints, nodesTC)
    return nodesTC
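# Usage sketch (hypothetical path): get_trib_confluences expects a feature
# layer so SelectLayerByAttribute can operate on a selection, and the layer
# must carry the "_nodetype_" field that test_field checks above.
def _demo_get_trib_confluences():
    arcpy.MakeFeatureLayer_management(r"C:/data/network_nodes.shp", "nodes_lyr")
    confluences = get_trib_confluences("nodes_lyr")
    print(arcpy.GetCount_management(confluences).getOutput(0))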
def findBetween68and100(db):
    expression = "{} = {}".format(
        arcpy.AddFieldDelimiters(db, 'District_ID'), 681852)
    levyCounter = 0
    with arcpy.da.UpdateCursor(db, ['APN', 'Levy', 'MaxTax', 'tract'],
                               where_clause=expression) as levyCursor:
        for values in levyCursor:
            apn = values[0]
            levy = values[1]
            maxTax = values[2]
            tract = values[3]
            if 68.82 < levy < 100:
                levyCounter += 1
                print '{}:\n\t{}\t{}\t{}'.format(apn, levy, maxTax, tract)
    print levyCounter
def SelectRandomByCount(layer, count):
    layerCount = int(arcpy.GetCount_management(layer).getOutput(0))
    if layerCount < count:
        print "input count is greater than layer count"
        print "number of selection: " + str(arcpy.GetCount_management(layer))
        return
    oids = [oid for oid, in arcpy.da.SearchCursor(layer, "OID@")]
    oidFldName = arcpy.Describe(layer).OIDFieldName
    path = arcpy.Describe(layer).path
    delimOidFld = arcpy.AddFieldDelimiters(path, oidFldName)
    randOids = random.sample(oids, count)
    oidsStr = ", ".join(map(str, randOids))
    sql = "{0} IN ({1})".format(delimOidFld, oidsStr)
    arcpy.SelectLayerByAttribute_management(layer, "", sql)
    print "number of selection: " + str(arcpy.GetCount_management(layer))
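# Usage sketch: SelectRandomByCount works on a layer (not a raw feature class
# path) because SelectLayerByAttribute needs a layer. The data path and layer
# name are hypothetical; arcpy and random are assumed imported at module level.
def _demo_select_random_by_count():
    arcpy.MakeFeatureLayer_management(r"C:/data/parcels.gdb/parcels",
                                      "parcels_lyr")
    SelectRandomByCount("parcels_lyr", 50)  # selects 50 random features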
def getJobAoi(jobId):
    if jobId not in shape.keys():
        in_table = os.path.join(SDE_WMX_FILE_PATH, WMX_AOI_FC)
        field_names = ["SHAPE@"]
        uidIndex = None
        where_clause = "{} = {}".format(
            arcpy.AddFieldDelimiters(in_table, "JOB_ID"), jobId)
        # arcpy.AddMessage(where_clause)
        aoi = getExistingRecord(in_table, field_names, uidIndex,
                                where_clause)[0]
        # arcpy.AddMessage(aoi[0])
        shape[jobId] = aoi[0]
        arcpy.AddMessage("Job {} AOI: {}".format(jobId, shape[jobId]))
    return shape[jobId]
def calculateCountToIdentity(inputAOI, outputTheissen, endIdentPloy):
    arcpy.AddMessage("Calculate density start ...")
    outputWorkspace = os.path.dirname(outputTheissen)
    arcpy.env.scratchWorkspace = os.path.dirname(outputTheissen)
    arcpy.env.workspace = os.path.dirname(outputTheissen)
    IdentLayer = "IdentLayer"
    arcpy.Identity_analysis(inputAOI, outputTheissen, IdentLayer)
    arcpy.Select_analysis(outputTheissen, "tempCopy")
    arcpy.AddField_management("tempCopy", "count_", "LONG")
    fields = ('hexagonID', 'count_')
    arcpy.CalculateField_management("tempCopy", "count_", "0")
    values = [s[0] for s in arcpy.da.SearchCursor(IdentLayer, ("hexagonID"))]
    uniqueValues = set(values)
    arcpy.SetProgressor("step", "Calculate...", 0, len(uniqueValues), 1)
    for uv in uniqueValues:
        # Count how many identity features fall in this hexagon
        f = 0
        for v in values:
            if uv == v:
                f = f + 1
        expression = arcpy.AddFieldDelimiters("tempCopy", "hexagonID") + " = %d" % int(uv)
        with arcpy.da.UpdateCursor("tempCopy", fields, expression) as cursor:
            for row in cursor:
                if int(row[0]) == int(uv):
                    row[1] = long(f)
                    cursor.updateRow(row)
        arcpy.SetProgressorPosition()
    expression2 = arcpy.AddFieldDelimiters("tempCopy", "count_") + " > 0"
    arcpy.Select_analysis("tempCopy", endIdentPloy, expression2)
    arcpy.Delete_management("tempCopy")
    arcpy.Delete_management("IdentLayer")
    arcpy.AddMessage("Calculate density success ...")
def buildWhereClause(table, field, value):
    # Add DBMS-specific field delimiters
    fieldDelimited = arcpy.AddFieldDelimiters(table, field)
    # Determine field type
    fieldType = arcpy.ListFields(table, field)[0].type
    # Add single-quotes for string field values
    if str(fieldType) == 'String':
        value = "'%s'" % value
    # Format WHERE clause
    whereClause = "%s = %s" % (fieldDelimited, value)
    return whereClause
def testcreate_pie_chart(self):
    tab = fc = os.path.join(self.testing_gdb, 'Illinois_county_info')
    oid = arcpy.AddFieldDelimiters(tab, arcpy.Describe(tab).OIDFieldName)
    where = '{0} < 11'.format(oid)
    tv = arcpy.MakeTableView_management(tab, 'IL_table', where)
    fig = os.path.join(self.testingfolder, 'IL_county_pop.png')
    # will use 'CNTY_FIPS' as case field since our pop field is
    # already populated for each county
    ap.create_pie_chart(fig, tv, 'NAME', 'POP2000', 'IL Counties')
    self.assertTrue(os.path.exists(fig))
####    try:
####        arcpy.Delete_management(fig)  # may want to look at the figure, pretty cool!
####    except:
####        pass
def execute(self, parameters, messages):
    """The source code of the tool."""
    in_drainage_line = parameters[0].valueAsText
    out_csv_file = parameters[1].valueAsText
    in_max_nbr_upstreams = parameters[2].value

    with open(out_csv_file, 'w') as csvfile:
        connectwriter = csv.writer(csvfile, dialect='excel')
        fields = ['HydroID', 'NextDownID']
        list_all = []
        max_count_Upstream = 0
        # Sorting the cursor ensures rows in the output connectivity file
        # are arranged in ascending order of HydroID of stream segments
        for row in sorted(arcpy.da.SearchCursor(in_drainage_line, fields)):
            expression = arcpy.AddFieldDelimiters(
                arcpy.Describe(in_drainage_line).catalogPath,
                'NextDownID') + '=' + str(row[0])
            list_upstreamID = []
            # find the HydroID of the upstreams
            with arcpy.da.SearchCursor(in_drainage_line, fields,
                                       where_clause=expression) as cursor:
                for row2 in cursor:
                    list_upstreamID.append(row2[0])
            # count the total number of the upstreams
            count_upstream = len(list_upstreamID)
            if count_upstream > max_count_Upstream:
                max_count_Upstream = count_upstream
            # replace the nextDownID with 0 if it equals -1 (no next downstream)
            nextDownID = row[1]
            if nextDownID == -1:
                nextDownID = 0
            # append the Stream HydroID, NextDownID, count of upstream IDs,
            # and the HydroID of each upstream into a larger list
            list_all.append([row[0], nextDownID, count_upstream] + list_upstreamID)
        # If the input maximum number of upstreams is None, the actual max
        # number of upstreams found in the data is used
        if in_max_nbr_upstreams is None:
            in_max_nbr_upstreams = max_count_Upstream
        for row_list in list_all:
            out = row_list + [0 for i in range(in_max_nbr_upstreams - row_list[2])]
            connectwriter.writerow(out)
    return
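# The padding step at the end of execute() above is easy to miss: every row is
# [HydroID, NextDownID, upstream_count, up1, up2, ...] padded with zeros out to
# the maximum upstream count. A standalone sketch with hypothetical IDs:
def _demo_connectivity_row_padding():
    max_upstreams = 3
    row_list = [101, 0, 2, 201, 202]  # HydroID 101, an outlet, two upstreams
    padded = row_list + [0 for _ in range(max_upstreams - row_list[2])]
    print(padded)  # [101, 0, 2, 201, 202, 0]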
def buildWhereClause(table, field, value):
    """Constructs a SQL WHERE clause to select rows having the specified
    value within a given field and table."""
    # Add DBMS-specific field delimiters
    fieldDelimited = arcpy.AddFieldDelimiters(table, field)
    # Determine field type
    fieldType = arcpy.ListFields(table, field)[0].type
    # Add single-quotes for string field values
    if str(fieldType) == 'String':
        value = "'%s'" % value
    # Format WHERE clause
    whereClause = "%s = %s" % (fieldDelimited, value)
    return whereClause
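# A small sketch of how buildWhereClause output varies with the workspace,
# under the usual Esri delimiting rules (double quotes for shapefiles and file
# geodatabases, square brackets for personal geodatabases). Paths and fields
# here are hypothetical.
def _demo_build_where_clause():
    print(buildWhereClause(r"C:/data/wells.shp", "STATUS", "active"))
    # -> "STATUS" = 'active'   (String field, value quoted)
    print(buildWhereClause(r"C:/data/wells.gdb/wells", "DEPTH", 100))
    # -> "DEPTH" = 100         (numeric field, value unquoted)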
def get_streetlight_count(road_name, distance):
    ws = os.path.dirname(roads_cl_fc)
    field = arcpy.AddFieldDelimiters(ws, 'ROAD_NAME_')
    if road_name not in _get_unique_value(roads_cl_fc, 'ROAD_NAME_'):
        print(f"{road_name} doesn't exist")
    wc = f"{field} = '{road_name.upper()}'"
    select_road_name = arcpy.management.SelectLayerByAttribute(
        roads_cl_fc, 'NEW_SELECTION', where_clause=wc)
    select_street_lights = arcpy.management.SelectLayerByLocation(
        streetlight_fc, 'WITHIN_A_DISTANCE', select_road_name, distance,
        "SUBSET_SELECTION")
    number_of_selected = arcpy.GetCount_management(select_street_lights)
    return number_of_selected
def create_percent_access_polys(raw_cell_counts, percents, out_fc,
                                fields_to_preserve, scratch_workspace):
    '''For each percent threshold, dissolve the cells where the number of
    times reached exceeds the threshold. Each threshold gets its own polygon,
    and they are all output to the same feature class.
    Params:
    raw_cell_counts: Feature class of cell-like polygons with counts
        generated from create_raw_cell_counts_fc()
    percents: List of percents to calculate results for. Example: 80 means
        create a polygon representing the area that could be reached for at
        least 80% of start times.
    out_fc: Path of the output feature class for storing the percent access
        polygons
    fields_to_preserve: Fields on raw_cell_counts to carry through the
        dissolve
    scratch_workspace: Workspace for temporary outputs
    '''
    first = True
    # guid is assumed to be defined at module level
    temp_out_dissolve_fc = os.path.join(scratch_workspace,
                                        "Temp_" + guid + "_Dissolve")
    for percent in sorted(percents):
        # Select all the cells where the number of times with access is >=
        # our percent threshold. The result is all the cells that are
        # reachable at least X% of start times.
        query = arcpy.AddFieldDelimiters(raw_cell_counts, "Percent") + " >= " + str(percent)
        percent_layer = arcpy.management.MakeFeatureLayer(
            raw_cell_counts, "PercentLayer", query).getOutput(0)
        # Dissolve everything that meets the threshold into one polygon
        if first:
            out_Dissolve = out_fc
        else:
            out_Dissolve = temp_out_dissolve_fc
        arcpy.management.Dissolve(percent_layer, out_Dissolve,
                                  fields_to_preserve)
        percent_field = "Percent"
        arcpy.management.AddField(out_Dissolve, percent_field, "DOUBLE")
        arcpy.management.CalculateField(out_Dissolve, percent_field,
                                        str(percent))
        if not first:
            # If this wasn't the first percent output, append it to the
            # master output fc
            arcpy.management.Append(out_Dissolve, out_fc, "TEST")
        first = False
    # Clean up temporary output
    if arcpy.Exists(temp_out_dissolve_fc):
        arcpy.management.Delete(temp_out_dissolve_fc)
def BuildQuery(featureClass, whereField, value):
    """Builds a valid query string for finding all records in featureClass
    that have value in whereField.

    Building a valid query is a pain. We need to correctly delimit the field
    name (with "" or []) depending on the database, and correctly delimit the
    value based on the field type. Does not correctly handle delimiting
    datetime data types. This will throw an exception if whereField is not
    in featureClass."""
    field = arcpy.AddFieldDelimiters(featureClass, whereField)
    fields = arcpy.ListFields(featureClass)
    fieldType = [f.type.lower() for f in fields if f.name == whereField]
    if fieldType and fieldType[0] == "string":
        quote = "'"
    else:
        quote = ""
    return field + " = " + quote + str(value) + quote
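# Usage sketch for BuildQuery with hypothetical data. Unlike buildWhereClause
# above, the string check here is done on the lowercased field type.
def _demo_build_query():
    print(BuildQuery(r"C:/data/trails.gdb/trails", "SURFACE", "gravel"))
    # -> "SURFACE" = 'gravel'
    print(BuildQuery(r"C:/data/trails.gdb/trails", "MILES", 2.5))
    # -> "MILES" = 2.5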
def SelectRandomByPercent(layer, layer2, percent):
    fc = arcpy.Describe(layer).catalogPath
    featureCount = float(arcpy.GetCount_management(fc).getOutput(0))
    count = int(featureCount * float(percent) / float(100))
    if not count:
        arcpy.SelectLayerByAttribute_management(layer, "CLEAR_SELECTION")
        return
    oids = [oid for oid, in arcpy.da.SearchCursor(fc, "OID@")]
    oidFldName = arcpy.Describe(layer).OIDFieldName
    delimOidFld = arcpy.AddFieldDelimiters(layer, oidFldName)
    randOids = random.sample(oids, count)
    oidsStr = ", ".join(map(str, randOids))
    sql = "{0} IN ({1})".format(delimOidFld, oidsStr)
    output1 = arcpy.SelectLayerByAttribute_management(layer, "", sql)
    sql = "{0} NOT IN ({1})".format(delimOidFld, oidsStr)
    output2 = arcpy.SelectLayerByAttribute_management(layer2, "", sql)
    return output1, output2
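# Usage sketch: SelectRandomByPercent selects a random percent of features on
# one layer and the complement on another, e.g. for a train/test split. The
# two layers would normally point at the same feature class; names and paths
# are hypothetical, and random is assumed imported at module level.
def _demo_select_random_by_percent():
    arcpy.MakeFeatureLayer_management(r"C:/data/parcels.gdb/parcels", "train_lyr")
    arcpy.MakeFeatureLayer_management(r"C:/data/parcels.gdb/parcels", "test_lyr")
    train, test = SelectRandomByPercent("train_lyr", "test_lyr", 20)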
def updateProjIDAOI(project_id, row, aoi, fclass, fields, uid_index):
    # Where <field_Contract_ProjID> = '<project_id>'
    where_clause = "{} = '{}'".format(
        arcpy.AddFieldDelimiters(fclass, field_Contract_ProjID), project_id)
    index = fields.index(field_ProjectJob_SHAPE)
    row = list(row)
    if aoi is not None:
        row[index] = aoi
    Utility.addOrUpdateRecord(in_table=fclass,
                              field_names=fields,
                              uidIndex=uid_index,
                              where_clause=where_clause,
                              rowValueList=row)
    return row
def updateJobAOI(self, project_row, project_AOI):
    wmx_job_id = self.getWMXJobID(project_row)
    # Where WMXJobID = '<Job_ID>'
    where_clause = "{} = {}".format(
        arcpy.AddFieldDelimiters(self.fclass, field_ProjectJob_WMXJobID),
        wmx_job_id)
    project_row = list(project_row)
    index = self.fields.index(field_ProjectJob_SHAPE)
    project_row[index] = project_AOI
    Utility.addOrUpdateRecord(in_table=self.fclass,
                              field_names=self.fields,
                              uidIndex=self.uid_index,
                              where_clause=where_clause,
                              rowValueList=project_row)
    return project_row
def buildWhereClauseFromList(table, field, valueList):
    """Takes a list of values and constructs a SQL WHERE clause to select
    those values within a given field and table."""
    # Add DBMS-specific field delimiters
    fieldDelimited = arcpy.AddFieldDelimiters(arcpy.Describe(table).path, field)
    # Determine field type
    fieldType = arcpy.ListFields(table, field)[0].type
    # Add single-quotes for string field values
    if str(fieldType) == 'String':
        valueList = ["'%s'" % value for value in valueList]
    # Format WHERE clause in the form of an IN statement
    whereClause = "%s IN(%s)" % (fieldDelimited, ', '.join(map(str, valueList)))
    return whereClause
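# Usage sketch for buildWhereClauseFromList with hypothetical data. String
# values are quoted individually before being joined into the IN list;
# numeric values pass through unquoted.
def _demo_where_clause_from_list():
    print(buildWhereClauseFromList(r"C:/data/sites.gdb/sites", "STATE",
                                   ["WA", "OR", "ID"]))
    # -> "STATE" IN('WA', 'OR', 'ID')
    print(buildWhereClauseFromList(r"C:/data/sites.gdb/sites", "SITE_ID",
                                   [101, 102]))
    # -> "SITE_ID" IN(101, 102)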
def CreateMasterMosaicDatasets(wmxJobID):
    Utility.printArguments(["wmxJobID"], [wmxJobID],
                           "B02 CreateMasterMosaicDatasets")
    Utility.setWMXJobDataAsEnvironmentWorkspace(wmxJobID)
    MDMaster = CMDR.MDMaster()
    mdMaster_row = MDMaster.getMDMaster(wmxJobID)
    parent_path = MDMaster.getMDParentPath(mdMaster_row)
    mdMaster_path = MDMaster.getMDPath(mdMaster_row)
    MasterMDName = MDMaster.getMDName(mdMaster_row)
    MasterMDCellSize_Meters = MDMaster.getMDCellSize(mdMaster_row)
    # mdMaster_aoi = MDMaster.getMDAOI(mdMaster_row)
    if arcpy.Exists(parent_path):
        if not os.path.exists(mdMaster_path):
            os.makedirs(mdMaster_path)
        # master_fgdb_path = os.path.join(mdMaster_path, MasterMDName)
        md_list = [FoldersConfig.DTM, FoldersConfig.DSM, FoldersConfig.DLM,
                   FoldersConfig.DHM, FoldersConfig.DCM, FoldersConfig.INT]
        for md_name in md_list:
            local_fgdb_name = "{}_{}.gdb".format(MasterMDName, md_name)
            arcpy.AddMessage("local_fgdb_name '{}'".format(local_fgdb_name))
            local_fgdb_path = os.path.join(mdMaster_path, local_fgdb_name)
            arcpy.AddMessage("local_fgdb_path '{}'".format(local_fgdb_path))
            if not os.path.exists(local_fgdb_path):
                arcpy.AddMessage("creating MD master fGDB '{} / {}'".format(
                    mdMaster_path, local_fgdb_name))
                arcpy.CreateFileGDB_management(mdMaster_path, local_fgdb_name)
                Utility.addToolMessages()
            where_clause = "{} = {}".format(
                arcpy.AddFieldDelimiters(MDMaster.fclass,
                                         CMDRConfig.field_MDMaster_WMXJobID),
                wmxJobID)
            # mdMasterLayer = "MDMasterLayer"
            # arcpy.MakeFeatureLayer_management(in_features=MDMaster.fclass,
            #     out_layer=mdMasterLayer, where_clause=where_clause)
            local_fgdb_MDMasterFC = os.path.join(local_fgdb_path, MasterMDName)
            if not arcpy.Exists(local_fgdb_MDMasterFC):
                arcpy.FeatureClassToFeatureClass_conversion(
                    in_features=MDMaster.fclass,
                    out_path=local_fgdb_path,
                    out_name=MasterMDName,
                    where_clause=where_clause)
            CreateMasterMosaicDataset(local_fgdb_path, md_name,
                                      local_fgdb_MDMasterFC,
                                      MasterMDCellSize_Meters)
    else:
        arcpy.AddError("MD Master parent path doesn't exist '{}'. "
                       "Cannot continue.".format(parent_path))
def roadtoheat(site, inshp, res, kernel_dir, keyw, inFID, heatfield, sitedic,
               snapras, outdir):
    expr = """{0} = {1}""".format(arcpy.AddFieldDelimiters(inshp, inFID),
                                  str(site))
    print(expr)
    arcpy.MakeFeatureLayer_management(in_features=inshp, out_layer='lyr')
    arcpy.SelectLayerByAttribute_management('lyr',
                                            selection_type='NEW_SELECTION',
                                            where_clause=expr)
    # print(site)
    # int(arcpy.GetCount_management('lyr').getOutput(0)) would be faster
    # but often has a bug
    nshp = len([row[0] for row in arcpy.da.SearchCursor('lyr', [inFID])])
    # print(nshp)
    if nshp > 0:
        # print('{} features'.format(nshp))
        arcpy.ResetEnvironments()
        arcpy.env.extent = sitedic[site]
        arcpy.env.snapRaster = snapras
        outras = os.path.join(outdir,
                              'hpmstiger_{0}{1}.tif'.format(heatfield, site))
        if not arcpy.Exists(outras):
            print('{} does not exist, generate heatmap'.format(outras))
            tmpdir = os.path.join(os.path.dirname(outdir),
                                  'tmp_{}'.format(str(site)))
            os.mkdir(tmpdir)
            arcpy.env.scratchWorkspace = tmpdir
            arcpy.PolylineToRaster_conversion('lyr', value_field=heatfield,
                                              out_rasterdataset=outras,
                                              priority_field=heatfield,
                                              cellsize=res)
            customheatmap(kernel_dir=kernel_dir, in_raster=outras,
                          scratch_dir=tmpdir, out_gdb=outdir,
                          out_var=heatfield + str(site), divnum=100,
                          keyw=keyw, ext='.tif', verbose=True)
            arcpy.Delete_management(tmpdir)
    arcpy.SelectLayerByAttribute_management(
        'lyr', selection_type='CLEAR_SELECTION')  # might not be necessary
    arcpy.Delete_management('lyr')  # might not be necessary
def get_complete_cases(workspace, in_shapefile, out_shapefile, csv_file):
    # after http://desktop.arcgis.com/en/arcmap/10.4/tools/data-management-toolbox/delete-features.htm
    # Set environment settings
    arcpy.env.workspace = workspace
    # Set local variables
    inFeatures = in_shapefile
    outFeatures = out_shapefile
    tempLayer = "tempLayer"
    expression = arcpy.AddFieldDelimiters(tempLayer, "namrad_th") + " < 0"
    csv_delimeter = 'COMMA'
    # Execute CopyFeatures to make a new copy of the feature class
    arcpy.CopyFeatures_management(inFeatures, outFeatures)
    # Execute MakeFeatureLayer
    arcpy.MakeFeatureLayer_management(outFeatures, tempLayer)
    # Execute SelectLayerByAttribute to determine which features to delete
    arcpy.SelectLayerByAttribute_management(tempLayer, "NEW_SELECTION",
                                            expression)
    # Execute GetCount and if some features have been selected, then
    # execute DeleteFeatures to remove the selected features.
    if int(arcpy.GetCount_management(tempLayer).getOutput(0)) > 0:
        arcpy.DeleteFeatures_management(tempLayer)
    # os.remove(workspace + csv_file)
    csv_out = workspace + csv_file
    # Get field names
    fields = [x.name for x in arcpy.ListFields(outFeatures)]
    with open(csv_out, "wb") as f:
        wr = csv.writer(f)
        wr.writerow(fields)
        with arcpy.da.SearchCursor(outFeatures, fields) as cursor:
            for row in cursor:
                long_meters = int(round(row[1][0]))
                lat_meters = int(round(row[1][1]))
                new_row = list(row)
                new_row[4] = long_meters
                new_row[5] = lat_meters
                wr.writerow(new_row)
def importCSVIntoGeodatabase(csvFile, geodatabase):
    # Set the workspace to the folder holding the data
    folder = geodatabase
    arcpy.env.workspace = folder
    # Create the file geodatabase
    arcpy.CreateFileGDB_management(folder, geodatabase)
    # Set the new workspace to the geodatabase
    arcpy.env.workspace = geodatabase
    # Set the input table
    inTable = os.path.join(folder, csvFile)
    # Set the output table
    outTable = "yearly"
    # AddFieldDelimiters requires both a datasource AND a field name, and it
    # returns only the delimited field, not a complete expression. "Year" and
    # the comparison below are placeholders; substitute the field and value
    # you actually want to filter on, or pass "" to import all rows.
    expression = "{} > 0".format(
        arcpy.AddFieldDelimiters(arcpy.env.workspace, "Year"))
    # Export the table to the new geodatabase
    arcpy.TableToTable_conversion(inTable, arcpy.env.workspace, outTable,
                                  expression)
def save_streetlights(road_name, distance):
    ws = os.path.dirname(roads_cl_fc)
    field = arcpy.AddFieldDelimiters(ws, 'ROAD_NAME_')
    if road_name not in _get_unique_value(roads_cl_fc, 'ROAD_NAME_'):
        print(f"{road_name} doesn't exist")
    wc = f"{field} = '{road_name.upper()}'"
    select_road_name = arcpy.management.SelectLayerByAttribute(
        roads_cl_fc, 'NEW_SELECTION', where_clause=wc)
    selected_layer = arcpy.management.SelectLayerByLocation(
        streetlight_fc, 'WITHIN_A_DISTANCE', select_road_name, distance,
        "SUBSET_SELECTION")
    output = arcpy.SaveToLayerFile_management(
        selected_layer,
        r"C:\Users\admin\Desktop\4207\data\Ottawa\selected_street_lights.lyrx")
    return output
def truncate_date_time(in_fc, input_field, new_field_name, set_year=None,
                       set_month=None, set_day=None, set_hour=None,
                       set_minute=None, set_second=None, set_microsecond=None):
    """This function will take in a feature class and use pandas/numpy to
    truncate a date-time field so that the passed date-time attributes are
    set to a target."""
    try:
        # arc_print(pd.__version__) Does not have dt lib.
        arcpy.env.overwriteOutput = True
        desc = arcpy.Describe(in_fc)
        workspace = os.path.dirname(desc.catalogPath)
        col_new_field = arcpy.ValidateFieldName(
            san.create_unique_field_name(new_field_name, in_fc), workspace)
        san.add_new_field(in_fc, col_new_field, "DATE")
        OIDFieldName = arcpy.Describe(in_fc).OIDFieldName
        san.arc_print("Creating Pandas Dataframe from input table.")
        query = "{0} {1} {2}".format(
            arcpy.AddFieldDelimiters(in_fc, input_field), "is NOT", "NULL")
        fcDataFrame = san.arcgis_table_to_dataframe(
            in_fc, [input_field, col_new_field], query)
        JoinField = arcpy.ValidateFieldName("DFIndexJoin", workspace)
        fcDataFrame[JoinField] = fcDataFrame.index
        try:
            san.arc_print("Creating new date-time column based on field "
                          "{0}.".format(str(input_field)), True)
            fcDataFrame[col_new_field] = fcDataFrame[input_field].apply(
                lambda dt: assign_new_datetime(
                    dt, set_year, set_month, set_day, set_hour, set_minute,
                    set_second, set_microsecond)).astype(datetime.datetime)
            del fcDataFrame[input_field]
        except Exception as e:
            del fcDataFrame[input_field]
            san.arc_print(
                "Could not process datetime field. "
                "Check that datetime is a year appropriate to your python "
                "version and that the time format string is appropriate.")
            san.arc_print(e.args[0])
        san.arc_print("Exporting new time field dataframe to structured "
                      "numpy array.", True)
        finalStandardArray = fcDataFrame.to_records()
        san.arc_print("Joining new date-time field to feature class.", True)
        arcpy.da.ExtendTable(in_fc, OIDFieldName, finalStandardArray,
                             JoinField, append_only=False)
        san.arc_print("Delete temporary intermediates.")
        del fcDataFrame, finalStandardArray
        san.arc_print("Script Completed Successfully.", True)
    except arcpy.ExecuteError:
        san.arc_print(arcpy.GetMessages(2))
    except Exception as e:
        san.arc_print(e.args[0])
def WhereClauseFromList(table, field, valueList):
    """Takes a list of values and constructs a SQL WHERE clause to select
    those values within a given field and table."""
    # Add DBMS-specific field delimiters
    fieldDelimited = arcpy.AddFieldDelimiters(
        arcpy.Describe(table).path, field)
    # Determine field type
    fieldType = arcpy.ListFields(table, field)[0].type
    # Add single-quotes for string field values
    if unicode(fieldType) == u'String':
        valueList = [u'\'{}\''.format(value) for value in valueList]
    # Format WHERE clause in the form of an IN statement
    whereClause = u'{} IN({})'.format(fieldDelimited,
                                      u', '.join(map(unicode, valueList)))
    return whereClause
def tableTotable():
    # Import system modules
    import arcpy
    from arcpy import env

    # Set environment settings
    env.workspace = "C:/data"

    # Set local variables
    inTable = "vegtable.dbf"
    outLocation = "C:/output/output.gdb"
    outTable = "estuarine"

    # Set the expression, with help from the AddFieldDelimiters function,
    # to select the appropriate field delimiters for the data type
    expression = arcpy.AddFieldDelimiters(env.workspace, "VEG_TYPE") + " = 'Estuarine'"

    # Execute TableToTable
    arcpy.TableToTable_conversion(inTable, outLocation, outTable, expression)
def getIntersectOfGrids(lccObj, inLandCoverGrid, inSlopeGrid,
                        inSlopeThresholdValue, timer):
    # Generate the slope X land cover grid where areas below the threshold
    # slope are set to the value 'Maximum Land Cover Class Value + 1'.
    LCGrid = Raster(inLandCoverGrid)
    SLPGrid = Raster(inSlopeGrid)

    # find the highest value found in LCC XML file or land cover grid
    lccValuesDict = lccObj.values
    maxValue = LCGrid.maximum
    xmlValues = lccObj.getUniqueValueIdsWithExcludes()
    for v in xmlValues:
        if v > maxValue:
            maxValue = v

    AddMsg(timer.split() + " Generating land cover above slope threshold grid...")
    AreaBelowThresholdValue = int(maxValue + 1)
    delimitedVALUE = arcpy.AddFieldDelimiters(SLPGrid, "VALUE")
    whereClause = delimitedVALUE + " >= " + inSlopeThresholdValue
    SLPxLCGrid = Con(SLPGrid, LCGrid, AreaBelowThresholdValue, whereClause)

    # determine if a grid code is to be included in the effective reporting
    # unit area calculation; get the frozenset of excluded values (i.e.,
    # values not to use when calculating the reporting unit effective area)
    excludedValues = lccValuesDict.getExcludedValueIds()

    # If certain land cover codes are tagged as 'excluded = TRUE', generate a
    # grid where land cover codes are preserved for areas coincident with
    # steep slopes; areas below the slope threshold are coded with the
    # AreaBelowThresholdValue except where the land cover code is included in
    # the excluded values list. In that case, the excluded land cover values
    # are maintained in the low-slope areas.
    if excludedValues:
        # build a whereClause string (e.g. "VALUE" = 11 or "VALUE" = 12) to
        # identify where on the land cover grid excluded values occur
        AddMsg(timer.split() + " Inserting EXCLUDED values into areas below slope threshold...")
        stringStart = delimitedVALUE + " = "
        stringSep = " or " + delimitedVALUE + " = "
        whereExcludedClause = stringStart + stringSep.join(
            [str(item) for item in excludedValues])
        SLPxLCGrid = Con(LCGrid, LCGrid, SLPxLCGrid, whereExcludedClause)
    return SLPxLCGrid
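# A standalone sketch of the OR-joined where clause assembled above, using
# hypothetical excluded values so the string construction is easy to verify
# without running the geoprocessing steps.
def _demo_excluded_values_clause():
    delimitedVALUE = '"VALUE"'  # what AddFieldDelimiters typically returns here
    excludedValues = frozenset([11, 12, 21])
    stringStart = delimitedVALUE + " = "
    stringSep = " or " + delimitedVALUE + " = "
    print(stringStart + stringSep.join(str(item) for item in sorted(excludedValues)))
    # -> "VALUE" = 11 or "VALUE" = 12 or "VALUE" = 21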