def projekt_kopieren(self):
    """Copy an existing project folder to a new project path.

    Reads the source project name from the tool parameters, copies the
    whole project directory tree (skipping ArcGIS ``.lock`` files) and
    reports progress to the user.  Exits the script on any copy error
    (most likely: the target project already exists).
    """
    arcpy.SetProgressorLabel('Kopiere Projekt')
    arcpy.SetProgressorPosition(10)
    # get new Project Name as input
    projectNameOld = self.par.existing_project.value
    # copy template folder
    template_path = self.folders.get_projectpath(projectNameOld)
    project_path = self.folders.get_projectpath(check=False)
    try:
        def ignore_locks(path, filenames):
            # ArcGIS schema-lock files cannot (and must not) be copied
            return [f for f in filenames if f.endswith('.lock')]
        shutil.copytree(template_path, project_path, ignore=ignore_locks)
    except Exception as e:
        # any failure is reported as "project already exists" to the user
        arcpy.AddMessage(e)
        arcpy.AddMessage("Es ist ein Fehler beim Kopieren aufgetreten.")
        arcpy.AddMessage("Es scheint bereits ein Projekt "
                         "mit diesem Namen zu existieren")
        arcpy.AddMessage("Bitte geben Sie einen anderen Namen ein "
                         "oder nutzen Sie die 'Projekt löschen' Funktion "
                         "in der Toolbox")
        sys.exit()
    arcpy.SetProgressorPosition(70)
    # output information to user
    arcpy.AddMessage("Succesfully copied")
    arcpy.AddMessage("New Project registered at {}".format(project_path))
def populate_dll_defined_fields(dll, calculation, table, idfield):
    """Fill the dll-computed output fields for every row of *table*.

    For each row, the value of *idfield* is handed to the dll's
    ``calc_get_all_outputs`` which writes ``outlength`` floats into a
    ctypes buffer; those are copied into the fields listed in the
    module-global ``names``.
    """
    global outlength, names
    out_buffer_type = ctypes.c_float * outlength
    out_buffer = out_buffer_type()
    num_rows = int(arcpy.GetCount_management(table).getOutput(0))
    # BUG FIX: num_rows/100 is a float under Python 3, so
    # "n % increment == 0" essentially never fired; it could also be 0
    # for an empty table.  Integer division clamped to >= 1 fixes both.
    increment = max(1, num_rows // 100)
    arcpy.SetProgressor("step", "Writing output", 0, num_rows, increment)
    rows = arcpy.UpdateCursor(table)
    try:
        for n, row in enumerate(rows):
            if n % increment == 0:
                arcpy.SetProgressorPosition(n)
            arcid = row.getValue(idfield)
            dll.calc_get_all_outputs(calculation, out_buffer, arcid)
            for i, name in enumerate(names):
                row.setValue(name, out_buffer[i])
            rows.updateRow(row)
    finally:
        # release the edit lock even if the dll call raises
        del rows
    arcpy.SetProgressorPosition(num_rows)
def main(fcA, fcB, fcC):
    """Copy rows from *fcB* into *fcC* for every TSTYBM value in *fcA*.

    Progress is ticked once per 100 inserted rows.  Cursors are now
    closed deterministically (the original leaked the per-row search
    cursor and the insert cursor, which can leave schema locks behind).
    """
    fields = [
        'BSM', 'YSDM', 'FHDM', 'TSTYBM', 'CJR', 'CJRQ', 'BZ', 'MC', 'LJ',
        'JD', 'WD', 'DWMS', 'WXSL', 'FWJ', 'QCJ', 'FYJ', 'RKXH', 'RKSJ',
        'FJLX', 'SFSS', 'MD5', 'CHECK_CODE', 'SHAPE'
    ]
    count = 0
    with arcpy.da.SearchCursor(fcA, 'TSTYBM') as xfealist:
        zfealist = arcpy.da.InsertCursor(fcC, fields)
        try:
            for x in xfealist:
                # rows in fcB sharing this TSTYBM code
                with arcpy.da.SearchCursor(
                        fcB, fields,
                        'TSTYBM=\'{0}\''.format(x[0])) as yfealist:
                    for y in yfealist:
                        zfealist.insertRow(y)
                        count += 1
                        if count == 100:
                            count = 0
                            arcpy.SetProgressorPosition()
            arcpy.SetProgressorPosition()
        finally:
            # da.InsertCursor holds an edit lock until deleted
            del zfealist
def main(argv=None):
    """Import every configured source layer into the gizinta geodatabase.

    Creates (or compresses) the target workspace, then exports each
    dataset node from the configuration, logging a per-dataset result
    and an overall SUCCESS parameter.
    """
    success = True
    name = ''
    try:
        if not arcpy.Exists(gzSupport.workspace):
            gzSupport.addMessage(gzSupport.workspace + " does not exist, attempting to create")
            gzSupport.createGizintaGeodatabase()
        else:
            gzSupport.compressGDB(gzSupport.workspace)
        if len(datasets) > 0:
            progBar = len(datasets) + 1
            arcpy.SetProgressor("step", "Importing Layers...", 0, progBar, 1)
            arcpy.SetProgressorPosition()
        for dataset in datasets:
            gzSupport.sourceIDField = dataset.getAttributeNode(
                "sourceIDField").nodeValue
            sourceName = dataset.getAttributeNode("sourceName").nodeValue
            targetName = dataset.getAttributeNode("targetName").nodeValue
            arcpy.SetProgressorLabel("Loading " + sourceName + " to " +
                                     targetName + "...")
            # NOTE(review): sourceLayer is not defined in this function —
            # presumably a module-level global set by the caller; confirm
            if not arcpy.Exists(sourceLayer):
                gzSupport.addError("Layer " + sourceLayer +
                                   " does not exist, exiting")
                return
            target = os.path.join(gzSupport.workspace, targetName)
            arcpy.env.Workspace = gzSupport.workspace
            # existing target is deleted before re-export
            if not arcpy.Exists(target):
                gzSupport.addMessage("Feature Class " + target +
                                     " does not exist")
            else:
                arcpy.Delete_management(target)
            try:
                retVal = exportDataset(sourceLayer, targetName, dataset)
                if retVal == False:
                    success = False
            except:
                gzSupport.showTraceback()
                success = False
                retVal = False
            gzSupport.logDatasetProcess(sourceName, targetName, retVal)
            arcpy.SetProgressorPosition()
    except:
        gzSupport.addError("A Fatal Error occurred")
        gzSupport.showTraceback()
        success = False
        gzSupport.logDatasetProcess("extractLayerToGDB", name, False)
    finally:
        arcpy.ResetProgressor()
        arcpy.RefreshCatalog(gzSupport.workspace)
        arcpy.ClearWorkspaceCache_management(gzSupport.workspace)
        if success == False:
            gzSupport.addError(
                "Errors occurred during process, look in log files for more information"
            )
        # optionally downgrade failures to success per configuration
        if gzSupport.ignoreErrors == True:
            success = True
        gzSupport.closeLog()
        arcpy.SetParameter(SUCCESS, success)
def getRelation(outputxzkpath1, outputxzkpath2, enviroment, relationpath):
    """Collect old->new unified-code (TSTYBM) relations from two merged
    layers and persist them into a relation table."""
    relationList = []
    count = int(arcpy.GetCount_management(outputxzkpath1).getOutput(0)) + int(
        arcpy.GetCount_management(outputxzkpath2).getOutput(0))
    arcpy.SetProgressor('step', '7_获取照片点融合', 0, count, 1)
    with arcpy.da.SearchCursor(outputxzkpath1,
                               ["TSTYBM", "tstybmlist"]) as cur:
        for row in cur:
            # assumes a single TSTYBM is 36 chars — TODO confirm; longer
            # means the feature was merged from several old codes
            if len(row[1]) > 36:
                tstybmlist = row[1].split(",")
                for oldtstybm in tstybmlist:
                    relationList.append([oldtstybm, row[0]])
            arcpy.SetProgressorPosition()
    with arcpy.da.SearchCursor(outputxzkpath2,
                               ["TSTYBM", "tstybmlist"]) as cur:
        for row in cur:
            if len(row[1]) > 36:
                tstybmlist = row[1].split(",")
                for oldtstybm in tstybmlist:
                    relationList.append([oldtstybm, row[0]])
            arcpy.SetProgressorPosition()
    # persist old/new code pairs
    arcpy.CreateTable_management(enviroment, relationpath)
    newFields = ["oldfield", "newfields"]
    arcpyDeal.ensureFields(relationpath, newFields)
    insertcur = arcpy.da.InsertCursor(relationpath, newFields)
    for relation in relationList:
        insertcur.insertRow(relation)
        arcpy.SetProgressorPosition()
def dealNull(xzkPath, cskPath):
    """Clear placeholder values and refresh the name (*mc*) columns.

    In *xzkPath*: the placeholder string "0" is blanked, every other
    value is normalised via ``dealNone.dealNoneAndBlank``, and the three
    name columns are re-derived from their code columns.  In *cskPath*
    only the normalisation is applied.
    """
    searchFields = [
        'bsm', 'dlbm', 'dlmc', 'gdlx', 'tbxhdm', 'tbxhmc', 'gdzzsxdm',
        'gdzzsxmc', 'czcsxm', 'wjzlx', 'LINKTBS'
    ]
    # context manager releases the edit lock (original leaked the cursor)
    with arcpy.da.UpdateCursor(xzkPath, searchFields) as xzkcur:
        for row in xzkcur:
            # blank out placeholder "0" values
            for i in range(len(searchFields)):
                # BUG FIX: original used `row[i] in (u"0")` — string
                # membership, which also matched "" and raised TypeError
                # on non-string values; an equality test was intended
                if row[i] is not None and row[i] == u"0":
                    row[i] = ""
                else:
                    row[i] = dealNone.dealNoneAndBlank(row[i])
            # refresh the mc (name) columns from their code columns
            dlbm = row[1]
            tbxhdm = row[4]
            gdzzsxdm = row[6]
            row[2] = relation.getMC(dlbm)
            row[5] = relation.getMC(tbxhdm)
            row[7] = relation.getMC(gdzzsxdm)
            xzkcur.updateRow(row)
            arcpy.SetProgressorPosition()
    searchFields = ['bsm', 'dlbm', 'czcsxm']
    with arcpy.da.UpdateCursor(cskPath, searchFields) as cskcur:
        for row in cskcur:
            for i in range(len(searchFields)):
                row[i] = dealNone.dealNoneAndBlank(row[i])
            cskcur.updateRow(row)
            arcpy.SetProgressorPosition()
def updateTarget(targetpath, unionfzhlist, mergeunionfzhlist, targetValueDict):
    """Write the resolved union-fzh back onto every target feature.

    Each feature's union number is looked up in *targetValueDict*,
    remapped through *mergeunionfzhlist* when a merge applies, and
    replaced by a freshly generated number when it is empty.
    """
    cursor_fields = ['TSTYBM', 'unionfzh']
    with arcpy.da.UpdateCursor(targetpath, cursor_fields) as upd_cur:
        for rec in upd_cur:
            key = rec[0]
            value = targetValueDict[key]['unionfzh']
            if value in mergeunionfzhlist:
                value = mergeunionfzhlist[value]
            if value == "":
                value = getNewFzh(unionfzhlist)
            rec[1] = value
            upd_cur.updateRow(rec)
            arcpy.SetProgressorPosition()
def createRelation(relationList, enviroment, relationpath, dellinktbspath):
    """Persist old/new code pairs whose old TSTYBM still exists in the
    del-linktbs layer."""
    arcpy.SetProgressor('step', '6_设置LINKTBS值', 0, len(relationList), 1)
    arcpy.CreateTable_management(enviroment, relationpath)
    pair_fields = ["oldfield", "newfields"]
    arcpyDeal.ensureFields(relationpath, pair_fields)
    writer = arcpy.da.InsertCursor(relationpath, pair_fields)
    arcpy.MakeFeatureLayer_management(dellinktbspath, "dellinktbspath")
    for pair in relationList:
        # keep the pair only if its old code still matches a feature
        where = " TSTYBM = '%s'" % (pair[0])
        arcpy.SelectLayerByAttribute_management("dellinktbspath",
                                                where_clause=where)
        matches = int(arcpy.GetCount_management("dellinktbspath").getOutput(0))
        if matches > 0:
            writer.insertRow(pair)
        arcpy.SetProgressorPosition()
def checkdatas(dccgtb):
    """Run the TBYBH/WYM and 1-4 validation passes over every record of
    *dccgtb* and return the results keyed by objectid."""
    field_list = [
        "objectid", "TBYBH", "WYM", "BHLX", "DLBM", "ZZSXDM", "ZZSXMC",
        "TBXHDM", "TBXHMC", "GDLXMC"
    ]
    raw_records = []
    # the temp-field list mirrors the search fields one-to-one
    arcpyDeal.createTempDatas(field_list, list(field_list), dccgtb,
                              raw_records)
    checked = {}
    for record in raw_records:
        record = checkTBYBHAndTBWYM(record)
        record = check1234(record)
        checked[record["objectid"]] = record
        arcpy.SetProgressorPosition()
    return checked
def main(argv = None):
    """List the configured datasets in sourceGDB and truncate the rows
    of each one, logging a per-dataset result and an overall SUCCESS
    parameter."""
    success = True
    name = ''
    gzSupport.workspace = sourceGDB
    try:
        if len(datasetNames) == 0:
            names = gzSupport.listDatasets(sourceGDB)
            tNames = names[0]
        else:
            tNames = datasetNames
        arcpy.SetProgressor("Step","Deleting rows...",0,len(tNames),1)
        i = 0
        for name in tNames:
            arcpy.SetProgressorPosition(i)
            arcpy.SetProgressorLabel(" Deleting rows in " + name + "...")
            # for each full name
            if len(datasetNames) == 0 or gzSupport.nameTrimmer(name.upper()) in datasetNames:
                retVal = doTruncate(os.path.join(sourceGDB,name))
                gzSupport.logDatasetProcess("deleteRowsGDB",name,retVal)
                if retVal == False:
                    success = False
            else:
                gzSupport.addMessage("Skipping " + gzSupport.nameTrimmer(name))
            # BUG FIX: was "i = i + i", which kept the counter stuck at 0
            # so the progressor never advanced
            i = i + 1
    except:
        gzSupport.showTraceback()
        gzSupport.addError("Failed to delete rows")
        success = False
        gzSupport.logDatasetProcess("deleteRowsGDB",name,success)
    finally:
        arcpy.SetParameter(SUCCESS, success)
        arcpy.ResetProgressor()
        gzSupport.closeLog()
        arcpy.ClearWorkspaceCache_management(sourceGDB)
def geojson_lines_for_feature_class(in_feature_class):
    """Yield a GeoJSON FeatureCollection for *in_feature_class*, one
    output line at a time, reprojected to WGS 1984.

    Field aliases are used as property names; the auto-maintained
    Shape_Area/Shape_Length columns and the shape field are excluded.
    """
    shape_field = arcpy.Describe(in_feature_class).shapeFieldName
    spatial_reference = arcpy.SpatialReference('WGS 1984')
    aliased_fields = {
        field.name: (field.aliasName or field.name)
        for field in arcpy.ListFields(in_feature_class)
    }
    record_count = int(arcpy.management.GetCount(in_feature_class)[0])
    arcpy.SetProgressor("step", "Writing records", 0, record_count)
    with arcpy.da.SearchCursor(in_feature_class, ['SHAPE@', '*'],
                               spatial_reference=spatial_reference) as in_cur:
        counter = 0
        # BUG FIX: the original filtered Shape_Area/Shape_Length out of
        # the name list but zipped it against the UNfiltered value tuple,
        # shifting every property after the first skipped column.  Keep
        # (index, alias) pairs so names and values stay aligned.
        keep = [(idx, aliased_fields.get(f, f))
                for idx, f in enumerate(in_cur.fields[1:])
                if f not in ['Shape_Area', 'Shape_Length']]
        yield '{"type": "FeatureCollection", "features": ['
        for row_idx, row in enumerate(in_cur):
            counter += 1
            if (row_idx % 100 == 1):
                arcpy.SetProgressorPosition(row_idx)
            geometry_dict = geometry_to_struct(row[0])
            attrs = row[1:]
            property_dict = OrderedDict((name, attrs[idx])
                                        for idx, name in keep)
            if shape_field in property_dict:
                del property_dict[shape_field]
            row_struct = OrderedDict([
                ("type", "Feature"),
                ("properties", property_dict),
                ("geometry", geometry_dict),
            ])
            if counter < record_count:
                yield ' ' + json.dumps(row_struct) + ','
            else:
                yield ' ' + json.dumps(row_struct)  # no comma after final feature
        yield ']}'
def execute(self, parameters, messages):
    """Apply a UniqueValueRenderer on the LotType field to the layer
    named by parameter 1, then save the project as a copy.

    Parameters: [0] path to the .aprx project, [1] target layer name,
    [2] output project path.
    """
    # Define progressor
    readTime = 2.5
    start = 0
    maximum = 100
    step = 50
    # setup progressor
    arcpy.SetProgressor("step", "applying symbology...", start, maximum, step)
    time.sleep(readTime)
    arcpy.AddMessage("applying symbology...")
    pro_path = parameters[0].valueAsText
    project = arcpy.mp.ArcGISProject(pro_path)
    campus = project.listMaps('Map')[0]
    for lyr in campus.listLayers():
        if lyr.isFeatureLayer:
            symbology = lyr.symbology
            if hasattr(symbology, 'renderer'):
                # BUG FIX: was parameters[1].valueasText (wrong case),
                # which raised AttributeError at runtime
                if lyr.name == parameters[1].valueAsText:
                    symbology.updateRenderer("UniqueValueRenderer")
                    symbology.renderer.fields = ["LotType"]
                    # re-assign to push the change back into the layer
                    lyr.symbology = symbology
                else:
                    print("NOT GarageParking")
    arcpy.SetProgressorPosition(start + step)
    arcpy.SetProgressorLabel("saving new project...")
    time.sleep(readTime)
    arcpy.AddMessage("saving new project...")
    project.saveACopy(parameters[2].valueAsText)
    return
def run(self):
    """Delete the selected projects, reporting progress per project and
    listing any that were left behind by schema locks.  Re-points the
    active project if it was among those deleted."""
    # prevent eventual locks
    gc.collect()
    self.fragments_left = []
    arcpy.AddMessage(encode('Lösche Projekte'))
    doomed = self.par.projekte.values
    increment = 100 / len(doomed)
    progress = 0
    for proj in doomed:
        progress += increment
        arcpy.SetProgressorPosition(progress)
        arcpy.SetProgressorLabel(
            encode(u'Lösche Projekt {}'.format(proj)))
        self.compact_gdbs(proj)
        self.remove_project_from_output(proj)
        self.projekt_loeschen(proj)
    config = self.parent_tbx.config
    if self.fragments_left:
        arcpy.AddError(u'Folgende Projekte konnten aufgrund von '
                       u'Schemasperren nicht restlos entfernt werden:')
        arcpy.AddError(', '.join(self.fragments_left))
        arcpy.AddError('Bitte starten Sie ArcMap neu und '
                       'versuchen Sie es erneut!')
    # change active project, if it was deleted
    if config.active_project in doomed:
        remaining = self.folders.get_projects()
        config.active_project = remaining[0] if remaining else ''
def main(argv=None):
    """List the datasets of sourceGDB and truncate the rows of each one
    that is configured (or all of them when no names are given)."""
    success = True
    try:
        names = gzSupport.listDatasets(sourceGDB)
        tNames = names[0]
        tFullNames = names[1]
        arcpy.SetProgressor("Step", "Deleting rows...", 0, len(tFullNames), 1)
        i = 0
        for name in tFullNames:
            arcpy.SetProgressorPosition(i)
            arcpy.SetProgressorLabel(" Deleting rows in " + name + "...")
            # for each full name
            if len(datasetNames) == 0 or tNames[i].upper() in datasetNames:
                retVal = doTruncate(name)
                gzSupport.logDatasetProcess(name, "deleteRowsGDB", retVal)
                if retVal == False:
                    success = False
            else:
                gzSupport.addMessage("Skipping " + tNames[i])
            # BUG FIX: was "i += i", which left i stuck at 0 — the
            # progressor never moved and tNames[i] always read element 0
            i += 1
    except:
        gzSupport.showTraceback()
        # NOTE(review): pymsg is not defined in this scope — presumably a
        # module global set by showTraceback; confirm, else this line
        # itself raises NameError
        gzSupport.addError(pymsg)
        success = False
        gzSupport.logDatasetProcess(name, "deleteRowsGDB", success)
    finally:
        arcpy.SetParameter(SUCCESS, success)
        arcpy.ResetProgressor()
        gzSupport.closeLog()
        arcpy.ClearWorkspaceCache_management(sourceGDB)
def geojson_lines_for_table(in_feature_class, fieldlist):
    """Yield a GeoJSON FeatureCollection for a table, one text line at a
    time.  Only the attributes in *fieldlist* are emitted; features have
    no geometry member."""
    alias_map = {name: name for name in fieldlist}
    total = int(arcpy.management.GetCount(in_feature_class)[0])
    arcpy.SetProgressor("step", "Writing records", 0, total)
    with arcpy.da.SearchCursor(in_feature_class, fieldlist) as cursor:
        columns = [alias_map.get(name, name) for name in cursor.fields[0:]]
        yield '{'
        yield ' "type": "FeatureCollection",'
        yield ' "features": ['
        for index, record in enumerate(cursor):
            if index:
                # separator line between consecutive features
                yield " ,"
            if (index % 100 == 1):
                arcpy.SetProgressorPosition(index)
            feature = {
                "type": "Feature",
                "properties": dict(zip(columns, record[0:]))
            }
            for text_line in json.dumps(feature, indent=2).split("\n"):
                yield " " + text_line
        yield ' ]'
        yield '}'
def UpdateDats(jzzp):
    """Normalise photo rows in *jzzp*: blank WJZLX when it is empty,
    otherwise clear all photo metadata fields (name, camera angles,
    pixel coordinates, attachment info)."""
    fields = [
        'WJZLX', 'NAME', 'PSR', 'AZIM', 'ROLL', 'TILT', 'PICX', 'PICY',
        'FJLX', 'FJFW'
    ]
    # FIX: context manager releases the edit lock even on error — the
    # original cursor was never closed or deleted
    with arcpy.da.UpdateCursor(jzzp, fields) as cursor:
        for row in cursor:
            wjzlx = dealNone.dealNoneAndBlank(row[0])
            if wjzlx == "":
                row[0] = ""
            else:
                # keep WJZLX but reset every metadata column
                row[1] = ""
                row[2] = ""
                row[3] = 0
                row[4] = 0
                row[5] = 0
                row[6] = 0
                row[7] = 0
                row[8] = ""
                row[9] = ""
            cursor.updateRow(row)
            arcpy.SetProgressorPosition()
def check(datas):
    """Partition parcel codes: a TSTYBM goes into the first list when
    any of the four checks flags its record, otherwise into the second
    (delete) list."""
    flagged = []
    unflagged = []
    for record in datas:
        # all four checks run unconditionally, matching the original
        hits = (check01(record), check02(record),
                check03(record), check04(record))
        if any(hits):
            flagged.append(record["TSTYBM"])
        else:
            unflagged.append(record["TSTYBM"])
        arcpy.SetProgressorPosition()
    return flagged, unflagged
def update(self, i, msg):
    """Advance the progressor to *i* and optionally emit *msg*.

    Returns an int consumed by the dll: 1 when the user pressed cancel,
    0 otherwise.
    """
    arcpy.SetProgressorPosition(i)
    if msg:
        arcpy.AddMessage(msg)
    cancelled = arcpy.env.isCancelled
    return int(cancelled)
def updateTarget(targetpath, unionfzhlist, targetValueDict, mergeFzhList):
    """Write the union-fzh and the (possibly merged) fzh back to every
    target feature, keyed by its exp_tbwym value."""
    cursor_fields = ['exp_tbwym', 'unionfzh', 'fzh']
    with arcpy.da.UpdateCursor(targetpath, cursor_fields) as cur:
        for rec in cur:
            entry = targetValueDict[rec[0]]
            new_union = entry["unionfzhlist"]
            new_fzh = entry["fzh"]
            if new_fzh in mergeFzhList:
                new_fzh = mergeFzhList[new_fzh]
            rec[1] = new_union
            rec[2] = new_fzh
            cur.updateRow(rec)
            arcpy.SetProgressorPosition()
def generate_cls_boundary(cls_input,cntr_id_field,boundary_output,cpu_core):
    """Build one boundary polygon per cluster from clustered points: a
    Delaunay triangulation is filtered to triangles whose three vertices
    share a cluster id, then dissolved per id."""
    arcpy.env.parallelProcessingFactor=cpu_core
    arcpy.SetProgressorLabel('Generating Delaunay Triangle...')
    arrays=arcpy.da.FeatureClassToNumPyArray(cls_input,['SHAPE@XY',cntr_id_field])
    # field type of the cluster-id column, reused for the temp layer
    cid_field_type=[f.type for f in arcpy.Describe(cls_input).fields if f.name==cntr_id_field][0]
    delaunay=Delaunay(arrays['SHAPE@XY']).simplices.copy()
    arcpy.CreateFeatureclass_management('in_memory','boundary_temp','POLYGON',spatial_reference=arcpy.Describe(cls_input).spatialReference)
    fc=r'in_memory\boundary_temp'
    arcpy.AddField_management(fc,cntr_id_field,cid_field_type)
    cursor = arcpy.da.InsertCursor(fc, [cntr_id_field,"SHAPE@"])
    arcpy.SetProgressor("step", "Copying Delaunay Triangle to Temp Layer...",0, delaunay.shape[0], 1)
    for tri in delaunay:
        arcpy.SetProgressorPosition()
        cid=arrays[cntr_id_field][tri[0]]
        # keep only triangles whose three vertices share one cluster id
        if cid == arrays[cntr_id_field][tri[1]] and cid == arrays[cntr_id_field][tri[2]]:
            cursor.insertRow([cid,arcpy.Polygon(arcpy.Array([arcpy.Point(*arrays['SHAPE@XY'][i]) for i in tri]))])
    arcpy.SetProgressor('default','Merging Delaunay Triangle...')
    # PairwiseDissolve is only available on 64-bit installs
    if '64 bit' in sys.version:
        arcpy.PairwiseDissolve_analysis(fc,boundary_output,cntr_id_field)
    else:
        arcpy.Dissolve_management(fc,boundary_output,cntr_id_field)
    arcpy.Delete_management(fc)
    return
def set_field_value(input_fc, fieldname, value):
    """
    Update the named field in every row of the input feature class with
    the given value, driving a step progressor while it runs.
    """
    arcpy.AddMessage("Version %s" % __version__)
    print("field, value = ", fieldname, value)
    start = 0
    step = 1
    maxcount = int(arcpy.GetCount_management(input_fc).getOutput(0))
    arcpy.SetProgressor("step", "Doing serious work here.", start, maxcount, step)
    # OID@ is not strictly required here; fewer fields would make the
    # cursor faster — kept for illustration
    fields = ["OID@", fieldname]
    with arcpy.da.UpdateCursor(input_fc, fields) as cursor:
        for t, row in enumerate(cursor):
            msg = "Working.. step %d of %d" % (t, maxcount)
            print(msg)  # shows up in the IDE debug console
            arcpy.SetProgressorLabel(msg)
            # a mismatched value type does not appear to raise here
            row[1] = value
            cursor.updateRow(row)
            sleep(.50)  # simulate work so the progressor is visible
            arcpy.SetProgressorPosition(t)
    return
def markError(xzkpath, targetValueList):
    """(Re)create the *error* field and flag every parcel: '0' when its
    TSTYBM is in *targetValueList*, otherwise '1'."""
    flag_fields = ["TSTYBM", "error"]
    arcpyDeal.deleteFields(xzkpath, ["error"])
    arcpyDeal.ensureFields(xzkpath, flag_fields)
    with arcpy.da.UpdateCursor(xzkpath, flag_fields) as cursor:
        for record in cursor:
            arcpy.SetProgressorPosition()
            record[1] = '0' if record[0] in targetValueList else "1"
            cursor.updateRow(record)
def updateTarget(targetpath, fzhlist, mergeFzhList, targetValueDict):
    """Write the change type (bhlx) and the resolved fzh back to each
    feature; merged fzh values are remapped and empty ones replaced by a
    freshly generated number."""
    cursor_fields = ['TSTYBM', 'bhlx', 'fzh']
    with arcpy.da.UpdateCursor(targetpath, cursor_fields) as cur:
        for rec in cur:
            entry = targetValueDict[rec[0]]
            new_bhlx = entry['bhlx']
            new_fzh = entry['fzh']
            if new_fzh in mergeFzhList:
                new_fzh = mergeFzhList[new_fzh]
            if new_fzh == "":
                new_fzh = getNewFzh(fzhlist)
            rec[1] = new_bhlx
            rec[2] = new_fzh
            cur.updateRow(rec)
            arcpy.SetProgressorPosition()
def set_field_value(input_fc, fieldname, value):
    """
    Update the named field in every row of the input feature class with
    the given value.
    """
    arcpy.AddMessage("Version %s" % __version__)
    print(fieldname,value)
    start = 0
    step = 1
    maxcount = int(arcpy.GetCount_management(input_fc).getOutput(0))
    arcpy.SetProgressor("step", "Doing serious work here.", start, maxcount, step)
    # OID@ kept only for illustration; the named field is what we write
    with arcpy.da.UpdateCursor(input_fc, ["OID@", fieldname]) as cursor:
        for t, row in enumerate(cursor):
            arcpy.SetProgressorLabel("Working.. step %d of %d" % (t, maxcount))
            row[1] = value
            cursor.updateRow(row)
            arcpy.SetProgressorPosition(t)
    return
def updateDatas(targetpath):
    """Derive the change type (exp_tblx) from OLDTAG/ZZJZTB and assign a
    sequential 8-digit pre-number (exp_tbybh) to every parcel; exp_tbwym
    is copied from TSTYBM."""
    searchFields = [
        'OLDTAG', 'ZZJZTB', "exp_tblx", "exp_tbybh", "exp_tbwym", "TSTYBM"
    ]
    arcpyDeal.ensureFields(targetpath, searchFields)
    number = 0
    with arcpy.da.UpdateCursor(targetpath, searchFields) as UpdateCursor:
        for updaterow in UpdateCursor:
            number += 1
            oldTag = updaterow[0]
            zzjztb = updaterow[1]
            exp_tblx = judgeTBLX(oldTag, zzjztb)
            # FIX: zfill pads to 8 digits; the old slicing trick
            # ('"00000000"[0:8-len(...)]') silently corrupted numbers
            # past 8 digits because the slice bound went negative
            exp_tbybh = str(number).zfill(8)
            updaterow[2] = exp_tblx
            updaterow[3] = exp_tbybh
            updaterow[4] = updaterow[5]  # copy TSTYBM into exp_tbwym
            UpdateCursor.updateRow(updaterow)
            arcpy.SetProgressorPosition()
def nsquaredDist(points, weights=None, potent=None, dType="EUCLIDEAN"):
    """Method used to calculate the distance between each feature in the
    dataset.  The algorithm is near O(n**2).

    INPUTS:
    points (array, numObs x 2): xy-coordinates for each feature
    weights {array, numObs x 1}: weights for each feature
    potent {array. numObs x 1}: self weights for each feature
    dType {str, EUCLIDEAN}: EUCLIDEAN or MANHATTAN (distance)

    OUTPUT:
    final (list): ids with minimum sum distance
    minSumDist (float): minimum sum distance
    """
    n, k = NUM.shape(points)
    # BUG FIX: "weights == None" broadcasts element-wise on a numpy
    # array and raises "truth value is ambiguous" whenever weights were
    # actually supplied; an identity test is the correct None check.
    if weights is None:
        weights = NUM.ones((n, ), float)
    if potent is None:
        potent = NUM.zeros((n, ), float)
    weightedPotential = weights * potent
    res = {}
    #### Calculate Sum of Weighted Distances For Each Feature ####
    weights.shape = n, 1
    if dType == "EUCLIDEAN":
        for idx, point in enumerate(points):
            weightedDist = eucDistArray(point, points, weights)
            res[idx] = weightedDist + (weights[idx] * potent[idx])
            ARCPY.SetProgressorPosition()
    else:
        for idx, point in enumerate(points):
            weightedDist = manDistArray(point, points, weights)
            res[idx] = weightedDist + weightedPotential[idx]
            ARCPY.SetProgressorPosition()
    #### Minimum Sum of Weighted Distances (Central Feature) ####
    # FIX: itervalues()/iteritems() were removed in Python 3;
    # values()/items() behave identically here on both versions
    minSumDist = min(res.values())
    final = [key for key, val in res.items() if val == minSumDist]
    return final, minSumDist
def execute(self, parameters, messages):
    """Create a point feature class and fill each polygon of the input
    layer with randomly placed interior points.

    Parameters: [0] input polygon layer, [1] optional field holding a
    per-feature point count, [2] default point count, [3] output
    feature class path.
    """
    try:
        layer = parameters[0].value
        cntCol = parameters[1].valueAsText
        pntNum = parameters[2].value
        outFullName = parameters[3].valueAsText
        geomType = "POINT"
        template = None
        outWs = os.path.dirname(outFullName)
        outFc = os.path.basename(outFullName)
        has_m = "DISABLED"
        has_z = "DISABLED"
        spRef = None
        dataset = layer.dataSource
        # output inherits the input layer's spatial reference
        spRef = arcpy.Describe(dataset).spatialReference
        arcpy.CreateFeatureclass_management(outWs, outFc, geomType,
                                            template, has_m, has_z, spRef)
        # orgOid links every generated point back to its source polygon
        arcpy.AddField_management(outFullName, "orgOid", "LONG", 10, "",
                                  "", "OriginalOID", "NON_NULLABLE")
        oidCol = arcpy.Describe(layer).OIDFieldName
        outcols = [oidCol, "SHAPE@"]
        if not (cntCol is None):
            outcols.append(cntCol)
        # for Progress step count
        result = arcpy.GetCount_management(layer)
        count = int(result.getOutput(0))
        arcpy.SetProgressor("step", "Inserting ...", 0, count, 1)
        with arcpy.da.SearchCursor(layer, outcols) as cursor, arcpy.da.InsertCursor(outFullName, ["orgOid", "SHAPE@"]) as ins:
            for row in cursor:
                x = 0
                loopCnt = pntNum
                if not (cntCol is None) and not (row[2] is None):
                    loopCnt = row[2]
                # rejection sampling: draw random points inside the
                # polygon's extent until loopCnt fall inside the polygon
                while x < loopCnt:
                    px = row[1].extent.XMin + row[1].extent.width * np.random.random()
                    py = row[1].extent.YMin + row[1].extent.height * np.random.random()
                    pt = arcpy.Point(px, py)
                    # caution: loop end is contains == true
                    if row[1].contains(pt):
                        x += 1
                        ins.insertRow([row[0], pt])
                # step count
                arcpy.SetProgressorPosition()
    except Exception as e:
        # NOTE(review): e.message only exists in Python 2, and the usual
        # arcpy messages method is addErrorMessage (lower-case a) —
        # confirm both before relying on this handler
        messages.AddErrorMessage(e.message)
def calculateImpact(intersectionArray, dem, flowAccumulation, cellSize, tempData):
    """Compute an impact probability for every stream intersection.

    For each intersection the larger-drainage stream becomes the
    mainstem; the impact is a logistic function of the area ratio (Ar)
    and the tributary area*slope product (PsiT).  Failures for a single
    intersection are reported as warnings and skipped.
    """
    arcpy.AddMessage("Calculating Impact Probability...")
    i = 0
    arcpy.SetProgressor(
        "step", "Calculating intersection " + str(i) + " out of " +
        str(len(intersectionArray)), 0, len(intersectionArray), 1)
    for intersection in intersectionArray:
        i += 1
        try:
            streamOneDrainageArea = findFlowAccumulation(
                intersection.streamOne, flowAccumulation, cellSize, tempData)
            streamTwoDrainageArea = findFlowAccumulation(
                intersection.streamTwo, flowAccumulation, cellSize, tempData)
            if streamOneDrainageArea < 0 or streamTwoDrainageArea < 0:
                raise ValueError("Could not properly find drainage area")
            # the stream draining the larger area is the mainstem
            if streamOneDrainageArea > streamTwoDrainageArea:
                mainstem = intersection.streamOne
                mainstemDrainageArea = streamOneDrainageArea
                tributary = intersection.streamTwo
                tributaryDrainageArea = streamTwoDrainageArea
            else:
                tributary = intersection.streamOne
                tributaryDrainageArea = streamOneDrainageArea
                mainstem = intersection.streamTwo
                mainstemDrainageArea = streamTwoDrainageArea
            tributarySlope = findSlope(tributary, dem, tempData)
            if tributarySlope == -9999:
                raise ValueError("Could not properly find slope")
            if mainstemDrainageArea < 1.0:
                varAr = 0
                mainstemDrainageArea = 0.0001
            else:
                varAr = tributaryDrainageArea / mainstemDrainageArea
            varPsiT = tributaryDrainageArea * tributarySlope
            # guard the log() calls below against zero/negative input
            varAr = abs(varAr)
            varPsiT = abs(varPsiT)
            if varAr == 0:
                varAr = 0.0001
            if varPsiT == 0:
                varPsiT = 0.0001
            exponent = 8.68 + 6.08 * log(varAr) + 10.04 * log(varPsiT)
            # FIX: numerically stable logistic.  The original computed
            # e**exponent directly, which raises OverflowError (uncaught
            # here) once the exponent exceeds ~709.
            if exponent >= 0:
                impact = 1.0 / (1.0 + e ** (-exponent))
            else:
                eToPower = e ** exponent
                impact = eToPower / (eToPower + 1)
            intersection.setImpact(impact)
            intersection.mainDrainArea = mainstemDrainageArea
            intersection.tribDrainArea = tributaryDrainageArea
            arcpy.SetProgressorLabel("Calculating intersection " + str(i) +
                                     " out of " + str(len(intersectionArray)))
            arcpy.SetProgressorPosition()
        except ValueError as error:
            arcpy.AddWarning(str(error))
def UpdateTarget(targetpath, matchedDataDict):
    """Restore initial-library attributes (bsm/sjdlbm/zldwdm) onto each
    feature from *matchedDataDict*, and report how many features differ
    from their "_1" counterparts or have no match at all."""
    bsmDifference = []
    zldwdmDifference = []
    sjdlbmDifference = []
    tstybmDifference = []
    fields = ["TSTYBM", "exp_bsm", "exp_sjdlbm", "exp_zldwdm"]
    arcpyDeal.ensureFields(targetpath, fields)
    # FIX: context manager releases the edit lock (the cursor was never
    # closed or deleted before)
    with arcpy.da.UpdateCursor(targetpath, fields,
                               where_clause=" TSTYBM is not null",
                               sql_clause=(None, 'ORDER BY TSTYBM')) as cursor:
        for row in cursor:
            arcpy.SetProgressorPosition()
            tstybm = row[0]
            if tstybm not in matchedDataDict:
                tstybmDifference.append(tstybm)
                continue
            matched = matchedDataDict[tstybm]  # hoist repeated lookups
            if matched["bsm"] != matched["bsm_1"]:
                bsmDifference.append(tstybm)
            if matched["zldwdm"] != matched["zldwdm_1"]:
                zldwdmDifference.append(tstybm)
            if matched["sjdlbm"] != matched["sjdlbm_1"]:
                sjdlbmDifference.append(tstybm)
            row[1] = matched["bsm"]
            row[2] = matched["sjdlbm"]
            row[3] = matched["zldwdm"]
            cursor.updateRow(row)
    arcpy.AddMessage("2_共有%s个图斑bsm不同" % (len(bsmDifference)))
    arcpy.AddMessage("2_共有%s个图斑zldwdm不同" % (len(zldwdmDifference)))
    arcpy.AddMessage("2_共有%s个图斑sjdlbm不同" % (len(sjdlbmDifference)))
    arcpy.AddMessage("2_共有%s个图斑无初始库图斑" % (len(tstybmDifference)))
    arcpy.AddMessage("2_" + json.dumps(bsmDifference))
    arcpy.AddMessage("2_" + json.dumps(zldwdmDifference))
    arcpy.AddMessage("2_" + json.dumps(sjdlbmDifference))
    arcpy.AddMessage("2_" + json.dumps(tstybmDifference))
def verifyShapeStructure(lines_of_sight, visibility_lines):
    """Verify that every visibility line's vertices run from its line of
    sight's start point outward; rebuild the polyline with vertices
    sorted by distance from the start when they do not."""
    spatial_ref = arcpy.Describe(visibility_lines).spatialReference
    number_of_LoS = int(arcpy.GetCount_management(visibility_lines).getOutput(0))
    arcpy.SetProgressor("step",
                        "Verifing structure of " + str(number_of_LoS) +
                        " lines of sight...",
                        0, number_of_LoS, 1)
    with arcpy.da.UpdateCursor(visibility_lines, ["SHAPE@", "OBJECTID"]) as cursor:
        for row in cursor:
            # matching line of sight has the same OID as this row
            with arcpy.da.SearchCursor(lines_of_sight, ["SHAPE@"],
                                       """"OID" = """ + str(row[1])) as cursor_lines:
                for row_lines in cursor_lines:
                    start_point_x = row_lines[0].firstPoint.X
                    start_point_y = row_lines[0].firstPoint.Y
                    #end_point_x = row_lines[0].lastPoint.X
                    #end_point_y = row_lines[0].lastPoint.Y
                    # strip the WKT wrapper down to a flat "x y z" list
                    wkt = row[0].WKT.replace("))", "").replace(" ((", "").replace("MULTILINESTRING ", "") \
                        .replace("ZM", "").replace("Z", "").replace("), (", ", ")
                    poi = wkt.split(", ")
                    start_p_x = float(poi[0].split(" ")[0])
                    start_p_y = float(poi[0].split(" ")[1])
                    #end_p_x = float(poi[len(poi)-1].split(" ")[0])
                    #end_p_y = float(poi[len(poi)-1].split(" ")[1])
                    points = []
                    # only rebuild when the first vertex is more than one
                    # unit away from the true start point
                    if distance(start_point_x, start_point_y, start_p_x, start_p_y) > 1:
                        for i in range(0, len(poi)):
                            parts = poi[i].split(" ")
                            x = float(parts[0])
                            y = float(parts[1])
                            z = float(parts[2])
                            dist = distance(x, y, start_point_x, start_point_y)
                            points.append([x, y, z, dist])
                        # order vertices by distance from the start point
                        points = sorted(points, key=itemgetter(3))
                        point = arcpy.Point()
                        array = arcpy.Array()
                        for i in range(0, len(poi)):
                            point.X = points[i][0]
                            point.Y = points[i][1]
                            point.Z = points[i][2]
                            array.add(point)
                        polyline = arcpy.Polyline(array, spatial_ref, True)
                        row[0] = polyline
                        cursor.updateRow(row)
            arcpy.SetProgressorPosition()
    arcpy.ResetProgressor()