Example #1
def Test_Locked(basemap_list,facility_list):
    '''
    Test whether any of the given feature classes are locked.
    :param basemap_list: dict mapping keys to basemap feature class paths
    :param facility_list: dict mapping keys to facility feature class paths
    :return: None
    '''
    for key,value in basemap_list.items():
        if not arcpy.TestSchemaLock(value):
            print("Feature class "+value+" is locked!")
        else: print("Feature class "+value+" is not locked!" )
    for key,value in facility_list.items():
        if not arcpy.TestSchemaLock(value):
            print("Feature class "+value+" is locked!")
        else: print("Feature class "+value+" is not locked!" )
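A minimal usage sketch for the function above; the geodatabase paths and dictionary contents are hypothetical:

import arcpy

# Hypothetical dictionaries: keys are labels, values are full feature class paths.
basemap_list = {"roads": r"C:\data\demo.gdb\Roads"}
facility_list = {"hydrants": r"C:\data\demo.gdb\Hydrants"}

Test_Locked(basemap_list, facility_list)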
Example #2
def makeTextID(field, table):
    ''' This function creates a copy of an existing field with the String format.
        
    **Description:**
        
        Certain types of fields cause problems when performing joins, and Strings are generally the most reliable.
        This function creates a new field with string format of length 30 and copies all data from the problem field.
    
    **Arguments:**
    
        * *field* - input arcpy field object
        * *table* - full path of the input table to be modified
    
    **Returns:**
    
        * *textFieldName* - validated field name of added field.
        
    '''
    # Obtain valid fieldname
    textFieldName = arcpy.ValidateFieldName("txt" + field.name, table)
    # Test for Schema Lock
    if arcpy.TestSchemaLock(table):
        # Add the output text field
        arcpy.AddField_management(table, textFieldName, "TEXT", "#", "#", "30")
    else:
        arcpy.AddMessage(
            "Unable to acquire the necessary schema lock to add the new field")
    # Calculate the field values
    arcpy.CalculateField_management(table, textFieldName,
                                    '!' + field.name + '!', "PYTHON")
    # Since this field will be used in joins, index the field.
    arcpy.AddIndex_management(table, textFieldName, "idIDX", "UNIQUE")
    return textFieldName
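A hedged usage sketch for makeTextID; the table path and field name are hypothetical, and the field object is obtained from arcpy.ListFields:

import arcpy

table = r"C:\data\demo.gdb\parcels"            # hypothetical table
field = arcpy.ListFields(table, "ZONE_ID")[0]  # hypothetical source field
text_field = makeTextID(field, table)
print("Created and indexed text field: " + text_field)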
Example #3
def main(argv=None):
    success = True
    gzSupport.compressGDB(gzSupport.workspace)
    arcpy.ClearWorkspaceCache_management(gzSupport.workspace)
    tables = gzSupport.listDatasets(gzSupport.workspace)
    tNames = tables[0]
    tFullNames = tables[1]
    name = ''

    for dataset in datasets:
        arcpy.env.Workspace = gzSupport.workspace
        name = dataset.getAttributeNode("name").nodeValue
        table = gzSupport.getFullName(name, tNames, tFullNames)
        gzSupport.sourceIDField = dataset.getAttributeNode(
            "sourceIDField").nodeValue
        gzSupport.sourceNameField = dataset.getAttributeNode(
            "sourceNameField").nodeValue
        if not arcpy.Exists(table):
            gzSupport.addError("Feature Class " + table +
                               " does not exist, exiting")
            arcpy.SetParameter(SUCCESS, False)
            return
        if not arcpy.TestSchemaLock(table):
            gzSupport.addError("Unable to obtain a schema lock for " + table +
                               ", exiting")
            arcpy.SetParameter(SUCCESS, False)
            return -1
        desc = arcpy.Describe(table)
        fields = dataset.getElementsByTagName("Field")
        try:
            attrs = [f.name for f in arcpy.ListFields(table)]
            for field in fields:
                arcpy.env.Workspace = gzSupport.workspace
                targetName = gzSupport.getNodeValue(field, "TargetName")
                gzSupport.addGizintaField(table, targetName, field, attrs)

            retVal = setFieldValues(table, fields)
            if retVal == False:
                success = False
            gzSupport.logDatasetProcess(name, "Fields", retVal)
            arcpy.ClearWorkspaceCache_management(gzSupport.workspace)
            gzSupport.cleanupGarbage()

        except:
            gzSupport.showTraceback()
            success = False
            gzSupport.logDatasetProcess("fieldCalculator", name, False)
        finally:
            arcpy.RefreshCatalog(table)
            arcpy.ClearWorkspaceCache_management(gzSupport.workspace)
    if success == False:
        gzSupport.addError(
            "Errors occurred during process, look in log file tools\\log\\fieldCalculator.log for more information"
        )
    if gzSupport.ignoreErrors == True:
        success = True
    arcpy.SetParameter(SUCCESS, success)
    arcpy.ResetProgressor()
    gzSupport.closeLog()
    return
Example #4
def delete_all_fields_except_as_specified_and_geometry(input_table, fields_to_keep):
    all_fields = return_list_of_fields_from_table(input_table)
    fields_to_delete = [x for x in all_fields if x not in fields_to_keep]

    if arcpy.TestSchemaLock(input_table):
        arcpy.DeleteField_management(input_table, fields_to_delete)
    else:
        pass
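A brief usage sketch; the path and field names are hypothetical, and return_list_of_fields_from_table is assumed to be a helper from the same module that returns the table's field names:

input_table = r"C:\data\demo.gdb\parcels"    # hypothetical feature class
fields_to_keep = ["OBJECTID", "Shape", "PARCEL_ID"]
delete_all_fields_except_as_specified_and_geometry(input_table, fields_to_keep)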
Example #5
    def locksexist(self):

        if arcpy.TestSchemaLock(self.featureclass):

            # "True A schema lock can be applied to the dataset"
            return False

        else:

            return True
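A small sketch of how a method like locksexist() might be hosted; the class is hypothetical, only the featureclass attribute is taken from the snippet above:

import arcpy

class FeatureClassInfo:
    # Hypothetical container class; the original class is not shown in the snippet.
    def __init__(self, featureclass):
        self.featureclass = featureclass

    def locksexist(self):
        # True means a schema lock could NOT be acquired, i.e. something holds a lock.
        return not arcpy.TestSchemaLock(self.featureclass)

info = FeatureClassInfo(r"C:\data\demo.gdb\parcels")  # hypothetical path
if info.locksexist():
    print("Dataset is locked; skipping schema changes.")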
Example #6
    def __workspaceValid__(self, workspace):
        if not arcpy.Exists(workspace):
            self.__sm__("Workspace " + workspace + " does not exist")
            return False

        if arcpy.TestSchemaLock(workspace):
            self.__sm__("Workspace " + workspace + " has a schema lock",
                        "AHMSG")
            return False
        self.__sm__("Workspace " + workspace + " is valid")
        return True
Example #7
    def createCustomRasters(self):
        self.debug_logger("Starting TRMM Custom Raster Creation Process")

        try:
            arcpy.env.extent = arcpy.Extent(
                -180.0, -50.0, 180.0,
                50.0)  # max and min extent values a given TRMM raster
            arcpy.env.workspace = self.workspace_fullpath
            arcpy.env.overwriteOutput = True
            arcpy.CheckOutExtension("spatial")

            for custom_raster in self.custom_raster_requests:
                self.debug_logger("Processing Raster")

                factory_specifications = custom_raster.getFactorySpecifications(
                )
                output_raster_fullpath = factory_specifications[
                    'output_raster_fullpath']
                raster_catalog_is_not_locked = arcpy.TestSchemaLock(
                    custom_raster.getRasterCatalogFullpath())
                self.debug_logger("DEBUG: self.workspace_fullpath " +
                                  str(self.workspace_fullpath))

                #extracted_raster_list = custom_raster.extractRastersToWorkspace(self.workspace_fullpath)
                extracted_raster_list = custom_raster.extractRastersToWorkspace(
                    self.workspace_fullpath, self.fileFolder_With_TRMM_Rasters)

                self.debug_logger("Len(extracted_raster_list) " +
                                  str(len(extracted_raster_list)))

                if extracted_raster_list and raster_catalog_is_not_locked:

                    final_raster = self._createCumulativeRaster(
                        extracted_raster_list, factory_specifications)
                    self._saveRaster(final_raster, output_raster_fullpath,
                                     factory_specifications)

            self._finishCustomRasterManagment()
            self.debug_logger("Finished TRMM Custom Raster Creation Process")

        except Exception as e:

            self.debug_logger(
                "==================== EXCEPTION ====================")
            self.debug_logger("System Error Message: " + str(e) +
                              " | ArcPy Error Message: " +
                              str(arcpy.GetMessages(2)))
            #self.exception_handler(dict(exception=str(e), messages=str(arcpy.GetMessages(2))))

        finally:
            arcpy.CheckInExtension("spatial")
            self.debug_logger("checked IN spatial extension")
Example #8
def add_field_other(featureClass, fieldName, fieldType):
    if not arcpy.TestSchemaLock(featureClass):
        raise SchemaLockError
    try:
        fieldList = arcpy.ListFields(featureClass)
        for field in fieldList:
            if field.name == fieldName:
                delete_field(featureClass, fieldName)
        arcpy.AddField_management(featureClass, fieldName, fieldType, "", "",
                                  "", "", "NULLABLE", "NON_REQUIRED", "")
    except:
        config.run_error_message(featureClass,
                                 "Could not delete\add: " + fieldName)
Example #9
def add_fields(layer, fields, type, scale):
    """
    This function will add a new field with the defined name if it does not exist
    """
    current_fields = arcpy.ListFields(layer)
    current_field_names = [f.name for f in current_fields]
    for field in fields:
        if field not in current_field_names:
            if arcpy.TestSchemaLock(layer):
                if scale:
                    arcpy.AddField_management(layer, field, type, field_scale=scale)
                else:
                    arcpy.AddField_management(layer, field, type)
            else:
                arcpy.AddError("Cannot aquire a schema lock on: " + str(layer))
Example #10
def Backup(backupFolder, fc):
    print('backing up data...')
    # create backup GDB if it doesn't exist
    if not os.access(backupFolder, os.W_OK):
        arcpy.CreateFileGDB_management(saveFolder, 'Backup')
    fcdesc = arcpy.Describe(fc)
    backupFC = os.path.join(backupFolder, fcdesc.basename)

    # if file not locked - make copy
    if arcpy.Exists(backupFC):
        if arcpy.TestSchemaLock(backupFC):
            arcpy.Delete_management(backupFC)
        # if file locked - exit program
        else:
            print('ERROR: ' + backupFC + ' locked. Exiting program')
            logging.error('ERROR: ' + backupFC + ' locked. Exiting program')
            SendEmail('GeodataRetriever failed',
                      'ERROR: ' + backupFC + ' locked. Exiting program')
            sys.exit()
    arcpy.Copy_management(fc, backupFC)
Example #11
def createPolyShades(inRaster, reclass, rasterRes, minPolyArea):

    for interval in remapDict:

        outPolyFGD = r'D:\VectorTiles\HillshadePolygons.gdb'

        outRasterName = '{}_Interval{}'.format(inRaster, interval)
        outPolys = join(outPolyFGD,
                        'PolyShade_{}_{}class'.format(rasterRes, interval))

        newRange = RemapRange(remapDict[interval])

        outRaster = Reclassify(inRaster, 'Value', newRange)
        outRaster.save(outRasterName)
        print outRasterName + ' Created'

        arcpy.RasterToPolygon_conversion(outRasterName, outPolys, 'SIMPLIFY')
        print outPolys + ' Created'

        sql_254 = """"{}" = {}""".format('gridcode', 254)
        outPolys_254FL = arcpy.MakeFeatureLayer_management(
            outPolys, 'outPolysFL', sql_254)
        arcpy.DeleteFeatures_management(outPolys_254FL)
        arcpy.Delete_management(outPolys_254FL)
        print 'Deleted gridcode 254'

        sql_MinPolyArea = """"{}" < {}""".format('Shape_Area', minPolyArea)
        outPolys_minPolysFL = arcpy.MakeFeatureLayer_management(
            outPolys, 'outPolys_minPolysFL', sql_MinPolyArea)
        arcpy.DeleteFeatures_management(outPolys_minPolysFL)
        arcpy.Delete_management(outPolys_minPolysFL)
        print 'Deleted polygons less than {} sq meters'.format(minPolyArea)

        if not arcpy.TestSchemaLock(outRasterName):
            print outRasterName + ' is LOCKED'
            print ''
            continue
        else:
            arcpy.Delete_management(outRasterName)
            print 'Deleted ' + outRasterName
            print ''
Example #12
def geocode_ffe_points_with_address_locator(not_found_path, feature_class_path):
    address_locator = egh_public + r"\ARCMAP_ADMIN.Streets_Geocoding_pdx_no_zone"
    arcpy.CalculateField_management(not_found_path, "City", "'Portland'", "PYTHON")
    arcpy.AlterField_management(not_found_path, "Address", "original_address")
    arcpy.AlterField_management(not_found_path, "conditioned_address", "Address")
    arcpy.GeocodeAddresses_geocoding(not_found_path, address_locator, "", feature_class_path)
    arcpy.AlterField_management(feature_class_path, "Address", "conditioned_address")
    arcpy.AlterField_management(feature_class_path, "original_address", "Address")
    add_text_field_to_feature_class(feature_class_path,'GeocodingNotes', 15)

    with arcpy.da.UpdateCursor(feature_class_path, ['GeocodingNotes', 'Status']) as cursor:
        for row in cursor:
            if row[1] =='U':
                row[0] = "UNMATCHED"
            else:
                row[0] = "Address Locater"
            cursor.updateRow(row)

    drop_fields = ["Status", "Score", "Match_addr", "Pct_along", "Side", "Ref_ID", "X", "Y", "Addr_type", "ARC_Street", "ARC_City", "ARC_ZIP"]

    if arcpy.TestSchemaLock(feature_class_path):
        arcpy.DeleteField_management(feature_class_path, drop_fields)
    else:
        pass
Example #13
                oras.save(results)

if options.oneras:
    (temp_path, initRaster) = os.path.split(options.oneras)
    initRasType, initSP, initLay = initRaster.split("_", 2)
    print(arraylistB)
    for bRas in arraylistB:
        (temp_path, ras) = os.path.split(bRas)
        rasType, stressPeriod, layer = ras.split("_", 2)
        if initRasType == rasType:
            if initLay == layer:
                outputName = rasType + "_diff_" + stressPeriod + "_" + layer
                ras2 = arcpy.Raster(bRas)
                ras1 = arcpy.Raster(options.oneras)
                oras = ras2 - ras1
                results = os.path.join(oWorkspace, outputName)
                if arcpy.TestSchemaLock(results) or not arcpy.Exists(results):
                    print("{} minus {} equals {}".format(
                        ras2, ras1, outputName))
                    oras.save(results)
                else:
                    print(
                        "Output SKIPPED [Schema lock present]. Can't save {}".
                        format(results))
            else:
                print("Layers Do Not Match [{} <> {}]".format(initLay, layer))
        else:
            print("Raster Types Do Not Match[{} <> {}]".format(
                initRasType, rasType))

print("End of Execution")
Example #14
def calculate(xmlFileName, workspace, name, ignore):

    dla.workspace = workspace
    success = True
    arcpy.ClearWorkspaceCache_management(dla.workspace)
    xmlDoc = dla.getXmlDoc(xmlFileName)

    arcpy.env.Workspace = dla.workspace
    table = dla.getTempTable(name)

    if not arcpy.Exists(table):
        dla.addError("Feature Class " + table + " does not exist, exiting")
        arcpy.SetParameter(SUCCESS, False)
        return
    if not arcpy.TestSchemaLock(table):
        dla.addError("Unable to obtain a schema lock for " + table +
                     ", exiting")
        arcpy.SetParameter(SUCCESS, False)
        return -1

    desc = arcpy.Describe(table)
    fields = dla.getXmlElements(xmlFileName, "Field")
    sourceFields = dla.getXmlElements(xmlFileName, "SourceField")
    targetFields = dla.getXmlElements(xmlFileName, "TargetField")
    attrs = [f.name for f in arcpy.ListFields(table)]

    for field in fields:
        arcpy.env.Workspace = dla.workspace
        targetName = dla.getNodeValue(field, "TargetName")
        sourceName = dla.getNodeValue(field, "SourceName")

        type = "String"
        length = "50"
        for target in targetFields:
            nm = target.getAttributeNode("Name").nodeValue
            if nm == targetName:
                type = target.getAttributeNode("Type").nodeValue
                length = target.getAttributeNode("Length").nodeValue
        # uppercase compare, later need to check for orig/upper name for calc
        #ups = [nm.upper() for nm in attrs]
        dla.addDlaField(table, targetName, field, attrs, type, length)

    allFields = sourceFields + targetFields
    names = []
    types = []
    lengths = []
    for field in allFields:
        nm = field.getAttributeNode("Name").nodeValue
        if nm != dla.noneName:
            names.append(nm)
            typ = field.getAttributeNode("Type").nodeValue
            leng = field.getAttributeNode("Length").nodeValue
            types.append(typ)
            lengths.append(leng)

    retVal = setFieldValues(table, fields, names, types, lengths)
    if retVal == False:
        success = False
    arcpy.ClearWorkspaceCache_management(dla.workspace)
    dla.cleanupGarbage()

    arcpy.ResetProgressor()
    if ignore == True:
        success = True
    return success
Example #15
        # Append to the list of Polygon objects
        featureList.append(feaERIS)

    # Create a copy of the Polygon objects, by using featureList as input to the CopyFeatures tool.
    outshp = os.path.join(scratch, "orderGeoName.shp")
    #outshp = r"in_memory/orderGeoName"

    arcpy.CopyFeatures_management(featureList, outshp)
    arcpy.DefineProjection_management(outshp, srGCS83)

    del point
    del array

    #2. Calculate Centroid of Geometry
    passLockTest = arcpy.TestSchemaLock(outshp)
    while not passLockTest:
        arcpy.AddWarning("There is a lock, wait, in #2")
        time.sleep(10)
        passLockTest = arcpy.TestSchemaLock(outshp)
    arcpy.AddField_management(outshp, "xCentroid", "DOUBLE", 18, 11)
    arcpy.AddField_management(outshp, "yCentroid", "DOUBLE", 18, 11)
    arcpy.AddField_management(outshp, "ERIS_ID", "LONG", 10)

    xExpression = '!SHAPE.CENTROID.X!'
    yExpression = '!SHAPE.CENTROID.Y!'

    arcpy.CalculateField_management(outshp, 'xCentroid', xExpression,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(outshp, 'yCentroid', yExpression,
                                    "PYTHON_9.3")
Example #16
def dipNumbers(gdb, mapScaleDenominator):
    OPfc = os.path.join(gdb, 'GeologicMap', 'OrientationPoints')
    if not arcpy.Exists(OPfc):
        addMsgAndPrint(
            '  Geodatabase {} lacks feature class OrientationPoints.'.format(
                os.path.basename(gdb)))
        return

    desc = arcpy.Describe(OPfc)
    mapUnits = desc.spatialReference.linearUnitName
    if 'meter' in mapUnits.lower():
        mapUnitsPerMM = float(mapScaleDenominator) / 1000.0
    else:
        mapUnitsPerMM = float(mapScaleDenominator) / 1000.0 * 3.2808

    if numberOfRows(OPfc) == 0:
        addMsgAndPrint('  0 rows in OrientationPoints.')
        return

    ## MAKE ORIENTATIONPOINTLABELS FEATURE CLASS
    arcpy.env.workspace = os.path.join(gdb, 'GeologicMap')
    OPL = os.path.join(gdb, 'GeologicMap', 'OrientationPointLabels')
    if arcpy.TestSchemaLock(OPL) == False:
        addMsgAndPrint('    TestSchemaLock({}) = False.'.format(OPLName))
        addMsgAndPrint('Cannot get a schema lock!')
        forceExit()

    testAndDelete(OPL)
    arcpy.CreateFeatureclass_management(fds, 'OrientationPointLabels', 'POINT')
    arcpy.AddField_management(OPL, 'OrientationPointsID', 'TEXT', "", "", 50)
    arcpy.AddField_management(OPL, 'Inclination', 'TEXT', "", "", 3)
    arcpy.AddField_management(OPL, 'PlotAtScale', 'FLOAT')

    ## ADD FEATURES FOR ROWS IN ORIENTATIONPOINTS WITHOUT 'HORIZONTAL' OR 'VERTICAL' IN THE TYPE VALUE
    OPfields = [
        'SHAPE@XY', 'OrientationPoints_ID', 'Type', 'Azimuth', 'Inclination',
        'PlotAtScale'
    ]
    attitudes = arcpy.da.SearchCursor(OPfc, OPfields)
    OPLfields = [
        'SHAPE@XY', 'OrientationPointsID', 'Inclination', 'PlotAtScale'
    ]
    inclinLabels = arcpy.da.InsertCursor(OPL, OPLfields)
    for row in attitudes:
        oType = row[2]
        if showInclination(oType):
            x = row[0][0]
            y = row[0][1]
            OP_ID = row[1]
            azi = row[3]
            inc = int(round(row[4]))
            paScale = row[5]
            if isPlanar(oType):
                geom = ' S '
                inclinRadius = 2.4 * mapUnitsPerMM
                azir = math.radians(azi)
            else:  # assume linear
                geom = ' L '
                inclinRadius = 7.4 * mapUnitsPerMM
                azir = math.radians(azi - 90)
            ix = x + math.cos(azir) * inclinRadius
            iy = y - math.sin(azir) * inclinRadius

            addMsgAndPrint('    inserting ' + oType + geom +
                           str(int(round(azi))) + '/' + str(inc))
            inclinLabels.insertRow(([ix, iy], OP_ID, inc, paScale))

    del inclinLabels
    del attitudes

    # INSTALL NEWLY-MADE FEATURE CLASS USING .LYR FILE. SET DATA SOURCE. SET DEFINITION QUERY

    #make a copy of OrientationPointsLabels.lyrx in \Resources. ArcGIS Pro cannot write to lyr files
    newLyr = os.path.join(os.path.dirname(gdb), 'OrientationPointsLabels.lyrx')
    shutil.copy(lyrx_path, newLyr)

    # create a LayerFile object based on the copy in order to get a handle on the labels layer
    OPLyrFile = arcpy.mp.LayerFile(newLyr)
    # reset data source through the updateConnectionProperties method
    current_connect = OPLyrFile.listLayers()[0].connectionProperties
    current_workspace = current_connect['connection_info']['database']
    OPLyrFile.updateConnectionProperties(current_workspace, gdb)

    # find the layer in the layer file object
    OPLyr = OPLyrFile.listLayers()[0]
    # set definition query
    pasName = arcpy.AddFieldDelimiters(gdb, 'PlotAtScale')
    defQuery = pasName + ' >= ' + str(mapScaleDenominator)
    OPLyr.definitionQuery = defQuery
    OPLyrFile.save()

    # Insert new OrientationPointLabels.lyr
    #try:
    OPfc = os.path.join(gdb, 'GeologicMap', 'OrientationPoints')
    insert_layer(OPfc, OPLyr)
Example #17
# -*- coding: cp936 -*-
import arcpy,os
# Create a test feature class
arcpy.CreateFeatureclass_management(os.getcwd(),"test.shp","POLYGON")

# If the feature class is open in a GIS application, the else branch is reached
if arcpy.TestSchemaLock(os.getcwd()+os.sep+"test.shp"):
    print "A schema lock can be acquired; a field can be added to the feature class!"
    arcpy.AddField_management(os.getcwd()+os.sep+"test.shp","TESTFIELD","TEXT")
else:
    print "Cannot acquire a schema lock!"

# Delete the test feature class
#arcpy.Delete_management(os.getcwd()+os.sep+"test.shp")
Example #18
def fillsummarytable(summaryTable,out_basename,out_basename_validated,grid_thickn,grid_mask,nb_iter,tol):
	# Output the main results to the console
	str_message = 'SLBL computed in {} iterations'.format(nb_iter)
	arcpy.AddMessage(str_message)
	str_message = 'Maximum thickness: {:.3f} m'.format(np.amax(grid_thickn))
	arcpy.AddMessage(str_message)
	str_message = 'Average thickness: {:.3f} m'.format(np.mean(grid_thickn[grid_mask]))
	arcpy.AddMessage(str_message)
	volume = (np.sum(grid_thickn)*cellSize*cellSize)
	str_message = 'Total volume: {:.6f} million m3'.format(volume/1000000)
	arcpy.AddMessage(str_message)
	if volume < 251464.767769637:
		scheidegger = 30.9637565320735
	else:
		scheidegger = np.rad2deg(np.arctan(np.power(volume,-0.15666)*np.power(10,0.62419)))
	str_message = 'Angle of reach: {:.3f} degrees'.format(scheidegger)
	arcpy.AddMessage(str_message)
	
	# Saves the key parameters in the summary table (creates the table if necessary)
	# The length of field names is limited to 10 characters in a dbf file
	if not arcpy.Exists(summaryTable):
		str_message = 'Creating summary table...'
		arcpy.AddMessage(str_message)
		arcpy.CreateTable_management(ws,summaryTableName)
		arcpy.AddField_management(summaryTable,"Name","TEXT")
		if saveInGDB:
			arcpy.AddField_management(summaryTable,"Volume_10e6m3","FLOAT")
			arcpy.AddField_management(summaryTable,"Reach_angle","FLOAT")
		else: #max length = 10
			arcpy.AddField_management(summaryTable,"Volume","FLOAT")
			arcpy.AddField_management(summaryTable,"Reach_angl","FLOAT")
		arcpy.AddField_management(summaryTable,"Max_thick","FLOAT")
		arcpy.AddField_management(summaryTable,"Avg_thick","FLOAT")
		arcpy.AddField_management(summaryTable,"Iterations","LONG")
		arcpy.AddField_management(summaryTable,"Cell_size","SHORT")
		arcpy.AddField_management(summaryTable,"Tolerance","FLOAT")
		arcpy.AddField_management(summaryTable,"Max_depth","FLOAT")
		arcpy.AddField_management(summaryTable,"Max_vol","FLOAT")
		arcpy.AddField_management(summaryTable,"Method","TEXT")
		if saveInGDB:
			arcpy.AddField_management(summaryTable,"Stop_criterion","FLOAT")
			arcpy.AddField_management(summaryTable,"Nb_neighbours","SHORT")
			arcpy.AddField_management(summaryTable,"Not_deepening","TEXT")
		else:
			arcpy.AddField_management(summaryTable,"Stop_crite","FLOAT")
			arcpy.AddField_management(summaryTable,"Nb_neighbo","SHORT")
			arcpy.AddField_management(summaryTable,"Not_deepen","TEXT")
		arcpy.AddField_management(summaryTable,"Inverse","TEXT")
		arcpy.AddField_management(summaryTable,"Date","DATE")
	
	if arcpy.TestSchemaLock(summaryTable) == False:
		str_message = 'The table {} is locked and cannot be edited here. Make sure it is not being edited elsewhere'.format(summaryTable)
		arcpy.AddError(str_message)
		
	str_message = 'Filling summary table...'
	arcpy.AddMessage(str_message)
	field_names = [f.name for f in arcpy.ListFields(summaryTable)]
	if not "Method" in field_names: #field added in a later version
		arcpy.AddField_management(summaryTable,"Method","TEXT")
	if not "Inverse" in field_names: #field added in a later version
		arcpy.AddField_management(summaryTable,"Inverse","TEXT")
	if not "Max_vol" in field_names: #field added in a later version
		arcpy.AddField_management(summaryTable,"Max_vol","FLOAT")
	if "Volume_Mm3" in field_names: #field name changed in a later version
		arcpy.AlterField_management(summaryTable, "Volume_Mm3", 'Volume_10e6m3')
	cur = arcpy.InsertCursor(summaryTable)
	row = cur.newRow()
	if str(out_basename_validated) == str(out_basename):
		row.setValue('Name', out_basename)
	else:
		row.setValue('Name', '{} ({})'.format(out_basename,out_basename_validated))
	if saveInGDB:
		row.setValue('Volume_10e6m3', (volume/1000000))
		row.setValue('Reach_angle', scheidegger)
	else:
		row.setValue('Volume', (volume/1000000))
		row.setValue('Reach_angl', scheidegger)
	row.setValue('Max_thick', np.amax(grid_thickn))
	row.setValue('Avg_thick', np.mean(grid_thickn[grid_mask]))
	row.setValue('Iterations', nb_iter)
	row.setValue('Cell_size', cellSize)
	row.setValue('Tolerance', tol)
	if np.isinf(maxt):
		row.setValue('Max_depth', -1)
	else:
		row.setValue('Max_depth', maxt)
	if np.isinf(maxv):
		row.setValue('Max_vol', -1)
	else:
		row.setValue('Max_vol', maxv)
	row.setValue('Method', criteria)
	if saveInGDB:
		row.setValue('Stop_criterion', stop)
		row.setValue('Nb_neighbours', int(nb_neigh))
		row.setValue('Not_deepening', not_deepen)
	else:
		row.setValue('Stop_crite', stop)
		row.setValue('Nb_neighbo', int(nb_neigh))
		row.setValue('Not_deepen', not_deepen)
	row.setValue('Inverse', inverse)
	row.setValue('Date', datetime.datetime.today())
	cur.insertRow(row)
Example #19
#set featureClasses  (ContactsAndFaults, OrientationPoints, GeologicLines)
caf = getCaf(inFds)
if inFds.find('CorrelationOfMapUnits') == -1:
    gel = caf.replace('ContactsAndFaults','GeologicLines')
    orp = caf.replace('ContactsAndFaults','OrientationPoints')
    mup = caf.replace('ContactsAndFaults','MapUnitPolys')
    fields = ['Type','IsConcealed','LocationConfidenceMeters','ExistenceConfidence','IdentityConfidence','Symbol']
else:  # is CMU
    caf = ''
    mup = inFds+'/CMUMapUnitPolys'
    gel = ''
    orp = ''
    fields = ['Type']
dmu = os.path.dirname(inFds)+'/DescriptionOfMapUnits'
    
addMsgAndPrint('  Feature dataset '+inFds+', isLocked = '+str(arcpy.TestSchemaLock(inFds)))

for fc in caf,gel:
    if arcpy.Exists(fc):
      if numberOfRows(fc) > 0:
        addMsgAndPrint('  processing '+os.path.basename(fc))
        hasRep, repDomain = hasCartoRep(inFds,fc)
        if hasRep:
            fields.append('RuleID1')
            repRuleDict = buildRepRuleDict(repDomain)
        addMsgAndPrint('fields = '+str(fields))
        with arcpy.da.UpdateCursor(fc, fields) as cursor:
            for row in cursor:
                rowChanged = False
                typ = row[0]
                isCon = row[1]
Example #20
def calculate(xmlFileName, workspace, name, ignore):

    dla.workspace = workspace
    success = True
    arcpy.ClearWorkspaceCache_management(dla.workspace)
    xmlDoc = dla.getXmlDoc(xmlFileName)
    dla.addMessage("Field Calculator: " + xmlFileName)
    arcpy.env.Workspace = dla.workspace
    table = dla.getTempTable(name)

    if not arcpy.Exists(table):
        dla.addError("Feature Class " + table + " does not exist, exiting")
        arcpy.SetParameter(SUCCESS, False)
        return
    if not arcpy.TestSchemaLock(table):
        dla.addError("Unable to obtain a schema lock for " + table +
                     ", exiting")
        arcpy.SetParameter(SUCCESS, False)
        return -1

    desc = arcpy.Describe(table)
    fields = dla.getXmlElements(xmlFileName, "Field")
    sourceFields = dla.getXmlElements(xmlFileName, "SourceField")
    targetFields = dla.getXmlElements(xmlFileName, "TargetField")
    attrs = [f.name for f in arcpy.ListFields(table)]
    target_values = CaseInsensitiveDict()

    #Fix read into dict, using NM as key
    # at this point just getting the list of all target field names/types/lengths
    for target in targetFields:
        nm = target.getAttributeNode("Name").nodeValue
        target_values[nm] = dict(
            ftype=target.getAttributeNode("Type").nodeValue,
            flength=target.getAttributeNode("Length").nodeValue)

    for field in fields:
        arcpy.env.Workspace = dla.workspace
        targetName = dla.getNodeValue(field, "TargetName")
        sourceName = dla.getNodeValue(field, "SourceName")

        ftype = "String"
        flength = "50"
        if targetName in target_values:
            ftype = target_values[targetName]['ftype']
            flength = target_values[targetName]['flength']

        # make sure the field exists in the field calculator dataset, this will include all source and target fields.
        retcode = dla.addDlaField(table, targetName, field, attrs, ftype,
                                  flength)
        if retcode == False:
            addError("Unable to add field " + targetName +
                     " to database to calculate values, exiting")

    allFields = sourceFields + targetFields  # this should be the same as the dataset fields at this point
    desc = arcpy.Describe(table)
    layerNames = []
    names = []
    ftypes = []
    lengths = []
    ignore = dla.getIgnoreFieldNames(
        desc
    )  # gdb system fields that will be handled automatically and cannot be calculated
    ignore = [nm.upper() for nm in ignore]

    for field in desc.fields:  # get the uppercase names for everything that exists in the dataset
        if field.name.upper() not in ignore:
            layerNames.append(field.name.upper())

    for field in allFields:  # loop through everything that might exist
        nm = field.getAttributeNode("Name").nodeValue.replace(
            '.', '_')  #  handle joins and remaining . in field names
        if nm != dla._noneFieldName and nm.upper() not in ignore and nm.upper(
        ) in layerNames:  # ignore the None and ignore fields and names not in the dataset
            idx = dla.getFieldIndexList(names, nm)
            if idx is None:  # if the name is not already in the list
                names.append(nm)
                typ = field.getAttributeNode("Type").nodeValue
                leng = field.getAttributeNode("Length").nodeValue
                ftypes.append(typ)
                lengths.append(leng)

            #FIXME : Steve, was not sure why you were capturing an error here, and then doing something # from Steve - was looking for names that actually exist in the dataset and are not gdb system fields. No guarantee Xml matches dataset
            #try:
            #names.index(nm)
            #except:
            #names.append(nm)
            #typ = field.getAttributeNode("Type").nodeValue
            #leng = field.getAttributeNode("Length").nodeValue
            #ftypes.append(typ)
            #lengths.append(leng)
    retVal = setFieldValues(table, fields, names, ftypes, lengths)
    if retVal == False:
        success = False
    arcpy.ClearWorkspaceCache_management(dla.workspace)
    dla.cleanupGarbage()

    arcpy.ResetProgressor()
    if ignore == True:
        success = True
    return success
Example #21
def publish(xmlFileNames):
    # function called from main or from another script, performs the data update processing
    global _useReplaceSettings
    dla._errCount = 0

    arcpy.SetProgressor("default","Data Assistant")
    arcpy.SetProgressorLabel("Data Assistant")
    xmlFiles = xmlFileNames.split(";")
    layers = []

    for xmlFile in xmlFiles: # multi value parameter, loop for each file
        xmlFile = dla.getXmlDocName(xmlFile)
        dla.addMessage("Configuration file: " + xmlFile)
        xmlDoc = dla.getXmlDoc(xmlFile) # parse the xml document
        if xmlDoc == None:
            return
        prj = dla.setProject(xmlFile,dla.getNodeValue(xmlDoc,"Project"))
        if prj == None:
            dla.addError("Unable to open your project, please ensure it is in the same folder as your current project or your Config file")
            return False

        source = dla.getDatasetPath(xmlDoc,"Source")
        target = dla.getDatasetPath(xmlDoc,"Target")
        targetName = dla.getDatasetName(target)
        dla.addMessage(source)
        dla.addMessage(target)

        if dlaService.checkLayerIsService(source) or dlaService.checkLayerIsService(target):
            token = dlaService.getSigninToken() # when signed in get the token and use this. Will be requested many times during the publish
            # exit here before doing other things if not signed in
            if token == None:
                dla.addError("User must be signed in for this tool to work with services")
                return False

        expr = getWhereClause(xmlDoc)
        if _useReplaceSettings == True and (expr == '' or expr == None):
            dla.addError("There must be an expression for replacing by field value, current value = " + str(expr))
            return False

        errs = False
        if dlaService.validateSourceUrl(source) == False:
            dla.addError("Source path does not appear to be a valid feature layer")
            errs = True

        if _useReplaceSettings == True:
            if dlaService.validateTargetReplace(target) == False:
                dla.addError("Target path does not have correct privileges")
                errs = True
        elif _useReplaceSettings == False:
            if dlaService.validateTargetAppend(target) == False:
                dla.addError("Target path does not have correct privileges")
                errs = True

        if errs:
            return False


        dla.setWorkspace()

        if dla.isTable(source) or dla.isTable(target):
            datasetType = 'Table'
        else:
            datasetType = 'FeatureClass'

        if not dla.isStaged(xmlDoc):
            res = dlaExtractLayerToGDB.extract(xmlFile,None,dla.workspace,source,target,datasetType)
            if res != True:
                table = dla.getTempTable(targetName)
                msg = "Unable to export data, there is a lock on existing datasets or another unknown error"
                if arcpy.TestSchemaLock(table) != True and arcpy.Exists(table) == True:
                    msg = "Unable to export data, there is a lock on the intermediate feature class: " + table
                dla.addError(msg)
                print(msg)
                return
            else:
                res = dlaFieldCalculator.calculate(xmlFile,dla.workspace,targetName,False)
                if res == True:
                    dlaTable = dla.getTempTable(targetName)
                    res = doPublish(xmlDoc,dlaTable,target,_useReplaceSettings)
        else:
            dla.addMessage('Data previously staged, will proceed using intermediate dataset')
            dlaTable = dla.workspace + os.sep + dla.getStagingName(source,target)
            res = doPublish(xmlDoc,dlaTable,target,_useReplaceSettings)
            if res == True:
                dla.removeStagingElement(xmlDoc)
                xmlDoc.writexml(open(xmlFile, 'wt', encoding='utf-8'))
                dla.addMessage('Staging element removed from config file')

        arcpy.ResetProgressor()
        if res == False:
            err = "Data Assistant Update Failed, see messages for details"
            dla.addError(err)
            print(err)
        else:
            layers.append(target)

    arcpy.SetParameter(_outParam,';'.join(layers))
Example #22
def testSchemaLock(dataset):
    # test if a schema lock is possible
    res = arcpy.TestSchemaLock(dataset)
    return res
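A minimal usage sketch for the wrapper above, assuming a hypothetical file geodatabase path; TestSchemaLock returns True when a schema lock can be acquired, so schema edits are safe in that branch:

import arcpy

dataset = r"C:\data\demo.gdb\parcels"  # hypothetical dataset path
if testSchemaLock(dataset):
    arcpy.AddField_management(dataset, "NOTES", "TEXT", field_length=100)
else:
    arcpy.AddWarning("Schema lock not available for " + dataset)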
Example #23
        # set final output to shapefile if input is shapefile and make the new field name compatible with the database type
        #
        # need to test shapefile option some more
        #
        if inputFC.endswith(".shp"):
            comFC2 = comFC2 + ".shp"
            fld1Name = fld1Name[0:10]

        if arcpy.Exists(comFC2):
            # Having problems removing this when two successive runs are made with this tool.
            #
            try:
                sleep(3)

                if arcpy.TestSchemaLock(comFC2):
                    arcpy.Delete_management(comFC2)

                else:
                    raise MyError, "Unable to overwrite existing featureclass '" + comFC2+ "' (schemalock)"

            except:
                errorMsg()
                raise MyError, "Unable to overwrite existing featureclass '" + comFC2

        if arcpy.Exists(comFC):
            # Having problems removing this when two successive runs are made with this tool.
            #
            try:
                sleep()
Example #24
    orp = caf.replace('ContactsAndFaults', 'OrientationPoints')
    mup = caf.replace('ContactsAndFaults', 'MapUnitPolys')
    fields = [
        'Type', 'IsConcealed', 'LocationConfidenceMeters',
        'ExistenceConfidence', 'IdentityConfidence', 'Symbol'
    ]
else:  # is CMU
    caf = ''
    mup = inFds + '/CMUMapUnitPolys'
    gel = ''
    orp = ''
    fields = ['Type']
dmu = os.path.dirname(inFds) + '/DescriptionOfMapUnits'

addMsgAndPrint('  Feature dataset ' + inFds + ', isLocked = ' +
               str(arcpy.TestSchemaLock(inFds)))

for fc in caf, gel:
    if arcpy.Exists(fc):
        if numberOfRows(fc) > 0:
            addMsgAndPrint('  processing ' + os.path.basename(fc))
            hasRep, repDomain = hasCartoRep(inFds, fc)
            if hasRep:
                fields.append('RuleID1')
                repRuleDict = buildRepRuleDict(repDomain)
            addMsgAndPrint('fields = ' + str(fields))
            with arcpy.da.UpdateCursor(fc, fields) as cursor:
                for row in cursor:
                    rowChanged = False
                    typ = row[0]
                    isCon = row[1]
Example #25
                               field_names=['name', 'version',
                                            'date']) as cursor:
        date = datetime.datetime.now()
        date_string = str(date).split(' ')[0]
        cursor.insertRow(('attribute rules', version, date_string))


if __name__ == '__main__':
    '''Main entry point for program. Parse arguments and pass to engine module
    '''
    args = docopt(__doc__, version=VERSION)

    sde = get_sde_path_for(args['--env'])
    print('acting on {}'.format(sde))

    if not arcpy.TestSchemaLock(os.path.join(sde, facility.TABLE)):
        print(
            'Unable to reach the database or acquire the necessary schema lock to add rules'
        )
        exit(0)

    if args['update']:
        for rule in get_rules(sde, args['--rule']):
            rule.execute()

        update_version(sde, VERSION)
    elif args['delete']:
        for rule in get_rules(sde, args['--rule']):
            rule.delete()

    arcpy.management.ClearWorkspaceCache(sde)
Example #26
def ReplaceIntData(config_file, log_file_location):
    # --------------------------------
    # Setup
    # check for log file location & set up logging
    if not os.path.isdir(log_file_location):
        os.mkdir(log_file_location)
    try:
        configlogging.ConfigLogging(log_file_location)
    except:
        print('log file error')
        sys.exit(1)
    logger = logging.getLogger('logger')

    # set to overwrite
    arcpy.env.overwriteOutput = True

    # check for config file
    if os.path.isfile(config_file):
        if not os.path.splitext(config_file)[1] == ".ini":
            error = "Configuration file is not a *.ini file " + str(
                config_file)
            logger.error(error)
            return (False, error)

        # initialize config parser object
        config = ConfigParser.ConfigParser()

        # read config file
        config.read(config_file)
    else:
        error = "Configuration file " + str(
            config_file) + " not found or is not a *.ini file. Exiting."
        logger.error(error)
        return (False, error)

    # --------------------------------
    # Get configurations from file for data sources, SQL & datum transformation
    logger.debug("Reading configuration file for... " + str(config_file))
    # p_val = config.get('SECTION','p_name') from config script
    try:
        inputFeatureClass = config.get('LOCAL_DATA', 'INPUTFC')  ## required
        outputFileGdb = config.get('LOCAL_DATA', 'OUTPUTFGDB')  ## required
        outputFeatureClass = config.get('LOCAL_DATA', 'OUTPUTFC')  ## required

        srcOfData = config.get('SQL_PARAMETERS', 'SOURCE')  ## required
        addlSql = config.get('SQL_PARAMETERS', 'ADDLSQL')  ## not required

        datumTransformation = config.get('SPATIAL_REF',
                                         'DATUMTRANS')  ## required
    except ConfigParser.Error as cperror:
        error = "Config parser error " + str(cperror)
        logger.error(error)
        return (False, error)

    # check for required inputs
    if not arcpy.Exists(inputFeatureClass):
        error = str(inputFeatureClass
                    ) + " does not exist. Run the config tool. Exiting."
        logger.error(error)
        return (False, error)

    if not arcpy.Exists(outputFileGdb):
        error = str(
            outputFileGdb) + " does not exist. Run the config tool. Exiting."
        logger.error(error)
        return (False, error)

    if not arcpy.Exists(outputFeatureClass):
        error = str(outputFeatureClass
                    ) + " does not exist. Run the config tool. Exiting."
        logger.error(error)
        return (False, error)

    if srcOfData == '':
        error = "Please specify an SOD/source of data by running the config tool. Exiting."
        logger.error(error)
        return (False, error)

    if datumTransformation == '':
        error = "Please specify a datum transformation by running the config tool. Exiting."
        logger.error(error)
        return (False, error)

    # get feature class base names for use in logging
    descInput = arcpy.Describe(inputFeatureClass)
    inputName = descInput.baseName
    descOutput = arcpy.Describe(outputFeatureClass)
    outputName = descOutput.baseName

    # build list of fields to check input data for SOD field
    sod = "SOD"
    fieldNamesInput = [
        f.name for f in arcpy.ListFields(inputFeatureClass, sod)
    ]

    # fail if SOD field does not exist in source
    if not sod in fieldNamesInput:
        error = inputName + "| Failed: check for existence of SOD field"
        logger.error(error)
        return (False, error)

    # fail if cannot acquire a schema lock for the output feature class
    if not arcpy.TestSchemaLock(outputFeatureClass):
        error = "Failed: cannot obtain schema lock for {0}".format(outputName)
        logger.error(error)
        return (False, error)

    #---------------------------------
    # Field mapping: create dictionary from all field name pairs ("options" & "values")
    #   in field mapper "section" of config file
    #
    # http://stackoverflow.com/questions/8578430/python-config-parser-to-get-all-the-values-from-a-section
    # http://resources.arcgis.com/en/help/main/10.2/index.html#//002z00000014000000

    fldMapDict = dict(config.items('FIELD_MAPPER'))

    # Check for fields that are too long
    logger.debug("Checking field lengths")
    outcome = validatefields.ValidateFields(inputFeatureClass,
                                            outputFeatureClass, fldMapDict)
    if not outcome[0]:
        error = inputName + " | Failed: validate fields | " + outcome[1]
        logger.error(error)
        return (False, error)
    else:
        if not outcome[1] == "":
            logger.warning(inputName + " | " + outcome[1])
        logger.debug("Succeeded: validate fields")

    #--------------------------
    # Pass dictionary of field maps to field mapper function, return fieldMappings object
    logger.debug("Field mapping")
    outcome = createfm.CreateFieldMapping(inputFeatureClass,
                                          outputFeatureClass, fldMapDict)
    if not outcome[0]:
        error = inputName + " | Failed: field mapping | " + outcome[1]
        logger.error(error)
        return (False, error)
    else:
        logger.debug("Succeeded: field mapping  " + outcome[1])
        fieldMappings = outcome[2]

    #--------------------------
    # Delete all records in output feature class using truncate function
    logger.debug("Truncating table")
    outcome = truncatetable.Truncate(outputFeatureClass)
    if not outcome[0]:
        error = outputName + " | Failed: truncate table | " + outcome[1]
        logger.error(error)
        return (False, error)
    else:
        logger.debug("Succeeded: truncate table")

    #--------------------------
    # Set environments for append so it will project using datum transformation
    #   append respects the output coordinate system, so does the projection for us
    arcpy.env.workspace = outputFileGdb
    arcpy.env.geographicTransformations = datumTransformation

    #--------------------------
    # Build SQL query with correct delimiters; "SOD" is "source of data" field name
    logger.debug("Building SQL expression")
    outcome = buildsql.BuildSQL(inputFeatureClass, sod, srcOfData)
    whereClause = outcome[1]
    if not addlSql == '':
        whereClause = whereClause + ' ' + addlSql
    logger.debug(inputName + " | Final SQL expression: " + whereClause)

    #--------------------------
    # Append new records to output feature class using append function
    #   Fields which don't match the output schema will be mapped from above function
    #   Fields which exist in the input but not output schema will be dropped
    #   Fields that match will be automatically mapped
    logger.debug("Appending data")
    outcome = appenddata.AppendData(inputFeatureClass, outputFeatureClass,
                                    whereClause, fieldMappings)
    if not outcome[0]:
        error = inputName + " | Failed: append data | " + outcome[1]
        logger.error(error)
        return (False, error)
    logger.debug("Succeeded: append data")

    #--------------------------
    # Check output feature class for unique ids, create message if there are issues
    logger.debug("Checking for unique IDs")
    idFieldName = 'SID'

    # Build field names list to check for SID field in output data
    fieldNamesOutput = [
        f.name for f in arcpy.ListFields(outputFeatureClass, idFieldName)
    ]

    if idFieldName in fieldNamesOutput:
        outcome = uniqueid.UniqueId(outputFeatureClass, idFieldName)
        if not outcome[0]:
            error = outputName + " | Failed: validate ids | " + outcome[1]
            logger.error(error)
            return (False, error)
        if not outcome[1] == "":
            logger.warning(outputName + " | " + outcome[1])
    else:
        logger.debug(outputName + " | No SID field; skipped.")
    logger.debug("Checked for unique IDs")

    #--------------------------
    # Repair output feature class geometry
    logger.debug("Repairing geometry")
    try:
        arcpy.RepairGeometry_management(outputFeatureClass)
    except Exception as e:
        logger.warning(outputName + " | Failed: repair geometry")
        logger.warning(str(e))
    logger.debug("Repaired geometry")

    #--------------------------
    # Calculate Lat Long fields
    if arcpy.Describe(outputFeatureClass).shapeType == "Point":
        logger.debug("Calculating lat and long fields")
        outcome = calclatlong.CalcLatLong(outputFeatureClass)
        if not outcome[0]:
            error = outputName + " | Failed: calculate lat and long | " + outcome[
                1]
            logger.error(error)
            return (False, error)
        logger.debug("Succeeded: calculate lat and long fields")

    #--------------------------
    # Strip unwanted metadata and geoprocessing history
    logger.debug("Removing unwanted metadata and gp history")
    outcome = removemetadata.RemoveUnwantedMetadata(outputFileGdb,
                                                    outputFeatureClass)
    if not outcome[0]:
        error = outputName + " | Failed: remove metadata. | " + outcome[1]
        logger.error(error)
        return (False, error)
    logger.debug("Succeeded: removed unwanted metadata and gp history")

    return (True, "Succeeded")
Example #27
def publish(xmlFileNames):
    # function called from main or from another script, performs the data update processing
    global sourceLayer, targetLayer, _success
    dla._errorCount = 0

    arcpy.SetProgressor("default", "Data Assistant")
    arcpy.SetProgressorLabel("Data Assistant")
    xmlFiles = xmlFileNames.split(";")
    for xmlFile in xmlFiles:  # multi value parameter, loop for each file
        dla.addMessage("Configuration file: " + xmlFile)
        xmlDoc = dla.getXmlDoc(xmlFile)  # parse the xml document
        if xmlDoc == None:
            return
        svceS = False
        svceT = False
        if sourceLayer == "" or sourceLayer == None:
            sourceLayer = dla.getNodeValue(xmlDoc, "Source")
            svceS = dla.checkLayerIsService(sourceLayer)
        if targetLayer == "" or targetLayer == None:
            targetLayer = dla.getNodeValue(xmlDoc, "Target")
            svceT = dla.checkLayerIsService(targetLayer)

        dla.addMessage(targetLayer)
        ## Added May2016. warn user if capabilities are not correct, exit if not a valid layer
        if not dla.checkServiceCapabilities(sourceLayer, True):
            return False
        if not dla.checkServiceCapabilities(targetLayer, True):
            return False

        if svceS == True or svceT == True:
            token = dla.getSigninToken(
            )  # when signed in get the token and use this. Will be requested many times during the publish
            if token == None:
                dla.addError(
                    "User must be signed in for this tool to work with services"
                )
                return

        expr = getWhereClause(xmlDoc)
        if useReplaceSettings == True and (expr == '' or expr == None):
            dla.addError(
                "There must be an expression for replacing by field value, current value = "
                + str(expr))
            return False

        dla.setWorkspace()
        targetName = dla.getTargetName(xmlDoc)
        res = dlaExtractLayerToGDB.extract(xmlFile, None, dla.workspace,
                                           sourceLayer, targetName)
        if res != True:
            table = dla.getTempTable(targetName)
            msg = "Unable to export data, there is a lock on existing datasets or another unknown error"
            if arcpy.TestSchemaLock(table) != True:
                msg = "Unable to export data, there is a lock on the intermediate feature class: " + table
            dla.addError(msg)
            print(msg)
            return
        else:
            res = dlaFieldCalculator.calculate(xmlFile, dla.workspace,
                                               targetName, False)
            if res == True:
                dlaTable = dla.getTempTable(targetName)
                res = doPublish(xmlDoc, dlaTable, targetLayer)

        arcpy.ResetProgressor()
        sourceLayer = None  # set source and target back to None for multiple file processing
        targetLayer = None
        if res == False:
            err = "Data Assistant Update Failed, see messages for details"
            dla.addError(err)
            print(err)
Example #28
def append(object, source_object, target_object, config):
    logger = config['LOGGING']['logger']

    needs_spatial_index = False
    if arcpy.Describe(target_object).datasetType == "FeatureClass":
        needs_spatial_index = True
        
    if arcpy.Exists(source_object):
        if arcpy.Exists(target_object):
                
            # Remove the spatial index
            if needs_spatial_index and arcpy.Describe(target_object).hasSpatialIndex:
                logger.info("Spatial Index wird entfernt.")
                if arcpy.TestSchemaLock(target_object):
                    arcpy.RemoveSpatialIndex_management(target_object)
                    logger.info("Spatial Index erfolgreich entfernt.")
                else:
                    logger.warn("Spatial Index konnte wegen eines Locks nicht entfernt werden.")
                
            logger.info("Truncating " + target_object)
            arcpy.TruncateTable_management(target_object)
                
            logger.info("Appending " + source_object)
            arcpy.Append_management(source_object, target_object, "TEST")
                
            # Create the spatial index
            if needs_spatial_index:
                logger.info("Spatial Index wird erstellt.")
                if arcpy.TestSchemaLock(target_object):
                    logger.info("Grid Size wird berechnet.")
                    grid_size = calculate_grid_size(source_object)
                    logger.info("Grid Size ist: " + unicode(grid_size))
                    if grid_size > 0:
                        arcpy.AddSpatialIndex_management(target_object, grid_size)
                    else:
                        arcpy.AddSpatialIndex_management(target_object)
                    logger.info("Spatial Index erfolgreich erstellt.")
                else:
                    logger.warn("Spatial Index konnte wegen eines Locks nicht erstellt werden.")
                
            logger.info("Zähle Records in der Quelle und im Ziel.")
            source_count = int(arcpy.GetCount_management(source_object)[0])
            logger.info("Anzahl Records in der Quelle: " + unicode(source_count))
            target_count = int(arcpy.GetCount_management(target_object)[0])
            logger.info("Anzahl Records im Ziel: " + unicode(target_count))
                
            if source_count==target_count:
                logger.info("Anzahl Records identisch")
            else:
                logger.error("Anzahl Records nicht identisch. Ebene " + object)
                logger.error("Import wird abgebrochen.")
                avLader.helpers.helper.delete_connection_files(config, logger)
                sys.exit() 
        else:
            logger.error("Ziel-Objekt " + target_object + " existiert nicht.")
            logger.error("Import wird abgebrochen.")
            avLader.helpers.helper.delete_connection_files(config, logger)
            sys.exit()
    else:
        logger.error("Quell-Objekt " + source_object + " existiert nicht.")
        logger.error("Import wird abgebrochen.")
        avLader.helpers.helper.delete_connection_files(config, logger)
        sys.exit()
Example #29
HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(LOGLEVEL)
LOGGER.debug("------- START LOGGING-----------")
# Use the default arcpy.AddMessage method to only show this in the tool output
# window, otherwise we will log it to the log file too.
arcpy.AddMessage("Your Log file is: " + LOGFILE)

# Put everything in a try/finally statement, so that we can close the logger
# even if the script bombs out or we raise an execution error along the line
# See http://www.tutorialspoint.com/python/python_exceptions.htm
try:
    # Start the process by first running some sanity checks
    # Check if we can obtain a schema lock - adapted from
    # https://pro.arcgis.com/en/pro-app/arcpy/functions/testschemalock.htm
    if not arcpy.TestSchemaLock(HAZAREA_FC):
        # Warn the user that the required schema lock could not be obtained.
        LOGGER.error("Unable to acquire the necessary schema lock on {0} \
                       ".format(HAZAREA_FC))
        raise arcpy.ExecuteError

    # Check if the feature class has any features before we start
    if int(arcpy.GetCount_management(HAZAREA_FC)[0]) == 0:
        LOGGER.error("{0} has no features. Please use a feature class that \
                      already contains the required features and attributes." \
                      .format(HAZAREA_FC))
        raise arcpy.ExecuteError

    # Define an empty list to hold the lists of fields and their parameters
    ARRAY_FIELDS = []
Example #30
def dipNumbers(gdb,mapScaleDenominator):
    if not arcpy.Exists(gdb+'/GeologicMap/OrientationPoints'):
        addMsgAndPrint('  Geodatabase '+os.path.basename(gdb)+' lacks feature class OrientationPoints.')
        return

    desc = arcpy.Describe(gdb+'/GeologicMap/OrientationPoints')
    mapUnits = desc.spatialReference.linearUnitName
    if 'meter' in mapUnits.lower():
        mapUnitsPerMM = float(mapScaleDenominator)/1000.0
    else:
        mapUnitsPerMM = float(mapScaleDenominator)/1000.0 * 3.2808

    if numberOfRows(gdb+'/GeologicMap/OrientationPoints') == 0:
        addMsgAndPrint('  0 rows in OrientationPoints.')
        return

    ## MAKE ORIENTATIONPOINTLABELS FEATURE CLASS
    arcpy.env.workspace = gdb+'/GeologicMap'
    OPL = gdb+'/GeologicMap/'+OPLName
    if arcpy.TestSchemaLock(OPL) == False:
        addMsgAndPrint('    TestSchemaLock('+OPLName+') = False.')
        #raise arcpy.ExecuteError
        #return
        #pass
       
    testAndDelete(OPL)
    arcpy.CreateFeatureclass_management(gdb+'/GeologicMap',OPLName,'POINT')
    arcpy.AddField_management(OPL,'OrientationPointsID','TEXT',"","",50)
    arcpy.AddField_management(OPL,'Inclination','TEXT',"","",3)
    arcpy.AddField_management(OPL,'PlotAtScale','FLOAT')

    ## ADD FEATURES FOR ROWS IN ORIENTATIONPOINTS WITHOUT 'HORIZONTAL' OR 'VERTICAL' IN THE TYPE VALUE
    OPfields = ['SHAPE@XY','OrientationPoints_ID','Type','Azimuth','Inclination','PlotAtScale']
    attitudes = arcpy.da.SearchCursor(gdb+'/GeologicMap/OrientationPoints',OPfields)
    OPLfields = ['SHAPE@XY','OrientationPointsID','Inclination','PlotAtScale']
    inclinLabels = arcpy.da.InsertCursor(OPL,OPLfields)
    for row in attitudes:
        oType = row[2]
        if showInclination(oType):
            x = row[0][0]
            y = row[0][1]
            OP_ID = row[1]
            azi = row[3]
            inc = int(round(row[4]))
            paScale = row[5]
            if isPlanar(oType):
                geom = ' S '
                inclinRadius = 2.4 * mapUnitsPerMM
                azir = math.radians(azi)
            else: # assume linear
                geom = ' L '
                inclinRadius = 7.4 * mapUnitsPerMM
                azir = math.radians(azi - 90)
            ix = x + math.cos(azir)*inclinRadius
            iy = y - math.sin(azir)*inclinRadius

            addMsgAndPrint( '    inserting '+oType+geom+str(int(round(azi)))+'/'+str(inc))
            inclinLabels.insertRow(([ix,iy],OP_ID,inc,paScale))
            
    del inclinLabels
    del attitudes

    ## INSTALL NEWLY-MADE FEATURE CLASS USING .LYR FILE. SET DATA SOURCE. SET DEFINITION QUERY    

    #make copy of .lyr file
    newLyr = os.path.dirname(gdb)+'/NewOrientationPointLabels.lyr'
    shutil.copy(os.path.dirname(sys.argv[0])+'/../Resources/OrientationPointLabels.lyr',newLyr)
    OPLyr = arcpy.mp.Layer(newLyr)
    ## reset data source
    addMsgAndPrint('   gdb = '+gdb)
    if gdb[-3:].lower() == 'gdb':
        wsType = 'FILEGDB_WORKSPACE'
    elif gdb[-3:].lower() == 'mdb':
        wsType = 'ACCESS_WORKSPACE'
    else:
        addMsgAndPrint('Workspace type not recognized.')
        forceExit()
    #### note how we don't include the feature dataset in the workspace 
    OPLyr.replaceDataSource(gdb,wsType,'OrientationPointLabels' )
    ## set definition query
    pasName = arcpy.AddFieldDelimiters(gdb,'PlotAtScale')
    defQuery = pasName+' >= '+str(mapScaleDenominator)
    OPLyr.definitionQuery = defQuery

    # Insert new OrientationPointLabels.lyr
    try:
        lyr,df,refLyr,insertPos = findLyr(gdb+'\GeologicMap\OrientationPoints')
        arcpy.mp.InsertLayer(df,lyr,OPLyr,'BEFORE')
    except:
        addMsgAndPrint('  Unable to insert OrientationPointLabels.lyr.')