Example #1
##output location of the composite rasters for part 2 of script
outPathcomp = path + "postprocessOutput\\rasters\\composites\\"

#input location of the rasters for part 3 of script
rasDir2 = path + "postprocessOutput\\rasters\\composites\\"

##Location of the MXD document that will be created and edited
mapDoc = path + "imgCreation.mxd"
mxd = mp.MapDocument(mapDoc)

##Output location of the raster layers created
outlyrDir = path + "postprocessOutput\\rasters\\layers\\"

##Set up the spatial reference for the file
spatialRef = arcpy.SpatialReference(
    32618
)  # 32618 = WGS_1984_UTM_Zone_18N; 102387 = NAD_1983_2011_UTM_Zone_18N
print "Beginning third step of script processing"

#Set up blank raster to use
outBlank = path + "postprocessOutput\\rasters\\blankRaster\\blankRaster.tif"
print "Creating blank raster"
##blankOut = Con((Raster(path+preRasdir+"\\FM_1_Q1_Depth.tif") == 2),0,0)
##blankOut.save(outBlank)

##List the files in the composite raster directory used by part 3
dirList2 = os.listdir(rasDir2)

##List the files found in dirList2 with their full pathname
fileList3 = [rasDir2 + "\\" + filename3 for filename3 in dirList2]
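##A hedged sketch of the step these lists set up: publish each composite as
##a saved raster layer and add it to the map document (the layer naming is
##an assumption, not the original script's)
df = mp.ListDataFrames(mxd)[0]
for ras in fileList3:
    lyrName = os.path.splitext(os.path.basename(ras))[0]
    arcpy.MakeRasterLayer_management(ras, lyrName)
    arcpy.SaveToLayerFile_management(lyrName, outlyrDir + lyrName + ".lyr")
    mp.AddLayer(df, mp.Layer(outlyrDir + lyrName + ".lyr"))
mxd.save()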
Example #2
import arcpy
import sys
import numpy
from pandas import DataFrame

sr = arcpy.SpatialReference(2881)

workdir = r"//ad.sfwmd.gov/dfsroot/data/wsd/GIS/GISP_2012/DistrictAreaProj/CFWI/Data/Soils/"
Myworkspace = workdir + "ProcessDir.gdb"

ModelMesh =  r"\\ad.sfwmd.gov\dfsroot\data\wsd\GIS\GISP_2012\DistrictAreaProj\CFWI\Data\Soils\ECFTX_GRID_V3.shp"
SoilGroups = workdir + "ECFTXsoilMu.shp"
SoilModelMesh = Myworkspace + "/SoilModelMesh"
maxAreaSoilMesh = Myworkspace + "/maxAreaSoilMesh"
InterceptFeatures = ModelMesh + " #;" + SoilGroups + " #"
arcpy.Intersect_analysis(in_features=InterceptFeatures,
                         out_feature_class=SoilModelMesh,
                         join_attributes="ALL",
                         cluster_tolerance="#",
                         output_type="INPUT")

arcpy.AddGeometryAttributes_management(Input_Features=SoilModelMesh,
                                       Geometry_Properties="AREA",
                                       Length_Unit="FEET_US",
                                       Area_Unit="SQUARE_FEET_US",
                                       Coordinate_System=sr)

arcpy.Dissolve_management(in_features=SoilModelMesh,
                          out_feature_class=maxAreaSoilMesh,
                          dissolve_field="SEQNUM;MUKEY",
                          statistics_fields="POLY_AREA SUM",
                          multi_part="MULTI_PART",
                          unsplit_lines="DISSOLVE_LINES")

Soil_Mesh = maxAreaSoilMesh
Soil_Mesh_lyr = arcpy.MakeFeatureLayer_management(Soil_Mesh, "Soil_Mesh_lyr")
Soil_Mesh_nparr = arcpy.da.FeatureClassToNumPyArray(Soil_Mesh_lyr, ['OBJECTID','SEQNUM','MUKEY','SUM_POLY_AREA'])
Soil_Mesh_df = DataFrame(Soil_Mesh_nparr, columns=['SEQNUM','MUKEY','SUM_POLY_AREA'])
Soil_Mesh_df.columns=['SEQNUM','MUKEY','SOIL_AREA']
maxAreabySeq = Soil_Mesh_df.sort(['SOIL_AREA'], ascending=False).groupby(['SEQNUM'], as_index=False).first()  # DataFrame.sort() was removed in later pandas; use sort_values() there

Soilcsv = workdir + "predominanteSoil.csv"
maxAreabySeq.to_csv(Soilcsv,index=False)
Example #3
# IterateToShp.py
# Created on: 2012-04-24 10:40:13.00000
# Created by: Kyle Balke - Senior GIS Analyst
# Description: This script takes a provider's coverage feature class and prepares it for use on the Broadband Editing App. The input feature class is re-projected to WGS84 (required by the editing app),
#              a temporary field (TOTMaxAddDownTemp) is calculated, and the script then loops through each unique TOTMaxAddDownTemp value, exporting each to a shapefile. The final shapefiles are compressed into individual zip files.
# ---------------------------------------------------------------------------

# Import arcpy module
import arcpy, os, sys, string, zipfile, zlib
arcpy.env.overwriteOutput = True

# Local variables:
InputDataset = arcpy.GetParameterAsText(0)
DataPath = r"Z:\Broadband\BBND\Provider_Update\201409"
fieldName = "TOTMaxAddDownTemp"
outCS = arcpy.SpatialReference(4326)

# Process: Get the InputDataset File Name
InputFileName = os.path.splitext(os.path.basename(InputDataset))[0]
OutputProject = DataPath + "\\Scratch.gdb\\" + InputFileName + "_Project"
OutputShapefile = DataPath + os.path.sep + "Shapefiles" + os.path.sep
OutputZipFiles = DataPath + os.path.sep + "ZipFiles" + os.path.sep
OutputFCLocation = DataPath + "\\NDUpdate20140930.gdb\\Provider_Coverage\\"
OutTable = DataPath + "\\NDUpdate20140930.gdb\\" + "tbl_CheckGeo_" + InputFileName

# Create a copy of the input FeatureClass
arcpy.FeatureClassToFeatureClass_conversion(InputDataset, OutputFCLocation,
                                            InputFileName, "")

# Process: Project the Input Dataset to WGS84
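# A hedged sketch of the projection and geometry-check steps the variables
# above point to; omitting a datum transformation is an assumption
arcpy.Project_management(OutputFCLocation + InputFileName, OutputProject, outCS)
arcpy.CheckGeometry_management(OutputProject, OutTable)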
Example #4
# import system module
import arcpy
from arcpy import env
env.workspace = "C:/data/output"

# Set coordinate system of the output fishnet
env.outputCoordinateSystem = arcpy.SpatialReference("_UTMZone_")

# Source for the output feature class
outFeatureClass = "C:/Users/User1/Desktop/fishnet.shp"

# Set the origin
originCoordinate = 'LeftCoordinate BottomCoordinate'

# Set the orientation: a point on the desired y-axis (typically the origin
# with a larger Y value)
yAxisCoordinate = 'LeftCoordinate BottomCoordinate'

# Dimensions of each cell (i.e. the distance between DTM points)
cellSizeWidth = '100'
cellSizeHeight = '100'

# With the number of rows and columns set to 0, the cell counts are derived
# from the cell size, origin, and opposite corner; non-zero values would
# instead determine the size of each cell
numRows = '0'
numColumns = '0'

oppositeCorner = 'RightCoordinate TopCoordinate'

# Create a point label feature class
labels = 'LABELS'
templateExtent = '#'
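# The excerpt ends before the tool call these variables feed; a minimal
# sketch of it (POLYGON geometry is an assumption):
arcpy.CreateFishnet_management(outFeatureClass, originCoordinate,
                               yAxisCoordinate, cellSizeWidth, cellSizeHeight,
                               numRows, numColumns, oppositeCorner, labels,
                               templateExtent, 'POLYGON')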
Example #5

# Step 6

		# clipping the clustered area to calculate the runoff coefficient
		# Process: Clip
		# Set local variables
		# try:
		in_features = os.path.join(input_loc,'rr_zone_clusters_polygon')#cluster_polygon_feature)
		# except:
			# in_features = os.path.join(input_loc,'rr_zone_clusters_polygon')
		clip_features = watershed_with_mean_Project
		X_clustered_design_area_clip = os.path.join(output_loc,"X_clustered_design_area_clip")
		xy_tolerance = ""

		arcpy.env.outputCoordinateSystem = arcpy.SpatialReference(spatial_reference)
		# Execute Clip
		arcpy.Clip_analysis(in_features, clip_features, X_clustered_design_area_clip, xy_tolerance)


# Step 7

		# finding mean slope
		# Local variables:
		_contributing_area = watershed_with_mean_Project # contributingArea
		_contributing_area_Dissolve = os.path.join(output_loc,"X_contributing_area_Dissolve")
		Statistics_type = "MEAN"
		ZonalSt_X_contr1 = os.path.join(output_loc,"X_mean_slope_contributing_area") 

		# Process: Dissolve
		arcpy.Dissolve_management(_contributing_area, _contributing_area_Dissolve, "", "", "SINGLE_PART", "DISSOLVE_LINES")
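		# A hedged sketch of the zonal-statistics step these variables set up;
		# "slope_raster" is an assumed input that this excerpt never defines
		from arcpy.sa import ZonalStatistics
		outZonal = ZonalStatistics(_contributing_area_Dissolve, "OBJECTID",
		                           slope_raster, Statistics_type)
		outZonal.save(ZonalSt_X_contr1)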
Example #6
#banner
print '***\nConvert Smartrak to featureclass\n***'

#import Modules
import arcpy, os, string

#parameters
gdb = r'D:\TEMP\Smartrak.gdb'
tbl = 'fact_Smartrak_GetHistoryForPeriod'
##SmartrakLongitude, SmartrakLatitude
fc = 'Smartrak'

#env
arcpy.env.workspace = gdb
arcpy.env.overwriteOutput = 1
sr = arcpy.SpatialReference(4326)

#create fc
print 'check and create ' + fc
#if arcpy.Exists(fc):
#    arcpy.Delete_management(fc)
#arcpy.CreateFeatureclass_management( out_path = gdb, out_name = fc, geometry_type = 'point', spatial_reference = sr )

#schema
print 'recreate Schema from ' + tbl
flds = arcpy.ListFields(tbl)
#for fld in flds:
#    ##print 'adding ' + fld.name + '...'
#    if not string.upper(fld.name) in ['OBJECTID']:
#        arcpy.AddField_management( in_table = fc, field_name = fld.name, field_type = fld.type, field_precision = fld.precision, field_scale = fld.scale, field_length = fld.length)
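#a hedged sketch of the load step the commented schema code prepares for;
#SmartrakLongitude/SmartrakLatitude are the coordinate fields named above
print 'load rows from ' + tbl
fldNames = [fld.name for fld in flds if fld.name.upper() != 'OBJECTID']
ix = fldNames.index('SmartrakLongitude')
iy = fldNames.index('SmartrakLatitude')
with arcpy.da.SearchCursor(tbl, fldNames) as scur:
    with arcpy.da.InsertCursor(fc, ['SHAPE@XY'] + fldNames) as icur:
        for row in scur:
            icur.insertRow(((row[ix], row[iy]),) + row)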
Example #7
proxyURL = 'http://132.1.10.230:8080'  # proxy URL (None if no proxy)

entryXPath = './entry'  # XPath for each entry
elementPropertiesXPath = './content/properties'  # XPath for an entry's properties (the root is entryXPath)

objectIDLabel = 'SyndicObjectID'  # name of the object's unique-ID property (used to link attachments)
titleLabel = 'title'  # name of the property holding the element's name
pictureLabel = 'Photos'  # name of the property containing the photo links
xLabel = 'GmapLongitude'  # name of the X-coordinate property
yLabel = 'GmapLatitude'  # name of the Y-coordinate property

customAttributes = dict(
)  # types of the custom columns; add one line per custom column: customAttributes['<key>'] = '<type>'. E.g.: customAttributes['CodePostal'] = 'SHORT'

projectionInput = arcpy.SpatialReference(4326)  # input projection (WGS84)
projectionOutput = arcpy.SpatialReference(
    3947)  # output projection (L93CC47)
projectionMethod = 'RGF_1993_To_WGS_1984_1'  # geographic transformation


def go():
    Utils.Utils().export(url, tempBddName, addPhoto, tempPicsBddName,
                         cheminOutputCC47, projectionInput, projectionOutput,
                         tempDir, tempPictureDir, proxyURL, entryXPath,
                         elementPropertiesXPath, pictureLabel, objectIDLabel,
                         titleLabel, xLabel, yLabel, projectionMethod,
                         customAttributes)


if __name__ == '__main__':  # if this file was executed directly, let's go!
Example #8
                                                          '*.shp'), ))
print(root.baselink)

#File = r"C:\Users\Dell\Desktop\skrypty\mz.csv"

# dimension the WKT string field and poly ID field...
# the field holding the WKT string...
field1 = "wkt"
# the field holding the unique ID...
field2 = "id"

# set up the empty list...
featureList = []

# set the spatial reference to a known EPSG code...
sr = arcpy.SpatialReference(2177)
# iterate on table row...
cursor = arcpy.SearchCursor(root.baselink)
row = cursor.next()
print('lama')
while row:
    print(row.getValue(field2))

    WKT = row.getValue(field1)
    # this is the part that converts the WKT string to geometry using the defined spatial reference...
    temp = arcpy.FromWKT(WKT, sr)
    # append the current geometry to the list...
    featureList.append(temp)

    row = cursor.next()
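# A hedged sketch of persisting the accumulated geometries; the output path
# is an assumption (CopyFeatures accepts a Python list of geometry objects)
del cursor
arcpy.CopyFeatures_management(featureList, r"C:\temp\wkt_features.shp")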
Example #9
    def getParameterInfo(self):

        # Input_Observer_Features
        param_1 = arcpy.Parameter()
        param_1.name = u'Input_Observer_Features'
        param_1.displayName = u'Input Observer Features'
        param_1.parameterType = 'Required'
        param_1.direction = 'Input'
        param_1.datatype = u'Feature Set'
        input_layer_file_path = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), "layers",
            "LLOS_InputObserversGDB.lyr")
        param_1.value = input_layer_file_path

        # Observer_Height_Above_Surface
        param_2 = arcpy.Parameter()
        param_2.name = u'Observer_Height_Above_Surface'
        param_2.displayName = u'Observer Height Above Surface'
        param_2.parameterType = 'Required'
        param_2.direction = 'Input'
        param_2.datatype = u'Double'
        param_2.value = u'2'

        # Radius_Of_Observer
        param_3 = arcpy.Parameter()
        param_3.name = u'Radius_Of_Observer'
        param_3.displayName = u'Radius Of Observer'
        param_3.parameterType = 'Required'
        param_3.direction = 'Input'
        param_3.datatype = u'Double'
        param_3.value = u'1000'

        # Input_Surface
        param_4 = arcpy.Parameter()
        param_4.name = u'Input_Surface'
        param_4.displayName = u'Input Surface'
        param_4.parameterType = 'Required'
        param_4.direction = 'Input'
        param_4.datatype = u'Raster Layer'

        # Output_Visibility
        param_5 = arcpy.Parameter()
        param_5.name = u'Output_Visibility'
        param_5.displayName = u'Output Visibility'
        param_5.parameterType = 'Required'
        param_5.direction = 'Output'
        param_5.datatype = u'Feature Class'
        param_5.value = u'%scratchGDB%/outputRLOS'
        param_5.symbology = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), "layers",
            "Radial Line Of Sight Output.lyr")

        # Force_Visibility_To_Infinity__Edge_Of_Surface_
        param_6 = arcpy.Parameter()
        param_6.name = u'Force_Visibility_To_Infinity__Edge_Of_Surface_'
        param_6.displayName = u'Force Visibility To Infinity (Edge Of Surface)'
        param_6.parameterType = 'Optional'
        param_6.direction = 'Input'
        param_6.datatype = u'Boolean'

        # Spatial_Reference
        param_7 = arcpy.Parameter()
        param_7.name = u'Spatial_Reference'
        param_7.displayName = u'Spatial Reference'
        param_7.parameterType = 'Optional'
        param_7.direction = 'Input'
        param_7.datatype = u'Spatial Reference'
        param_7.value = arcpy.SpatialReference(
            54032).exportToString()  # World Azimuthal Equidistant

        return [param_1, param_2, param_3, param_4, param_5, param_6, param_7]
extension = ".csv"

# Use pattern-matching to identify .csv files for EA area:
arcpy.AddMessage("Finding all .csv files to convert to .shp.")
all_filenames = [i for i in glob.glob('*{}'.format(extension))]
# Print statement to manually check number of identified files:
arcpy.AddMessage("{0} .csv files found in {1}:".format(str(len(all_filenames)), csvDir))
for filename in all_filenames:
    arcpy.AddMessage("  - {0}".format(filename))
# -----------------------------------------------------------------------------
# ARC ENVIRONMENTS:

# Set workspace:
arcpy.env.workspace = shpDir
# Set spatial reference to British National Grid (27700):
spRef = arcpy.SpatialReference(27700)
# Allow overwriting of output files:
arcpy.env.overwriteOutput = True

# -----------------------------------------------------------------------------

# Loop through EA operational areas:
for filename in all_filenames:
    
    # Inform user of progress through files:
    arcpy.AddMessage("Processing {0}:".format(filename))
    
    # Get full filepath for .csv:
    area_csv = os.path.join(csvDir, filename)
    
    # Get basename from .csv filepath and strip extension:
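    # A hedged continuation of the truncated loop body: the coordinate column
    # names ("EASTING", "NORTHING") are assumptions this excerpt never reveals
    area_name = os.path.splitext(filename)[0]
    arcpy.MakeXYEventLayer_management(area_csv, "EASTING", "NORTHING",
                                      "xy_layer", spRef)
    arcpy.CopyFeatures_management("xy_layer", os.path.join(shpDir, area_name + ".shp"))
    arcpy.Delete_management("xy_layer")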
Example #11
# --------------------------------------------------------------------
# crear_folder_nombre_capa.py
# Created on: 2016-08-01 08:59:08.00000
# Author: Carlos Mario Cano Campillo
# Email: [email protected] / [email protected]
# Owner: Unidad de Planificación Rural Agropecuaria
#
# ---------------------------------------------------------------------
#===================== Libraries ==============================#
import arcpy, os, subprocess, time, inspect
# ------------------------------------------------------------

#========= Global and Environment Variables =====================#
t_inicio = time.clock()  # capture the process start time

arcpy.env.outputCoordinateSystem = arcpy.SpatialReference(3116)
arcpy.env.overwriteOutput = True
verPython64 = "C:\\Python27\\ArcGISx6410.3\\python.exe"
scriptAuxiliar = "unionx64_aux_multiple_z.py"
verPythonfinal = verPython64

infea = arcpy.GetParameterAsText(0)
infea = infea.split(";")

infea = [arcpy.Describe(x).catalogPath for x in infea]
infea = (";").join(infea)
join_atributtes = arcpy.GetParameterAsText(1)
gaps = arcpy.GetParameterAsText(2)
capa_salida = arcpy.GetParameterAsText(3)
##capa_salida=arcpy.GetParameterAsText(4)
Example #12
arcpy.env.workspace = './CarRecord.gdb'
arcpy.env.overwriteOutput = True

for csv in os.listdir(csvfolder):
    # process each .csv file in csvfolder
    if csv.endswith('.csv'):
        start_time = time.time()
        try:
            # Set the local variables
            in_Table = csvfolder + csv
            shpfilename = os.path.splitext(csv)[0]  # bare file name (no extension)
            x_coords = "PosX"  # longitude
            y_coords = "PosY"  # latitude
            out_Layer = 'csvEventLayer'  # temporary event layer name
            # set the coordinate system
            spRef = arcpy.SpatialReference("WGS 1984")
            print 'Start Process: ' + shpfilename
            # Make the XY event layer
            arcpy.MakeXYEventLayer_management(in_Table, x_coords, y_coords,
                                              out_Layer, spRef, '#')

            # Copy features to shapefile
            arcpy.CopyFeatures_management(out_Layer,
                                          shpfolder + shpfilename + '.shp')

            print 'End Process: ' + shpfilename
            # print the processing time for each file
            t = time.time() - start_time
            minute = int(t) / 60
            second = t - 60 * minute
            print 'Processing Time: {0} : {1} seconds'.format(minute, second)
Example #13
range = float(arcpy.GetParameterAsText(1))  # e.g. 1000.0 (meters); note this shadows the built-in range()
bearing = float(arcpy.GetParameterAsText(2))  # e.g. 45.0 (degrees)
traversal = float(arcpy.GetParameterAsText(3))  # e.g. 60.0 (degrees)
outFeature = arcpy.GetParameterAsText(4)

webMercator = ""
if argCount >= 6:
    webMercator = arcpy.GetParameterAsText(5)

deleteme = []
debug = True
leftAngle = 0.0  # degrees
rightAngle = 90.0  # degrees

if (webMercator == "") or (webMercator is None):
    webMercator = arcpy.SpatialReference(
        r"WGS 1984 Web Mercator (Auxiliary Sphere)")

try:

    currentOverwriteOutput = env.overwriteOutput
    env.overwriteOutput = True
    installInfo = arcpy.GetInstallInfo("desktop")
    installDirectory = installInfo["InstallDir"]
    GCS_WGS_1984 = os.path.join(installDirectory, r"Coordinate Systems",
                                r"Geographic Coordinate Systems", r"World",
                                r"WGS 1984.prj")
    env.overwriteOutput = True
    scratch = env.scratchWorkspace

    prjInFeature = os.path.join(scratch, "prjInFeature")
    arcpy.AddMessage(str(webMercator) + "\n" + prjInFeature)
Example #14
'''
This script generates timber harvest metrics, mainly areas and volumes, for a specific area of interest (AOI). The AOI can be a Timber Supply Area, an Operating Area, or another unit (e.g. a FN claimed area). The main steps are:
    1) Clip the input layers to the AOI extent (VRI, OGMA, THLB, ...). Other input layers can be added depending on the scope of the analysis
    2) Perform a spatial overlay of the clipped input layers
    3) Add and populate fields based on defined rules: mature timber, merchantability, areas with harvest constraints, ...
    4) Calculate THLB areas and volumes
    5) Generate metrics by Licensee or Operating Area (summary statistics)

'''
import os
import arcpy
from arcpy import env

arcpy.env.overwriteOutput = True
spatialRef = arcpy.SpatialReference(3005)

#Create variables for input layers

BCGWcon = r'Database Connections\....sde'
VRI = os.path.join(BCGWcon, "WHSE_FOREST_VEGETATION.VEG_COMP_LYR_R1_POLY")
OGMA = os.path.join(BCGWcon, "WHSE_LAND_USE_PLANNING.RMP_OGMA_LEGAL_ALL_SVW")
Licensees = os.path.join(
    BCGWcon, "REG_LAND_AND_NATURAL_RESOURCE.FOREST_LICENSEE_OPER_SP")

##VRI = r'\\...\thlb_analysis.gdb\test\vri_tko'
##OGMA = r'\\...\thlb_analysis\data\thlb_analysis.gdb\test\OGMA_tko'
##Licensees = r'\\....\thlb_analysis.gdb\test\licensees_tko'

THLB = r'\\...\SIR\THLB_data_SIR'
BEC_HLP = r'\\...\AdditionalDatasetsTKO.gdb\BEC_HLP_FTBO'
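# A hedged sketch of steps 1 and 2 from the header docstring (clip each input
# to the AOI, then overlay); "AOI" and "outGDB" are assumptions this excerpt
# never defines
AOI = r'\\...\thlb_analysis.gdb\test\aoi_tko'  # hypothetical AOI boundary
outGDB = r'\\...\thlb_analysis.gdb'  # hypothetical output workspace
clipped = []
for i, lyr in enumerate([VRI, OGMA, THLB, BEC_HLP]):
    out_fc = os.path.join(outGDB, 'clip_{0}'.format(i))
    arcpy.Clip_analysis(lyr, AOI, out_fc)
    clipped.append(out_fc)
arcpy.Intersect_analysis(clipped, os.path.join(outGDB, 'thlb_overlay'), 'ALL')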
Example #15
    targetRef.ImportFromEPSG(args.epsg)

    westRef = osr.SpatialReference()
    westRef.ImportFromEPSG(31254)
    centerRef = osr.SpatialReference()
    centerRef.ImportFromEPSG(31255)
    eastRef = osr.SpatialReference()
    eastRef.ImportFromEPSG(31256)

    westTransform = osr.CoordinateTransformation(westRef, targetRef)
    centralTransform = osr.CoordinateTransformation(centerRef, targetRef)
    eastTransform = osr.CoordinateTransformation(eastRef, targetRef)

else:
    # for ArcPy
    arcTargetRef = arcpy.SpatialReference(args.epsg)

    arcWestRef = arcpy.SpatialReference(31254)
    arcCenterRef = arcpy.SpatialReference(31255)
    arcEastRef = arcpy.SpatialReference(31256)


def downloadData():
    """This function downloads the address data from BEV and displays its terms
    of usage"""

    if not requestsModule:
        print("source data missing and download is deactivated")
        quit()
    addressdataUrl = "http://www.bev.gv.at/pls/portal/docs/PAGE/BEV_PORTAL_CONTENT_ALLGEMEIN/0200_PRODUKTE/UNENTGELTLICHE_PRODUKTE_DES_BEV/Adresse_Relationale_Tabellen-Stichtagsdaten.zip"
    response = requests.get(addressdataUrl, stream=True)
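    # A hedged sketch of finishing the download; the local file name is an
    # assumption
    localZip = "Adresse_Relationale_Tabellen-Stichtagsdaten.zip"
    with open(localZip, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                f.write(chunk)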
Example #16
            smax = s
            keymax = key
        slopeDirections[key] = {'s': s, 'r': r, 'rg': rg}
    return (360 - (slopeDirections[keymax]['rg'] / math.pi * 180 - 90)) % 360


arcpy.Delete_management(r"in_memory")
OrderIDText = '427550'  # other test order IDs: 616276, 616620, 616622, 619811
##scratch = r"E:\GISData_testing\test\temp4"

imgdir_dem = r"\\Cabcvan1gis001\US_DEM\DEM1"
masterlyr_dem =  r"\\cabcvan1gis001\US_DEM\DEM1.shp"
spatialref = arcpy.Describe(masterlyr_dem).spatialReference
cellsize = 2.77777777777997E-04
connectionString = 'ERIS_GIS/[email protected]'
srGCS83 = arcpy.SpatialReference(4326)

try:
    con = cx_Oracle.connect(connectionString)
    cur = con.cursor()

    cur.execute("select geometry,geometry_type from eris_order_geometry where order_id =" + OrderIDText)
    t = cur.fetchone()
    OrderCoord = eval(str(t[0]))
    OrderType = str(t[1])
except Exception,e:
    print e
    raise
finally:
    cur.close()
    con.close()
Example #17
    "txt": treatCSV,
    "json": treatJSON,
    # "geojson": treatGEOJSON, #TODO
    "gpx": treatGPX,
    "xls": treatXLS,
    "xlsx": treatXLS,
    "shp": treatSHP
}
output_index = 2

file = arcpy.GetParameterAsText(0)

crs_code = arcpy.GetParameter(1)
if not crs_code:
    crs_code = 3163
crs = arcpy.SpatialReference(crs_code)

extension = file.split(".")[-1].lower()

if extension == "shp":
    raise arcpy.ExecuteError(
        "A shapefile must be sent as zip with complementary files (shp, dbf, shx...)"
    )

if extension == "zip":
    folder = file[:-4]
    unzipFile(file, folder)
    for (path, dirs, files) in os.walk(folder):
        for f in files:
            extension = f.split(".")[-1].lower()
            if extension in EXTENSIONS:
Example #18
def RunTest():
    try:
        arcpy.AddMessage("Starting Test: TestCreateMiscCADRGMosaicDataset")
        
        toolbox = TestUtilities.toolbox
        arcpy.ImportToolbox(toolbox, "DefenseScannedMaps")
        arcpy.env.overwriteOutput = True
   
        # Set environment settings
        print("Running from: " + str(TestUtilities.currentPath))
        print("Geodatabase path: " + str(TestUtilities.geodatabasePath))
        
        arcpy.env.overwriteOutput = True

                       
        webMercator = arcpy.SpatialReference(r"WGS 1984 Web Mercator (Auxiliary Sphere)")
           
           
        inputName = "ScannedMapsMisc_Test"
        inputMosaicDatasetFullPath = os.path.join(TestUtilities.inputGDB, inputName)
        
        if arcpy.Exists(inputMosaicDatasetFullPath):
            print("deleting: " + inputMosaicDatasetFullPath)
            arcpy.Delete_management(inputMosaicDatasetFullPath)
           
        ########################################################
        # Execute the Model under test: 
        arcpy.CreateCADRGMosaicDataset_DefenseScannedMaps(TestUtilities.inputGDB, inputName, webMercator)  
        ########################################################

        
      
        # Check For Valid Input
        inputFeatureCount = int(arcpy.GetCount_management(inputMosaicDatasetFullPath).getOutput(0)) 
        print("Input FeatureClass: " + str(inputMosaicDatasetFullPath))
        print("Input Feature Count: " +  str(inputFeatureCount))
        
        if inputFeatureCount > 0 :
            print("Mosaic Dataset has already been created and populated")
             
        print("Test Successful"        )
                
    except arcpy.ExecuteError: 
        # Get the tool error messages 
        msgs = arcpy.GetMessages() 
        arcpy.AddError(msgs) 
    
        # return a system error code
        sys.exit(-1)
        
    except Exception as e:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]
    
        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
    
        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)
    
        # return a system error code
        sys.exit(-1)
Example #19
def worker(data_path, esri_service=False):
    """The worker function to index feature data and tabular data."""
    if esri_service:
        index_service(job.service_connection)
    else:
        job.connect_to_zmq()
        geo = {}
        entry = {}
        schema = {}
        dsc = arcpy.Describe(data_path)

        try:
            from utils import worker_utils
            geometry_ops = worker_utils.GeometryOps()
        except ImportError:
            geometry_ops = None

        try:
            global_id_field = dsc.globalIDFieldName
        except AttributeError:
            global_id_field = None

        try:
            shape_field_name = dsc.shapeFieldName
        except AttributeError:
            shape_field_name = None

        # Get the table schema.
        table_entry = {}
        table_links = []
        schema['name'] = dsc.name
        try:
            alias = dsc.aliasName
        except AttributeError:
            alias = dsc.name
        if not dsc.name == alias:
            schema['alias'] = alias
        schema['OIDFieldName'] = dsc.OIDFieldName
        if shape_field_name:
            schema['shapeFieldName'] = shape_field_name
            schema['wkid'] = dsc.spatialReference.factoryCode
        if global_id_field:
            schema['globalIDField'] = global_id_field
        schema_fields = []
        for fld in dsc.fields:
            field = {}
            props = []
            field['name'] = fld.name
            field['alias'] = fld.aliasName
            field['type'] = fld.type
            field['domain'] = fld.domain
            if fld.isNullable:
                props.append('nullable')
            else:
                props.append('notnullable')
            indexes = dsc.indexes
            if indexes:
                for index in indexes:
                    if fld.name in [f.name for f in index.fields]:
                        props.append('indexed')
                        break
                    else:
                        props.append('notindexed')
                        break
            field['properties'] = props
            schema_fields.append(field)
        schema['fields'] = schema_fields

        # Add an entry for the table and its schema.
        schema['rows'] = int(arcpy.GetCount_management(data_path).getOutput(0))
        table_entry['id'] = '{0}_{1}'.format(job.location_id, dsc.name)
        table_entry['location'] = job.location_id
        table_entry['action'] = job.action_type
        table_entry['format_type'] = 'Schema'
        table_entry['entry'] = {
            'fields': {
                '_discoveryID': job.discovery_id,
                'name': dsc.name,
                'path': dsc.catalogPath,
                'format': 'schema'
            }
        }
        table_entry['entry']['fields']['schema'] = schema

        if job.schema_only:
            job.send_entry(table_entry)
            return table_entry

        if dsc.dataType == 'Table':
            # Get join information.
            table_join = job.get_join(dsc.name)
            if table_join:
                table_view = arcpy.MakeTableView_management(
                    dsc.catalogPath, 'view')
                arcpy.AddJoin_management(
                    table_view, table_join['field'],
                    os.path.join(job.path, table_join['table']),
                    table_join['field'], 'KEEP_COMMON')
            else:
                table_view = dsc.catalogPath

            # Get any query or constraint.
            query = job.get_table_query(dsc.name)
            constraint = job.get_table_constraint(dsc.name)
            if query and constraint:
                expression = """{0} AND {1}""".format(query, constraint)
            else:
                if query:
                    expression = query
                else:
                    expression = constraint

            field_types = job.search_fields(table_view)
            fields = field_types.keys()
            row_count = float(
                arcpy.GetCount_management(table_view).getOutput(0))
            if row_count == 0.0:
                return

            with arcpy.da.SearchCursor(table_view, fields, expression) as rows:
                mapped_fields = job.map_fields(dsc.name, fields, field_types)
                new_fields = job.new_fields
                ordered_fields = OrderedDict()
                for f in mapped_fields:
                    ordered_fields[f] = None
                increment = job.get_increment(row_count)
                for i, row in enumerate(rows, 1):
                    try:
                        if job.domains:
                            row = update_row(dsc.fields, rows, list(row))
                        mapped_fields = dict(zip(ordered_fields.keys(), row))
                        mapped_fields['_discoveryID'] = job.discovery_id
                        mapped_fields['meta_table_name'] = dsc.name
                        if hasattr(dsc, 'aliasName') and dsc.aliasName:
                            mapped_fields[
                                'meta_table_alias_name'] = dsc.aliasName
                        else:
                            mapped_fields['meta_table_alias_name'] = dsc.name
                        mapped_fields['format_category'] = 'GIS'
                        mapped_fields['format_type'] = "Record"
                        mapped_fields[
                            'format'] = "application/vnd.esri.{0}.record".format(
                                dsc.dataType.lower())
                        for nf in new_fields:
                            if nf['name'] == '*' or nf['name'] == dsc.name:
                                for k, v in nf['new_fields'].iteritems():
                                    mapped_fields[k] = v
                        oid_field = filter(
                            lambda x: x in ('FID', 'OID', 'OBJECTID'),
                            rows.fields)
                        if oid_field:
                            fld_index = rows.fields.index(oid_field[0])
                        else:
                            fld_index = i
                        if global_id_field:
                            mapped_fields['meta_{0}'.format(
                                global_id_field)] = mapped_fields.pop(
                                    'fi_{0}'.format(global_id_field))
                        entry['id'] = '{0}_{1}_{2}'.format(
                            job.location_id, os.path.basename(data_path),
                            row[fld_index])
                        entry['location'] = job.location_id
                        entry['action'] = job.action_type
                        entry['relation'] = 'contains'
                        entry['entry'] = {'fields': mapped_fields}
                        job.send_entry(entry)
                        table_links.append({
                            'relation': 'contains',
                            'id': entry['id']
                        })
                        if (i % increment) == 0:
                            status_writer.send_percent(
                                i / row_count,
                                "{0} {1:%}".format(dsc.name, i / row_count),
                                'esri_worker')
                    except (AttributeError, RuntimeError):
                        continue
        else:
            generalize_value = job.generalize_value
            sr = arcpy.SpatialReference(4326)
            geo['spatialReference'] = dsc.spatialReference.name
            geo['code'] = dsc.spatialReference.factoryCode

            # Get join information.
            table_join = job.get_join(dsc.name)
            if table_join:
                lyr = arcpy.MakeFeatureLayer_management(dsc.catalogPath, 'lyr')
                arcpy.AddJoin_management(
                    lyr, table_join['input_join_field'],
                    os.path.join(job.path, table_join['table']),
                    table_join['output_join_field'], 'KEEP_COMMON')
            else:
                lyr = dsc.catalogPath

            field_types = job.search_fields(lyr)
            fields = field_types.keys()
            query = job.get_table_query(dsc.name)
            constraint = job.get_table_constraint(dsc.name)
            if query and constraint:
                expression = """{0} AND {1}""".format(query, constraint)
            else:
                if query:
                    expression = query
                else:
                    expression = constraint
            if dsc.shapeFieldName in fields:
                fields.remove(dsc.shapeFieldName)
                field_types.pop(dsc.shapeFieldName)
            elif table_join:
                fields.remove(arcpy.Describe(lyr).shapeFieldName)
                field_types.pop(arcpy.Describe(lyr).shapeFieldName)
            row_count = float(arcpy.GetCount_management(lyr).getOutput(0))
            if row_count == 0.0:
                return
            if dsc.shapeType == 'Point':
                with arcpy.da.SearchCursor(lyr, ['SHAPE@'] + fields,
                                           expression, sr) as rows:
                    mapped_fields = job.map_fields(dsc.name,
                                                   list(rows.fields[1:]),
                                                   field_types)
                    new_fields = job.new_fields
                    ordered_fields = OrderedDict()
                    for f in mapped_fields:
                        ordered_fields[f] = None
                    increment = job.get_increment(row_count)
                    for i, row in enumerate(rows):
                        try:
                            if job.domains:
                                row = update_row(dsc.fields, rows, list(row))
                            if row[0]:
                                geo['lon'] = row[0].firstPoint.X
                                geo['lat'] = row[0].firstPoint.Y
                            mapped_fields = dict(
                                zip(ordered_fields.keys(), row[1:]))
                            mapped_fields['_discoveryID'] = job.discovery_id
                            mapped_fields['meta_table_name'] = dsc.name
                            if hasattr(dsc, 'aliasName') and dsc.aliasName:
                                mapped_fields[
                                    'meta_table_alias_name'] = dsc.aliasName
                            else:
                                mapped_fields[
                                    'meta_table_alias_name'] = dsc.name
                            mapped_fields['format_category'] = 'GIS'
                            mapped_fields['geometry_type'] = 'Point'
                            mapped_fields['format_type'] = 'Feature'
                            mapped_fields[
                                'format'] = "application/vnd.esri.{0}.feature".format(
                                    dsc.dataType.lower())
                            for nf in new_fields:
                                if nf['name'] == '*' or nf['name'] == dsc.name:
                                    for k, v in nf['new_fields'].iteritems():
                                        mapped_fields[k] = v
                            if global_id_field:
                                mapped_fields['meta_{0}'.format(
                                    global_id_field)] = mapped_fields.pop(
                                        'fi_{0}'.format(global_id_field))
                            entry['id'] = '{0}_{1}_{2}'.format(
                                job.location_id, os.path.basename(data_path),
                                i)
                            entry['location'] = job.location_id
                            entry['action'] = job.action_type
                            entry['relation'] = 'contains'
                            entry['entry'] = {
                                'geo': geo,
                                'fields': mapped_fields
                            }
                            job.send_entry(entry)
                            table_links.append({
                                'relation': 'contains',
                                'id': entry['id']
                            })
                            if (i % increment) == 0:
                                status_writer.send_percent(
                                    i / row_count,
                                    "{0} {1:%}".format(dsc.name,
                                                       i / row_count),
                                    'esri_worker')
                        except (AttributeError, RuntimeError):
                            continue
            else:
                with arcpy.da.SearchCursor(lyr, ['SHAPE@'] + fields,
                                           expression, sr) as rows:
                    increment = job.get_increment(row_count)
                    mapped_fields = job.map_fields(dsc.name,
                                                   list(rows.fields[1:]),
                                                   field_types)
                    new_fields = job.new_fields
                    ordered_fields = OrderedDict()
                    for f in mapped_fields:
                        ordered_fields[f] = None
                    for i, row in enumerate(rows):
                        try:
                            if job.domains:
                                row = update_row(dsc.fields, rows, list(row))
                            if row[0]:
                                if generalize_value == 0 or generalize_value == 0.0:
                                    geo['wkt'] = row[0].WKT
                                else:
                                    if geometry_ops:
                                        geo['wkt'] = geometry_ops.generalize_geometry(
                                            row[0].WKT, generalize_value)
                                    else:
                                        geo['xmin'] = row[0].extent.XMin
                                        geo['xmax'] = row[0].extent.XMax
                                        geo['ymin'] = row[0].extent.YMin
                                        geo['ymax'] = row[0].extent.YMax
                            mapped_fields = dict(
                                zip(ordered_fields.keys(), row[1:]))
                            mapped_fields['_discoveryID'] = job.discovery_id
                            mapped_fields['meta_table_name'] = dsc.name
                            if hasattr(dsc, 'aliasName') and dsc.aliasName:
                                mapped_fields[
                                    'meta_table_alias_name'] = dsc.aliasName
                            else:
                                mapped_fields[
                                    'meta_table_alias_name'] = dsc.name
                            for nf in new_fields:
                                if nf['name'] == '*' or nf['name'] == dsc.name:
                                    for k, v in nf['new_fields'].iteritems():
                                        mapped_fields[k] = v
                            if global_id_field:
                                mapped_fields['meta_{0}'.format(
                                    global_id_field)] = mapped_fields.pop(
                                        'fi_{0}'.format(global_id_field))
                            mapped_fields['geometry_type'] = dsc.shapeType
                            mapped_fields['format_category'] = 'GIS'
                            mapped_fields['format_type'] = 'Feature'
                            mapped_fields[
                                'format'] = "application/vnd.esri.{0}.feature".format(
                                    dsc.dataType.lower())
                            entry['id'] = '{0}_{1}_{2}'.format(
                                job.location_id,
                                os.path.splitext(
                                    os.path.basename(data_path))[0], i)
                            entry['location'] = job.location_id
                            entry['action'] = job.action_type
                            entry['entry'] = {
                                'geo': geo,
                                'fields': mapped_fields
                            }
                            job.send_entry(entry)
                            table_links.append({
                                'relation': 'contains',
                                'id': entry['id']
                            })
                            if (i % increment) == 0:
                                status_writer.send_percent(
                                    i / row_count,
                                    "{0} {1:%}".format(dsc.name,
                                                       i / row_count),
                                    'esri_worker')
                        except (AttributeError, RuntimeError):
                            continue

        table_entry['entry']['links'] = table_links
        job.send_entry(table_entry)
        return table_entry
Example #20
    grp = sys.argv[1]
    scratch = 'D:/best/tmp/GL-NCEAS-SpeciesDiversity_v2013a/scratch_%s' % grp
    log = '%s/ingest_%s.log' % (cd, grp)
    if not os.path.exists(scratch): os.makedirs(scratch)

# log function
logging.basicConfig(format='%(asctime)s %(message)s',
                    datefmt='%Y-%m-%d %I:%M:%S %p',
                    filename=log,
                    level=logging.DEBUG)

if len(sys.argv) > 1:
    logging.info('Running just group: %s' % grp)

# projections
sr_mol = arcpy.SpatialReference(
    'Mollweide (world)')  # projected Mollweide (54009)
sr_gcs = arcpy.SpatialReference(
    'WGS 1984')  # geographic coordinate system WGS84 (4326)

# shapefiles don't have nulls, so use geodatabase
if not arcpy.Exists(gdb):
    arcpy.CreateFileGDB_management(os.path.dirname(gdb), os.path.basename(gdb))

# workspace & scratch space
arcpy.env.workspace = gdb
os.chdir(gdb)
arcpy.env.scratchWorkspace = scratch

# copy land and regions
for fc in ('land_gcs', 'rgn_fao_gcs'):
    arcpy.FeatureClassToGeodatabase_conversion('%s/%s.shp' % (td, fc), gdb)
Example #21
arcpy.management.AddField(test_pts, "LONG_NAD83", "TEXT", "", "", 25)
arcpy.management.AddField(test_pts, "LAT_NAD83", "TEXT", "", "", 25)

print("Adding new Point_Category fields ...")
arcpy.management.AddField(test_pts, "Point_Category", "TEXT", "", "", 20)
arcpy.management.AddField(test_pts, "isMonument", "TEXT", "", "", 3)
arcpy.management.AddField(test_pts, "isControl", "TEXT", "", "", 3)
arcpy.management.AddField(test_pts, "County", "TEXT", "", "", 20)

# Use code below if you need to correct the PLSS alias
#print("Updating PLSSID field alias ...")
#arcpy.management.AlterField(test_pts, "PLSSID", "", "PLSS Area Identification")

# Calculate point geometry in NAD 83
print("Calculating geometry fields ...")
spatial_ref = arcpy.SpatialReference(4269)  # NAD 1983
#arcpy.management.CalculateGeometryAttributes(test_pts, "POINT_X", "", "", spatial_ref)
#arcpy.management.CalculateGeometryAttributes(test_pts, "POINT_Y", "", "", spatial_ref)
arcpy.management.AddGeometryAttributes(test_pts, "POINT_X_Y_Z_M", "", "",
                                       spatial_ref)

# Calculate Error Fields
print("Copying geometry fields to LAT/LONG ...")
update_count = 0
fields = ['POINT_X', 'LONG_NAD83', 'POINT_Y', 'LAT_NAD83']
with arcpy.da.UpdateCursor(test_pts, fields) as cursor:
    for row in cursor:
        row[1] = str(row[0])
        row[3] = str(row[2])
        update_count += 1
        cursor.updateRow(row)
Example #22
def project_to_WGS84():
    # Project final data to WGS84
    print("Projecting final law boundaries into WGS84 ...")
    sr = arcpy.SpatialReference("WGS 1984")
    arcpy.management.Project(law_final, law_wgs84, sr,
                             "WGS_1984_(ITRF00)_To_NAD_1983")
Example #23
import os, arcpy
from arcpy import env

# Set British National Grid as the coordinate system for all outputs
env.cartographicCoordinateSystem = arcpy.SpatialReference(
    "British National Grid")
env.outputCoordinateSystem = arcpy.SpatialReference("British National Grid")
env.overwriteOutput = True
Data_folder = os.getcwd()
env.workspace = os.getcwd()
env.scratchWorkspace = os.getcwd()

for filename in os.listdir(Data_folder):
    if filename.endswith("bound.shp"):
        infile = Data_folder + "/" + filename
        print(infile)
        Catch = filename.split("_us")[0]
        arcpy.AddField_management(infile, "Name", "TEXT")
        arcpy.CalculateField_management(infile, "Name", '"' + Catch + '"',
                                        "PYTHON")
Example #24
                if feature['input_type'] == 'shp':
                    f_in=config['workspace'] + "\\" + config['input_folder'] + "\\" + feature['input']    
                    arcpy.FeatureClassToFeatureClass_conversion(f_in, config['workspace'] + "\\" + config['input_folder'] + "\\", feature['ref'] + ".shp")
                
                f_out = config['workspace'] + "\\" + config['input_folder'] + "\\" + feature['ref'] + ".shp" 
                f_in = f_out

                #repair
                arcpy.RepairGeometry_management(f_out)

                #reproject if needed
                desc = arcpy.Describe(f_in)
                if desc.spatialReference.factoryCode != config['coordinate_system']:
                    print("Feature has coordinate system: " + desc.spatialReference.name + ". Projecting to: " +  str(config['coordinate_system']))
                    f_out=config['output_folder'] + "\\" + feature['ref'] + "_proj.shp"
                    out_sr = arcpy.SpatialReference(config['coordinate_system'])
                    arcpy.Project_management(f_in, f_out, out_sr)

                #clip input feature to the start_area
                print("Clipping: " + feature['name'])
                f_in = f_out
                f_clip = config['input_folder'] + "\\" + config['start_area'] + ".shp"
                f_out = config['output_folder'] + "\\" + feature['ref'] + "_clip.shp"
                arcpy.Clip_analysis(f_in, f_clip, f_out)
        

                #buffer
                f_in=f_out
                f_out = config['output_folder'] + "\\" + feature['ref'] + "_buffer.shp"
                if feature['buffer_meters']>0:
                    print("Buffering to: " + f_out)
Example #25
                if obsLat[-1] == 'N':
                    obsLat = float(obsLat[:-1])
                else:
                    obsLat = float(obsLat[:-1]) * -1
                if obsLon[-1] == 'W':
                    obsLon = float(obsLon[:-1])
                else:
                    obsLon = float(obsLon[:-1]) * -1
                   
                # Construct a point object from the feature class
                obsPoint = arcpy.Point()
                obsPoint.X = obsLon
                obsPoint.Y = obsLat
                
                # Convert the point to a point geometry object with spatial reference
                inputSR = arcpy.SpatialReference(4326)
                obsPointGeom = arcpy.PointGeometry(obsPoint,inputSR)

                # Create a feature object
                cur.insertRow((obsPointGeom,tagID,obsLC,obsDate.replace(".","/") + " " + obsTime))

            #Handle any error
            except Exception as e:
                pass
                #arcpy.AddWarning("  Error adding record {} to the output".format(tagID))

        # Move to the next line so the while loop progresses
        lineString = inputFileObj.readline()
        
    #Close the file object
    inputFileObj.close()
Example #26
while True:
    try:
        #prompt user for analysis mode
        mode = int(
            raw_input(
                "Do you want to calculate inter-island distances [0], inter-individual distances [1], or both [2]? "
            ))
        if mode in [0, 1, 2]:
            break
        else:
            raise ValueError
    except ValueError:
        print("Please choose a valid analysis mode ")

#define spatial reference presets
spatialref_default = arcpy.SpatialReference(4326)
spatialref_proj = arcpy.SpatialReference(epsg)
spatial_ref = arcpy.Describe(inputpath + points).spatialReference
#check projection of point shapefile and set projection if unknown
if spatial_ref.name == "Unknown":
    print(
        "The source points file has an unknown spatial reference, setting to default projection (EPSG 4326) "
    )
    arcpy.DefineProjection_management(inputpath + points, spatialref_default)

#reproject shapefile to correct projection
points_projected = "sourcepoints_projected.shp"
arcpy.Project_management(inputpath + points, inputpath + points_projected,
                         spatialref_proj)
if binningmode == 0:
    #calculate interval values and time interval counts
Example #27
def _get_spatial_ref(code):
    return arcpy.SpatialReference(code)
Example #28
    arcpy.CopyFeatures_management("temp", out_feature)
    selectList.append(out_feature)
    print "Select by attribute", fc, "completed at", datetime.datetime.now(
    ).strftime("%I:%M:%S%p")

print "Step 1 Select By Attribute completed at", datetime.datetime.now(
).strftime("%I:%M:%S%p")

## ---------------------------------------------------------------------------
## 2. Project the polygon
## Description: Project RGB soil polygon to North America Albers Equal Area Conic

print "\nStep 2 Project the polygon starts at", datetime.datetime.now(
).strftime("%A, %B %d %Y %I:%M:%S%p")

outCS = arcpy.SpatialReference("North America Albers Equal Area Conic")

for fc in selectList:
    name = "Sub_Basins.shp"
    projFile = os.path.join(finalFolder, name)
    arcpy.Project_management(fc, projFile, outCS)
    finalList.append(projFile)
    print "Projection", fc, "completed at", datetime.datetime.now().strftime(
        "%I:%M:%S%p")

print "Step 2 Project completed at", datetime.datetime.now().strftime(
    "%I:%M:%S%p")

## ---------------------------------------------------------------------------
## 3. Dissolve
## Description: Dissolve the sub-basins MAJ_BAS = 2001 to create the boundaries of the RGB
Example #29
# Name: CreateFileGDB

# Description: Create a file GDB

# Import system modules
import arcpy

# Set local variables
out_folder_path = "C:\Users\hawkinle\Desktop\STDTAS"
featuredataset_path = "C:\Users\hawkinle\Desktop\STDTAS\\fGDB.gdb"
out_name = "fGDB.gdb"
featuredataset1 = "Scratch"
featuredataset2 = "Workspace"
featuredataset3 = "Data"
featuredataset4 = "Final"

#create a spatial reference from an existing .prj file
sr = arcpy.SpatialReference(r"Q:\Users\Hawkins_L\static\New_Shapefile.prj")
# Execute CreateFileGDB
arcpy.CreateFileGDB_management(out_folder_path, out_name)

#execute CreateFeatureDataset for each feature dataset

arcpy.CreateFeatureDataset_management(featuredataset_path, featuredataset1, sr)
arcpy.CreateFeatureDataset_management(featuredataset_path, featuredataset2, sr)
arcpy.CreateFeatureDataset_management(featuredataset_path, featuredataset3, sr)
arcpy.CreateFeatureDataset_management(featuredataset_path, featuredataset4, sr)
Example #30
import time

myPath = "D:\\_exportDM\\"
oracleConnector = myPath + "oracle_dzaw.sde"
baza4Connector = myPath + "baza4_dzaw.sde"
baza4HydroConnector = myPath + "baza4Hydro.sde"
oracle_hydro_sdo = myPath + "oracle_hydro_sdo.sde"

#mxd = arcpy.mapping.MapDocument(myPath+"ARCIMS_DM.mxd")
#layers = arcpy.mapping.ListLayers(mxd)

prjFile = os.path.join(
    arcpy.GetInstallInfo()["InstallDir"],
    "Coordinate Systems/Projected Coordinate Systems/National Grids/Europe/ETRS 1989 Poland CS92.prj"
)
spatialRef = arcpy.SpatialReference(prjFile)

startTime = time.time()
now = datetime.datetime.now()


def createGDB(tempdb_name):
    tmpDatabase = myPath + tempdb_name
    if os.path.exists(tmpDatabase):
        arcpy.Delete_management(tmpDatabase)  #os.remove(tmpDatabase)
    arcpy.CreateFileGDB_management(myPath, tempdb_name)


createGDB("tempGDB.gdb")

arcpy.env.overwriteOutput = True
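# A hedged sketch of what this setup typically feeds: exporting a feature
# class from one of the SDE connections into the fresh scratch GDB in the
# CS92 projection; the feature-class name is an assumption
arcpy.env.outputCoordinateSystem = spatialRef
arcpy.FeatureClassToFeatureClass_conversion(
    oracleConnector + "\\SOME_FEATURE_CLASS",  # hypothetical source
    myPath + "tempGDB.gdb", "SOME_FEATURE_CLASS")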