Example #1
def ScratchFolder():
    """Create a scratch folder

    1) if current workspace is a folder, in the current workspace
    2) if a .mdb or .gdb, parallel to it
    3) in TEMP
    """
    import os
    import arcgisscripting
    gp = arcgisscripting.create()
    try:
        # this works at 10.1
        sw = gp.scratchFolder
    except:
        try:
            sw = gp.scratchWorkspace
            swType = gp.Describe(sw).dataType
            if swType == "Folder":
                sw = os.path.join(sw,"scratch")
            elif swType == "Workspace":
                pth = os.path.dirname(sw)
                if not gp.Exists(pth): raise
                sw = os.path.join(pth,"scratch")
        except:
            # put it in TEMP
            sw = os.path.join(os.environ["TEMP"],"scratch")
        finally:
            if not gp.Exists(sw):
                os.mkdir(sw)
                print "created " + sw
    return sw
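A minimal usage sketch for the helper above (hypothetical; assumes ScratchFolder is defined in the current module and an ArcGIS session is available):

# Hypothetical usage: obtain a scratch folder and report its location.
scratch = ScratchFolder()
print "scratch location: " + scratch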
    def __init__(self):
        """Set up the geoprocessor and the list of tool parameters."""
        import arcgisscripting as ARC
        import os
        self.os = os
        self.GP = ARC.create(9.3)
        self.params = self.GP.getparameterinfo()
Example #3
def get_mapsheets(mask_fc, i_mapsheets, mapsheets_key, workspace):
    """
    temporary fix for users with gdal 1.6 (arcgis 9.3)
    local import of ArcGIS libraries
    """
    import arcgisscripting
    import turtlebase.arcgis
    gp = arcgisscripting.create()
    mapsheets = []
    
    gp.MakeFeatureLayer_management(mask_fc, "mask_lyr")
    gp.MakeFeatureLayer_management(i_mapsheets, "mapsheets_lyr")
    gp.SelectLayerByLocation_management("mapsheets_lyr","INTERSECT","mask_lyr","#","NEW_SELECTION")
    
    mapsheets_tmp = turtlebase.arcgis.get_random_file_name(workspace, '.shp')

    gp.Select_analysis("mapsheets_lyr", mapsheets_tmp)
    rows = gp.searchcursor(mapsheets_tmp)
    row = rows.next()
    while row:
        mapsheets_value = row.GetValue(mapsheets_key)
        ext = row.Shape.extent
        mapsheets.append((mapsheets_value, ext))
        row = rows.next()

    return mapsheets
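A hedged usage sketch for get_mapsheets; the paths, key field, and workspace below are placeholders, not values from the source:

# Hypothetical usage: list mapsheet keys and extents that intersect a mask.
sheets = get_mapsheets("c:/data/mask.shp", "c:/data/mapsheets.shp", "MSHEET", "c:/tmp")
for key, extent in sheets:
    print key, extent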
Example #4
    def genETF(self, rsLSTC, ThotD, Thot_cold, day, TcoldD, Thot1, Tcold1):
        import arcgisscripting
        gp1 = arcgisscripting.create()

        # Assign path variables
        grid2 = self.tempPath + "grid2"
        etf = self.tempPath + "etf"
        finalEtf = self.etfPath
        inTrueRaster = 0
        #print grid2
        print "\n"
        print "Extracted Variables: THOT1= " + str(Thot1) + " & TCOLD1= " + str(Tcold1) + " (ONLY FOR TEST PURPOSES)"
        print "Raster Name: " + str(rsLSTC)
        print "Day of the Year: " + str(day)
        print "Average Cold Pixel Value: " + str(TcoldD)
        print "Average Hot Pixel Value: " + str(ThotD)
        print "Difference between Hot and Cold Pixels: " + str(Thot_cold)

        # average hot pixel value - LSTc raster = raster grid2
        print "Process: Minus..."
        gp1.Minus_sa(ThotD, rsLSTC, grid2)

        # raster grid2 / (average hot pixel value - average cold pixel value) = ETF grid
        print "Process: Divide..."
        gp1.Divide_sa(grid2, Thot_cold, etf)

        # Set all values < 0 to zero
        print "Process: Con..."
        gp1.Con_sa(etf, inTrueRaster, finalEtf + "etf2008" + str(day).zfill(3), etf, "VALUE < 0")

        # Delete all intermediate rasters besides the output raster
        gp1.Delete_management(grid2, "Raster Dataset")
        gp1.Delete_management(etf, "Raster Dataset")
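The Con_sa call above acts as a clamp at zero: where "VALUE < 0" holds, the constant inTrueRaster (0) is written; elsewhere the etf raster passes through. An illustrative plain-Python sketch of that per-cell logic (not part of the original code):

# Illustrative only: the per-cell logic of the Con_sa call above.
def clamp_at_zero(value):
    return 0 if value < 0 else value

assert clamp_at_zero(-0.2) == 0
assert clamp_at_zero(0.7) == 0.7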
Example #5
def script():
    from optparse import OptionParser
    gp = arcgisscripting.create()
    logging_config = LoggingConfig(gp)
    # TODO: perhaps grab the logfile location from the config below.
    parser = OptionParser()
    (options, args) = parser.parse_args()
    main(options, args)
    logging_config.cleanup()
def ReplaceFCstg(fc,temp,fcname):
    gp2 = arcgisscripting.create() # for some reason the script connection is dropped after restarting Oracle
    if gp2.Exists(fc) and gp2.Exists(temp):
        
        gp2.Delete_management(fc, "FeatureClass")
        gp2.Rename_management(temp, fc, "FeatureClass")
        logstr('     -- Replaced '+fcname)
        
    #else:
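A hedged usage sketch for ReplaceFCstg; the feature class names are placeholders, and an ArcGIS session (plus the module's logstr helper) is assumed:

# Hypothetical usage: swap a staged feature class into place.
ReplaceFCstg("SDE.Roads", "SDE.Roads_tmp", "Roads")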
Example #7
def execute(self, parameters, messages):
    # Obsolete, needs refactoring!
    dwrite("Starting sdmvalues")
    gp = arcgisscripting.create()
    TrainingSites = parameters[0].valueAsText
    Unitarea = float(parameters[1].value)
    appendSDMValues(gp, Unitarea, TrainingSites)
    arcpy.AddMessage("\n" + "=" * 40)
    arcpy.AddMessage("\n")
def OnNext(self, event):
    logging.debug("customer file is: " + self.export_stops_picker.GetValue())
    logging.debug("ready to open progress")

    self.wizard.next(Progress, message='Processing.....')
    os.chdir(self.wizard.cwd)
    # Create the geoprocessor object
    gp = arcgisscripting.create()
    Stops = self.export_stops_picker.GetValue()
    # Set the workspace for the processing files
    gp.workspace = os.path.dirname(Stops)
def gp_create(vers=None):
  '''Create the geoprocessor: pass 9.2 or 9.3, or None to fall back to win32com.client.Dispatch.'''
  if vers >= 9.2:   #try 9.2 or above
    try:
      import arcgisscripting
      gp = arcgisscripting.create(vers)
      gp_version = vers
    except:
      import arcgisscripting
      gp = arcgisscripting.create()
      gp_version = 9.2
  else:
    try:
      import win32com.client
      gp = win32com.client.Dispatch("esriGeoprocessing.GpDispatch.1")
      gp_version = 9.1
    except:
      gp = None
      gp_version = 9.1
  #
  return [gp, gp_version]
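A usage sketch for gp_create (the requested version number is an assumption):

# Hypothetical usage: request a 9.3 geoprocessor, falling back as the helper allows.
gp, gp_version = gp_create(9.3)
if gp is None:
    print "no geoprocessor available"
else:
    print "geoprocessor version: " + str(gp_version)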
Example #10
def get_geoprocessor(version):
    try:
        if version == 9.3:
            # Try importing arcgisscripting; this fails if ArcGIS is not present.
            # The exception is re-raised so the caller knows ArcGIS is unavailable.
            import arcgisscripting
            return arcgisscripting.create(version)
        elif version == 10:
            import arcpy
            return arcpy
    except ImportError:
        raise
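A hedged usage sketch showing the version fallback the helper above supports:

# Hypothetical usage: prefer arcpy (10), fall back to arcgisscripting (9.3).
try:
    gp = get_geoprocessor(10)
except ImportError:
    gp = get_geoprocessor(9.3)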
def mainfunction(dirbase, dirout, country, sres, period, resolution, tiled):
	
	import arcgisscripting, os, sys, string, glob
	gp = arcgisscripting.create(9.3)
	
	# gp.CheckOutExtension("Spatial")
	# gp.toolbox="management"
	
	print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
	print "  CONVERT TO ASCII AND TIFF	 "
	print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"

	varlist = "bio", "dtr", "prec", "tmean"
	# modellist =  sorted(os.listdir(dirbase + "\\SRES_" + sres + "\\" + country + "_" + resolution))
	
	# for model in modellist:
	model = "current"
	for var in varlist:
		
		gp.workspace = dirbase + "\\SRES_" + sres + "\\" + country + "_" + resolution + "\\" + model + "\\" + var
		
		print "\n --> Processing: " + country,sres,model,period,var,"\n"
		
		# diroutAscii = dirout + "\\SRES_" + sres + "\\" + country + "_" + resolution + "\\" + model + "\\" + var + "_asciis"
		# if not os.path.exists(diroutAscii):
			# os.system('mkdir ' + diroutAscii)
		
		diroutTiff = dirout + "\\SRES_" + sres + "\\" + country + "_" + resolution + "\\" + model + "\\" + period + "\\" + var + "_tif"
		if not os.path.exists(diroutTiff):
			os.system('mkdir ' + diroutTiff)
		
		rasters = gp.ListRasters("*", "GRID")
		for raster in rasters:
			
			# OutAscii = diroutAscii + "\\" + model + "_" + raster + "_1.asc"
			OutTiff = diroutTiff + "\\" + sres + "_" + period + "_" + model + "_" + raster + ".tif"
			
			
			# if not gp.Exists(OutAscii):
				# gp.RasterToASCII_conversion(raster, OutAscii)
			if not os.path.exists(OutTiff):
				print "\tConverting " + raster
				os.system("gdal_translate -of GTiff -ot Int32 -co COMPRESS=lzw -quiet " + gp.workspace + "\\" + raster + " " + OutTiff)
				# gp.delete_management(raster)
			else:
				print "\tConverted " + raster
		# trashList = sorted(glob.glob(diroutAscii + "\\*.prj"))
		# for trashfile in trashList:
			# os.remove(trashfile)
		
		os.system("rmdir /s /q " + gp.workspace)
def checkArcGIS():
    try:
        import arcgisscripting
        gp = arcgisscripting.create()
        return gp
    except:
        try:
            import win32com.client
            gp = win32com.client.Dispatch('esriGeoprocessing.GpDispatch.1')
            print ('ArcGIS 9.0 or 9.1 is installed')
            return gp
        except:
            print ('sorry, cannot find an ArcGIS module. bailing...')
            # Problem: can't print this to the ArcGIS window if the ArcGIS module can't be found!
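A minimal usage sketch for checkArcGIS (illustrative only):

# Hypothetical usage: get whichever geoprocessor is installed, if any.
gp = checkArcGIS()
if gp is not None:
    print "geoprocessor ready"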
Example #13
def zapusk3(c, source_control_points, target_control_points):
    import arcgisscripting
    for i in c:
        file = i + '_NGT.jpg'
        file2 = i + '_NGT.jp2'
        in_raster = 'Z:/SatData/Satimages/nonprojected/Mtsat/mtsat-2r/nrl/JAPAN/Geo-Color/' + file
        out_raster = 'Z:/SatData/Satimages/Project/Mtsat/mtsat-2r/nrl/JAPAN/Geo-Color/' + file2

        # Place the control points
        gp = arcgisscripting.create()
        ppp = gp.Warp_management(in_raster, source_control_points, target_control_points, out_raster, "POLYORDER2", "BILINEAR")
        ccc = ppp

        # Define the projection
        coordsys = "Z:/SatData/Satimages/Project/Mtsat/mtsat-2r/nrl/JAPAN/Geo-Color/Project/Mtsat_Japan_Geo_Color.img"
        result = gp.DefineProjection_management(ccc, coordsys)
        print result
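A hedged usage sketch for zapusk3; the scene IDs are placeholders, and the control-point arguments are the coordinate strings expected by Warp_management:

# Hypothetical usage: warp a list of scene IDs with shared control points.
src_pts = "'0 0';'100 0';'0 100'"
dst_pts = "'1 1';'101 1';'1 101'"
zapusk3(['201001010000', '201001010100'], src_pts, dst_pts)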
Example #14
    def Reproject(self):

        print "upto here -->1"
        rs=self.rasterFile
        print rs
        # Create Geoprocessing object
        GP = arcgisscripting.create()

        # set Toolbox
        GP.toolbox = "management"

        # Check out any License
        GP.CheckOutExtension("spatial")

        # Overwriting the Output
        GP.OverwriteOutput =1

        # Define Workspace
        #GP.workspace="D:\\MODIS_ETa\\Output\\Eto_composite\\"
        GP.workspace=self.compositeDir

       
        # Assigning Projection types
        #cs="C:\Program Files (x86)\\ArcGIS\\Coordinate Systems\\Projected Coordinate Systems\\Continental\\North America\\North America Albers Equal Area Conic.prj"
        cs=self.projection
        
        #coordsys="C:\\Program Files (x86)\\ArcGIS\\Coordinate Systems\\Geographic Coordinate Systems\\North America\North American Datum 1983.prj"
        coordsys=self.datum

        print "Try to define Projections........."

            ## Define the projection and Coordinate System
        GP.defineprojection(rs, coordsys)

        print "Definition completed........."
        try:
            print "Try to reproject raster  "+rs

            ##Reproject Raster into Albers Equals Area
            #GP.ProjectRaster_management(InFileName, OutFileName, out_coordinate, resample, cell, geo_tran, reg_point, in_coordinate)
            GP.ProjectRaster_management(rs,self.reprojectDir+"PrjETo"+str(self.year)+str(self.yearDay).zfill(3),cs,"NEAREST",self.sampleSize,"","",coordsys)
            print "Reprojection Done"
            print "upto here -->2"
            
        except:
            print GP.GetMessages()
            raise
def convertRasterToNetCDF(inputRasterFile, outNetCDFFile, dataSetVariableName):
    variable = dataSetVariableName #"tmin"
    units = "" #actual unit for temperature data is 'deg C' but this unit does not work with netcdf create function - correct unit is set after the file is created
    XDimension = "x"
    YDimension = "y"
    bandDimension = "time"
    # Create the Geoprocessor object
    import arcgisscripting
    gp = arcgisscripting.create()

    # Check out any necessary licenses
    gp.CheckOutExtension("spatial")

    gp.RasterToNetCDF_md(inputRasterFile, outNetCDFFile, variable, units,
                            XDimension, YDimension, bandDimension)

    # check in any necessary licenses
    gp.CheckInExtension("spatial")
Example #16
def main(argv):
	import arcgisscripting, os, string, sys
	gp = arcgisscripting.create()
	gp.AddMessage('\n')

	if gp.ParameterCount == 4:
		folder = os.sep.join( gp.GetParameterAsText(0).split( os.sep )[:-1] )
		layerName = gp.GetParameterAsText(0).split( os.sep )[-1]
		province = string.lower( gp.GetParameterAsText(1) )
		day = {'today':0, 'tomorrow':1, 'day after tomorrow':2,
			   'day after the day after tomorrow':3}[ string.lower(gp.GetParameterAsText(2)) ]
		language = string.lower( gp.GetParameterAsText(3)[0] ) 

	else:
		usage()
		sys.exit(2)

	weatherData = getWeatherData( gp, province, language, day )
	createWeatherDataLayer( gp, folder, layerName, weatherData, province, day )
def initialize_geoprocessing():
    """Initializes geoprocessing tools.

    Returns:
      The geoprocessor object (also stored in the module-level ``gp``).
    """
    global gp
    
    # Create the Geoprocessor object
    gp = arcgisscripting.create()
    
    # Check out any necessary licenses
    gp.CheckOutExtension("3D")
    
    # Load required toolboxes...
    gp.AddToolbox("C:/Program Files (x86)/ArcGIS/ArcToolbox/Toolboxes/Conversion Tools.tbx")
    gp.AddToolbox("C:/Program Files (x86)/ArcGIS/ArcToolbox/Toolboxes/Data Management Tools.tbx")
   
    return gp
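A minimal usage sketch (illustrative; relies on the module-level gp global the helper sets):

# Hypothetical usage: initialize once, then use the returned geoprocessor.
gp = initialize_geoprocessing()
gp.AddMessage("licenses and toolboxes loaded")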
Example #18
        def __init__(self, storage_location):
            storage_location = os.path.normpath(storage_location)
            # Create ESRI Geoprocessing Object
            self.gp = arcgisscripting.create()
            # Check to see if storage_location exists
            if 'Database Connections' in storage_location:
                db_connection_file = os.path.split(storage_location)[-1]
                full_db_connection_file_path = os.path.join(os.environ['USERPROFILE'], 'Application Data\\ESRI\\ArcCatalog', db_connection_file)
                storage_location_exists = os.path.exists(full_db_connection_file_path)
                if not storage_location_exists:
                    raise IOError, 'The ArcSDE geodatabase connection "%s" does not exist.' % (db_connection_file)
                self.sde = True
            else:
                storage_location_exists = os.path.exists(storage_location)
                if not storage_location_exists:
                    raise IOError, 'The storage location "%s" does not exist.' % (storage_location)
                self.sde = False

            # Set the ESRI workspace parameter
            self.gp.Workspace = storage_location
            self._storage_location = storage_location
Example #19
    def __init__(self, inlayer, parent=None):
        # Try to create the geoprocessor
        try:
            import arcgisscripting
            self.gp = arcgisscripting.create()

            # Make an in-memory layer out of the target feature
            #self.table = 'targettable'
            self.table = self.gp.MakeTableView(inlayer, 'targettable')
            # Get the number of records in the table
            self.records = self.gp.GetCount_management(self.table)
            # Set the dict for fields and values
            self.values = {}

        except ImportError:
            # If ArcGIS is unavailable, delegate the error message back
            if parent:
                parent.log("ArcGIS unavailable.")
                raise
            else:
                print "ArcGIS unavailable."
Example #20
    def Process(self, event):
        self.wizard.next(Progress, message = "Updating %s with route/sequence..." % self.copy_customers_dbf)

        logging.debug("start dbf update")
        gp = arcgisscripting.create(9.3)
        gp.workspace = (self.copy_customers_dbf)

        # f_out = open(self.spreadsheet+".xls", 'wb')
        # f_in = GzipFile(self.spreadsheet,'rb')

        # f_out.writelines(f_in)
        # f_out.close()
        # f_in.close()

        sheet = RA_Routes_Sheet(self.spreadsheet)


        # This is to process new routes created
        # We will have a 2nd process to update just the sequence
        for row in sheet :
            if row['customer_name']== '' or row['customer_name']== None: continue
            logging.debug("Site_id == %s" % row['customer_name'])
            dbf_cursor = gp.UpdateCursor(self.copy_customers_dbf, "Site_id = %s" % row['customer_name'])
            dbf_row = dbf_cursor.Next()

            # Fix this to update the right columns
            # 'row' is a dict. print inspect(row) to see it
            # 'dbf_row' has instance-variables for the fields,
            # the fields are given by print inspect(gp.ListFields(table_view))
            dbf_row.Sequence = row['ts_stop_position']
            dbf_row.Route = row['vehicle_id']
            dbf_row.Est_toa = row['ts_stop_start_time']

            dbf_cursor.UpdateRow(dbf_row)
            dbf_row = dbf_cursor.Next()
            del dbf_cursor

        logging.debug("Done updating dbf")
        
        self.wizard.next(End_Page, copy_customers_dbf = self.copy_customers_dbf)        
Example #21
def ArcGISfinish(outSurf, outBsmt, outSurf_dir, outBsmt_dir, version):
    # ArcGIS 9.2, 9.3, and 10.0 will all read Arc ASCII files natively, however,
    # the loading of files can be improved by pre-calculating raster statistics.
    # These statistics are calculated and stored in accompanying XML files. This
    # is primarily meant for convenience, and is not a vital part of the program
    # or model.

    # We recommend that users with ArcGIS 9.1 or older investigate another
    # GIS viewer for examining model outputs. Older versions of ArcGIS require
    # users to convert Arc ASCII files to another format prior to viewing, using
    # the "ASCII to Raster" tool in the "Conversion toolbox". Our experiences with
    # this conversion have been mixed. It is also inconvenient.

    # ArcGIS versions 9.2 and 9.3 have identical instructions 
    if version == "9.2" or version == "9.3":
        try:
            # import ArcGIS scripting library
            import arcgisscripting
            gp = arcgisscripting.create()

            # calculate statistics
            gp.CalculateStatistics_management (outSurf)
            gp.CalculateStatistics_management (outBsmt)
        except:
            pass

    # ArcGIS version 10.0 instructions
    if version == "10.0":
        try:
            # import ArcGIS scripting library
            import arcpy

            # calculate statistics
            arcpy.CalculateStatistics_management (outSurf)
            arcpy.CalculateStatistics_management (outBsmt)
        except:
            pass

    return
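A hedged usage sketch for ArcGISfinish; the raster paths, output directories, and version string are placeholders:

# Hypothetical usage: pre-calculate statistics for two Arc ASCII outputs.
ArcGISfinish("c:/out/surf.asc", "c:/out/bsmt.asc", "c:/out", "c:/out", "9.3")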
Example #22
def startScenarios(tempList, resolution, outFolder):
    try:
        gp = arcgisscripting.create(9.3)
        gp.OverwriteOutput = True

        tableList = [table.strip("\'") for table in tempList]
        for table in tableList:
            gp.AddMessage(table)

        Hier1 = TrendsNames.dbLocation + "EcoregionHierarchy"
        Eco1 = TrendsNames.dbLocation + "Level1Ecoregions"
        Eco2 = TrendsNames.dbLocation + "Level2Ecoregions"

        for sumname in tableList:
            summary = "sm_" + sumname
            #Delete old summary data from tables before creating new data
            if AnalysisNames.isInAnalysisNames(gp, summary):
                deleteAnalysisName.deleteAnalysisName(summary)

            ecoList = []
            #Get the ecoregion numbers for each summary name in the list
            if sumname in [
                    "WESTERNMOUNTAINSFORESTS", "EASTERNUS", "GREATPLAINS",
                    "WESTERNARID"
            ]:
                where_clause = "AnalysisName = \'" + sumname + "\'"
                rows = gp.SearchCursor(Eco1, where_clause)
                row = rows.Next()
                eco1 = row.EcoLevel1ID
                gp.AddMessage("EcoLevel1ID = " + str(eco1))
                #now get the level 3s for this level 1
                where_clause = "EcoLevel1ID = " + str(eco1)
                rows = gp.SearchCursor(Hier1, where_clause)
                row = rows.Next()
                while row:
                    ecoList.append(int(row.Ecolevel3ID))
                    gp.AddMessage("adding eco " + str(ecoList[-1]) +
                                  " to ecoList")
                    row = rows.Next()
            else:
                where_clause = "AnalysisName = \'" + sumname + "\'"
                rows = gp.SearchCursor(Eco2, where_clause)
                row = rows.Next()
                eco2 = row.EcoLevel2ID
                gp.AddMessage("EcoLevel2ID = " + str(eco2))
                #now get the level 3s for this level 2
                where_clause = "EcoLevel2ID = " + str(eco2)
                rows = gp.SearchCursor(Hier1, where_clause)
                row = rows.Next()
                while row:
                    ecoList.append(int(row.Ecolevel3ID))
                    gp.AddMessage("adding eco " + str(ecoList[-1]) +
                                  " to ecoList")
                    row = rows.Next()

            AnalysisNames.updateAnalysisNames(gp, summary)
            analysisNum = AnalysisNames.getAnalysisNum(gp, summary)
            gp.AddMessage("startStratified: summary = " + summary +
                          "  analysisNum = " + str(analysisNum))

            #Get the intermediate tables ready for the new summary
            newEcos, strats, splits = TrendsDataAccess.accessTrendsData(
                ecoList, analysisNum, resolution)

            #Start the Trends processing
            path = os.path.dirname(sys.argv[0])
            testStudyAreaStats.buildStudyAreaStats(summary, analysisNum,
                                                   newEcos, strats, splits)

            newExcel = SummaryWorkbook(gp, summary, analysisNum, resolution,
                                       outFolder)
            newExcel.build_workbook(gp)
            del newExcel

    except arcgisscripting.ExecuteError:
        msgs = gp.GetMessage(0)
        msgs += gp.GetMessages(2)
        gp.AddError(msgs)
    except TrendsUtilities.JustExit:
        pass
    except Exception:
        gp.AddMessage(traceback.format_exc())
Example #23
def main(env):
    gp = arcgisscripting.create()
    gp.CheckOutExtension("Spatial")

    if env:
        mdt_file = gp.GetParameterAsText(0)
        gdb_path = gp.GetParameterAsText(1)
        name_out = gp.GetParameterAsText(2)
        par1 = gp.GetParameterAsText(3)
        par2 = gp.GetParameterAsText(4)
        par3 = gp.GetParameterAsText(5)
        par4 = gp.GetParameterAsText(6)
        par5 = gp.GetParameterAsText(7)
        show = gp.GetParameterAsText(8)
        par6 = gp.GetParameterAsText(9)
        par7 = gp.GetParameterAsText(10)
        par8 = gp.GetParameterAsText(11)
        par9 = gp.GetParameterAsText(12)
        par10 = gp.GetParameterAsText(13)
    else:
        mdt_file = r'C:\Users\jchav\AH_01\CATATUMBO\data\DEM_Raw_Init_Catatumbo_Plus_750_3116.tif'
        gdb_path = r'C:\Users\jchav\AH_01\CATATUMBO\results\UTTL.gdb'
        name_out = 'mrvbf'
        par1 = 8
        par2 = 0.4
        par3 = 0.35
        par4 = 4
        par5 = 3
        show = True
        par6 = r'C:\Users\jchav\AH_01\CATATUMBO\results\UTTL.gdb\Drain_UTTL'
        par7 = r'C:\Users\jchav\AH_01\CATATUMBO\results\UTTL.gdb\UTTL_Basins'
        par8 = r'C:\Users\jchav\AH_01\CATATUMBO\results\UTTL.gdb\fac'
        par9 = r'C:\Users\jchav\AH_01\CATATUMBO\data\Qmax_Regional_UPME_CTr.tif'
        par10 = r'C:\Users\jchav\AH_01\CATATUMBO\data\Qmax_Regional_UPME_qTr.tif'

    gp.AddMessage('Making temp folder')
    temp_folder = r'{}\temp'.format(os.path.dirname(os.path.abspath(gdb_path)))

    if os.path.exists(temp_folder):
        gp.AddMessage('temp folder already exists')
    else:
        os.mkdir(temp_folder)

    gp.AddMessage('Running SAGA - MRVBF')
    import_data_to_saga(mdt_file, temp_folder)
    run_mrvbf(temp=temp_folder,
              t_slope=par1,
              tv=par2,
              tr=par3,
              p_slope=par4,
              p=par5)
    export_data_from_saga(temp_folder,
                          out_grid=name_out,
                          gdb=gdb_path,
                          show=show)
    gp.AddMessage('Running Beechie')
    fn_beechie(raster='{}/{}'.format(gdb_path, name_out),
               drain_shape=par6,
               uttl_basins=par7,
               fac=par8,
               workspace=gdb_path,
               CTr=par9,
               qTr=par10)
Example #24
def execute(self, parameters, messages):
    try:       
        arcpy.AddMessage("\n"+"="*21+" Starting Neural network inputfiles "+"="*21)
        gp = arcgisscripting.create()
        import arcsdm.workarounds_93
        try:
            importlib.reload(arcsdm.sdmvalues)
            importlib.reload(arcsdm.workarounds_93)
        except:
            reload(arcsdm.sdmvalues)
            reload(arcsdm.workarounds_93)
        #Arguments from tool dialog
        ucs = parameters[0].valueAsText #Unique Conditions raster
        #ucs_path = 'ucs_path'
        #gp.makerasterlayer_management(ucs, ucs_path)
        ucs_path = gp.describe(ucs).catalogpath
        TPs = parameters[1].valueAsText #Training sites
        FZMbrFld = parameters[2].valueAsText #Fuzzy membership field
        NDTPs = parameters[3].valueAsText#Nondeposit training sites
        NDFZMbrFld = parameters[4].valueAsText #Fuzzy membership field
        #Make Train file path
        #TODO: Parameter 5 is missing!
        #TODO: Parameter 6 is missing!
        traindta_filename = parameters[7].valueAsText #Make train file or not
        traintable = True
        classtable = parameters[6].valueAsText #gp.getparameter(6) # Make class file or not
        classdta_filename = None
        #Make Train file path
        if not traindta_filename:
            UCName = os.path.splitext(os.path.basename(ucs))[0]
    ##        traindta_filename = UCName + "_train"
    ##        OutWrkSpc = gp.Workspace
    ##        traindta_filename = gp.createuniquename(traindta_filename + ".dta", OutWrkSpc)
    ##        if classtable:
    ##            classdta_filename = traindta_filename.replace('_train', '_class')
        else:
            UCName = traindta_filename
    ##        traindta_filename = UCName + "_train"
    ##        OutWrkSpc = gp.Workspace
    ##        traindta_filename = gp.createuniquename(traindta_filename + ".dta", OutWrkSpc)
    ##        if classtable:
    ##            classdta_filename = traindta_filename.replace('_train', '_class')
        traindta_filename = UCName + "_train"
        OutWrkSpc = gp.Workspace
        traindta_filename = gp.createuniquename(traindta_filename + ".dta", OutWrkSpc)
        arcpy.AddMessage("%-20s %s " % ("Traindata filename",  traindta_filename))
        
        if classtable:
            classdta_filename = traindta_filename.replace('_train', '_class')
        #Make Class file path
        if classtable and not classdta_filename:        
            classdta_filename = gp.createuniquename(UCName + "_class" + ".dta", OutWrkSpc)
        #Get min/max values of evidence fields in unique conditions raster
        BandStatsFile = parameters[7].valueAsText #gp.getparameterastext(7) #Prepared band statistics file or not
        evidence_names = [row.name for row in rowgen(gp.listfields(ucs))][3:]
        if BandStatsFile:
            minmaxValues = getBandStatsFileMinMax(BandStatsFile, evidence_names)
        else:
            minmaxValues = getMinMaxValues(ucs_path, evidence_names)
        UnitArea = 1.0 #gp.getparameter(8) #1.0
        #gp.AddMessage("Got arguments..."+time.ctime())
        
        gp.AddMessage("Training file = " + str(traindta_filename))
        if classtable:
            gp.AddMessage("Class file = " + str(classdta_filename))

        #Derive other values
        RasValFld = NDRasValFld = 'RASTERVALU'
        #Feature classes to be gotten with Extract tool
        TP_RasVals = arcsdm.workarounds_93.ExtractValuesToPoints(gp, ucs, TPs, 'TPFID')
        NDTP_RasVals = arcsdm.workarounds_93.ExtractValuesToPoints(gp, ucs, NDTPs, 'NDTPFID')    
        TP_Dict, TPFID_Dict, TPFZM_Dict = MaxFZMforUC( TPs, TP_RasVals, RasValFld, FZMbrFld, 'TPFID' )
        NDTP_Dict, NDTPFID_Dict, NDTPFZM_Dict = MaxFZMforUC( NDTPs, NDTP_RasVals, NDRasValFld, NDFZMbrFld, 'NDTPFID' )
        CellSize = float(gp.cellsize)
        train_lineno = 0
        class_lineno = 0
        train_lines = []
        class_lines = []
        #Compose the lines of the files
        for ucrow in rowgen(gp.searchcursor(ucs_path)):
            #Read the UC raster rows, get evidence values
            UCValue = ucrow.value
            #gp.addwarning('%d'%UCValue)
            evidence_values = getValuesList( \
                [(evidence_name, ucrow.getValue(evidence_name)) for evidence_name in evidence_names], \
                minmaxValues
                )
            if classtable:
                #Compose the class table line
                class_lineno += 1
                wLine = str(class_lineno) + ","
                wLine = wLine + str(TP_Dict.get(UCValue, 0)) + ","
                Area = "%.1f" % (ucrow.Count * CellSize * CellSize / 1000000.0 / UnitArea)
                wLine = wLine + str(Area) + ","
                for theVal in evidence_values:
                    wLine = wLine + "%.5f" % theVal + ","
                wLine += "0\n"
                class_lines.append(wLine)
            if traintable:
                #Compose the train table line
                if TP_Dict.get(UCValue, 0):
                    if FZMbrFld:
                        if NDFZMbrFld:
                            if NDTP_Dict.get(UCValue, 0):
                                #Use max fuzzy mbrshp between dep and non-dep site
                                TP_Mbrship = TPFZM_Dict[UCValue]
                                NDTP_Mbrship = NDTPFZM_Dict[UCValue]
                                if TP_Mbrship > NDTP_Mbrship:
                                    TPFid = TPFID_Dict[UCValue]
                                    fuzzy_mbrshp = TP_Mbrship
                                else:
                                    TPFid = NDTPFID_Dict[UCValue] + 1000
                                    fuzzy_mbrshp = NDTP_Mbrship
                            else:
                                #Use fuzzy mbrshp of dep site
                                TPFid = TPFID_Dict[UCValue]
                                fuzzy_mbrshp = TPFZM_Dict[UCValue]
                        else:
                            #Use fuzzy mbrshp of dep site
                            TPFid = TPFID_Dict[UCValue]
                            fuzzy_mbrshp = TPFZM_Dict[UCValue]
                    elif NDFZMbrFld:
                        if NDTP_Dict.get(UCValue, 0):
                            #Use fuzzy mbrshp of non=dep site
                            TPFid = NDTPFID_Dict[UCValue] + 1000
                            fuzzy_mbrshp = NDTPFZM_Dict[UCValue]
                        else:
                            #Use default fuzzy mbrshp of dep site
                            TPFid = 0
                            fuzzy_mbrshp = 1.0
                    else:
                        #Use default fuzzy mbrshp of dep site
                        TPFid = 0
                        fuzzy_mbrshp = 1.0
                elif NDTP_Dict.get(UCValue, 0):
                    if NDFZMbrFld:
                        #Use fuzzy mbrshp of non-dep site
                        TPFid = NDTPFID_Dict[UCValue] + 1000
                        fuzzy_mbrshp = NDTPFZM_Dict[UCValue]
                    else:
                        #Use default fuzzy mbrshp of non=dep site
                        TPFid = 1000
                        fuzzy_mbrshp = 0.0                    
                else:
                    #No sites within UC area
                    continue #Do not write line
                train_lineno += 1
                try:
                    train_lines.append(composeDTAline(train_lineno, TPFid, UCValue, evidence_values, fuzzy_mbrshp))
                except:
                    gp.addwarning(str(train_lineno))
                
        if traintable:
            #Write out the train file
            trainfd_dta = open(traindta_filename, 'w')
            trainfd_dta.write('%-d\n'%len(evidence_names))
            trainfd_dta.write(str(gp.getcount_management(TPs))+'\n')
            trainfd_dta.write('1\n')
            trainfd_dta.write(str(train_lineno)+'\n')
            trainfd_dta.writelines(train_lines)
            trainfd_dta.close()
        if classtable:
            #Write out the class file
            classfd_dta = open(classdta_filename, 'w')
            classfd_dta.write('%-d\n'%len(evidence_names))
            if traintable:
                classfd_dta.write(str(gp.getcount_management(TPs))+'\n')
            else: classfd_dta.write('5\n')
            classfd_dta.write('1\n')
            classfd_dta.write('%-d\n'%class_lineno)
            classfd_dta.writelines(class_lines)
            classfd_dta.close()
        
    except arcpy.ExecuteError as e:
        #TODO: Clean up all these execute errors in the final version
        arcpy.AddError("\n")
        arcpy.AddMessage("Calculate weights caught arcpy.ExecuteError")
        gp.AddError(arcpy.GetMessages())
        if len(e.args) > 0:
            #arcpy.AddMessage("Calculate weights caught arcpy.ExecuteError: ")
            args = e.args[0]
            args.split('\n')
            #arcpy.AddError(args)

        arcpy.AddMessage("-------------- END EXECUTION ---------------")
        raise
    except:
        # get the traceback object
        tb = sys.exc_info()[2]
        # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n    " + \
                str(sys.exc_type)+ ": " + str(sys.exc_value) + "\n"
        # generate a message string for any geoprocessing tool errors
        msgs = "GP ERRORS:\n" + arcpy.GetMessages(2) + "\n"
        arcpy.AddError(msgs)

        # return gp messages for use with a script tool
        arcpy.AddError(pymsg)

        # print messages for use in Python/PythonWin
        print (pymsg)
        print (msgs)

        raise
Example #25
def createDBFtables(tableList, resolution, folderForTables):
    try:
        gp = arcgisscripting.create(9.3)

        if "ConversionChange" in tableList:
            changeTable = changedBASEtable(gp, TrendsNames.numConversions,
                                           len(TrendsNames.TrendsIntervals),
                                           resolution)
            changeTable.getTableValues(gp, TrendsNames.numConversions,
                                       len(TrendsNames.TrendsIntervals))
            changeTable.writeToTable(gp, folderForTables)
            del changeTable
        if "ConversionError" in tableList:
            errorTable = errordBASEtable(gp, TrendsNames.numConversions,
                                         len(TrendsNames.TrendsIntervals),
                                         resolution)
            errorTable.getTableValues(gp, TrendsNames.numConversions,
                                      len(TrendsNames.TrendsIntervals))
            errorTable.writeToTable(gp, folderForTables)
            del errorTable
        if "Composition" in tableList:
            compTable = compdBASEtable(gp, TrendsNames.numLCtypes * 2,
                                       len(TrendsNames.TrendsYears),
                                       resolution)
            compTable.getTableValues(gp, TrendsNames.numLCtypes * 2,
                                     len(TrendsNames.TrendsYears))
            compTable.writeToTable(gp, folderForTables)
            del compTable
        if "Gains" in tableList:
            gainTable = gaindBASEtable(gp, 'gain', TrendsNames.numLCtypes * 2,
                                       len(TrendsNames.TrendsIntervals),
                                       resolution)
            gainTable.getTableValues(gp, TrendsNames.numLCtypes * 2,
                                     len(TrendsNames.TrendsIntervals))
            gainTable.writeToTable(gp, folderForTables)
            del gainTable
        if "Losses" in tableList:
            lossTable = lossdBASEtable(gp, 'loss', TrendsNames.numLCtypes * 2,
                                       len(TrendsNames.TrendsIntervals),
                                       resolution)
            lossTable.getTableValues(gp, TrendsNames.numLCtypes * 2,
                                     len(TrendsNames.TrendsIntervals))
            lossTable.writeToTable(gp, folderForTables)
            del lossTable
        if "Gross" in tableList:
            grossTable = grossdBASEtable(
                gp,
                'gross',
                (TrendsNames.numLCtypes + 1) *
                2,  #make longer for 'overall' class
                len(TrendsNames.TrendsIntervals),
                resolution)
            grossTable.getTableValues(gp, (TrendsNames.numLCtypes + 1) * 2,
                                      len(TrendsNames.TrendsIntervals))
            grossTable.writeToTable(gp, folderForTables)
            del grossTable
        if "Net" in tableList:
            netTable = netdBASEtable(gp, 'net', TrendsNames.numLCtypes * 2,
                                     len(TrendsNames.TrendsIntervals),
                                     resolution)
            netTable.getTableValues(gp, TrendsNames.numLCtypes * 2,
                                    len(TrendsNames.TrendsIntervals))
            netTable.writeToTable(gp, folderForTables)
            del netTable
        if "Multichange" in tableList:
            multiTable = multidBASEtable(
                gp,
                TrendsNames.numMulti * 2,
                1,  #currently only 1973-2000 used in tables
                resolution)
            multiTable.getTableValues(gp, TrendsNames.numMulti * 2, 1)
            multiTable.writeToTable(gp, folderForTables)
            del multiTable
        gp.AddMessage("Complete")
    except Exception:
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]
        pymsg = tbinfo + "\n" + str(sys.exc_type) + ": " + str(sys.exc_value)
        gp.AddMessage(pymsg)
Example #26
def loadEcosForSummary( sa, ecoData ):
    try:
        gp = arcgisscripting.create(9.3)    
        gp.OverwriteOutput = True
        trlog = TrendsUtilities.trLogger()
        statList = ('EstChange','EstVar')

        def getoffset( source ):
            try:
                desc = gp.Describe( source )
                newfields = desc.Fields
                for ptr, field in enumerate(newfields):
                    if field.Name == "Statistic":
                        newoffset = ptr + 1
                return newfields, newoffset
            except Exception:
                raise
        def getrows( tabname, clause, fields, offset, data ):
            try:
                rows = gp.SearchCursor( tabname, clause )
                row = rows.Next()
                while row:
                    ctr = statList.index( row.Statistic )
                    for (ptr, field) in enumerate( fields[offset:]):
                        data[ ptr, ctr ] = row.GetValue( field.Name )
                    row = rows.Next()
            except Exception:
                raise

        customAnalysis = "Partial stratified" in [ecodat[9] for ecodat in ecoData]
        if customAnalysis:
            tableType = "Custom"
            analysisNum = sa.analysisNum
        else:
            tableType = "Trends"
            analysisNum = TrendsNames.TrendsNum
        ecoList = [x[0] for x in ecoData]        
        for ecoNum in ecoList:
            setUpShortEcoArrays( sa, ecoNum )
            where_clause = "AnalysisNum = " + str(analysisNum) + \
                        " and EcoLevel3ID = " + str(ecoNum) + \
                       " and Resolution = '" + str(sa.resolution) + "'" + \
                       " and (Statistic = 'EstChange' or Statistic = 'EstVar')"

            if tableType == "Trends":
                sourceName = TrendsNames.dbLocation + "TrendsChangeStats"
            else:
                sourceName = TrendsNames.dbLocation + "CustomChangeStats"
            fields, offset = getoffset( sourceName )
            for interval in sa.intervals:
                clause = where_clause + " and ChangePeriod = '" + interval + "'"
                getrows( sourceName, clause, fields, offset, sa.study[ecoNum].conv[interval] )

            if tableType == "Trends":
                sourceName = TrendsNames.dbLocation + "TrendsCompStats"
            else:
                sourceName = TrendsNames.dbLocation + "CustomCompStats"
            fields, offset = getoffset( sourceName )
            for year in sa.years:
                clause = where_clause + " and CompYear = '" + year + "'"
                getrows( sourceName, clause, fields, offset, sa.study[ecoNum].comp[year] )

            if tableType == "Trends":
                sourceName = TrendsNames.dbLocation + "TrendsMultichangeStats"
            else:
                sourceName = TrendsNames.dbLocation + "CustomMultichangeStats"
            fields, offset = getoffset( sourceName )
            for interval in sa.multiIntervals:
                getrows( sourceName, where_clause, fields, offset, sa.study[ecoNum].multi[interval] )

            if tableType == "Trends":
                sourceName = TrendsNames.dbLocation + "TrendsGlgnStats"
            else:
                sourceName = TrendsNames.dbLocation + "CustomGlgnStats"
            fields, offset = getoffset( sourceName )
            for interval in sa.intervals:
                for tab in TrendsNames.glgnTabTypes:
                    clause = where_clause + " and ChangePeriod = '" + interval + "'" + \
                             " and Glgn = \'" + tab + "\'" 
                    getrows( sourceName, clause, fields, offset, sa.study[ecoNum].glgn[interval][tab] )

            source = 'gross'                    
            if tableType == "Trends":
                sourceName = TrendsNames.dbLocation + "TrendsAggregateStats"
            else:
                sourceName = TrendsNames.dbLocation + "CustomAggregateStats"
            fields, offset = getoffset( sourceName )
            for interval in sa.aggIntervals:
                clause = where_clause + " and ChangePeriod = '" + interval + "'" + \
                        " and Source = '" + source + "'"
                getrows( sourceName, clause, fields, offset, sa.study[ecoNum].aggregate[interval][source] )

            if tableType == "Trends":
                sourceName = TrendsNames.dbLocation + "TrendsAggGlgnStats"
            else:
                sourceName = TrendsNames.dbLocation + "CustomAggGlgnStats"
            fields, offset = getoffset( sourceName )
            for interval in sa.aggIntervals:
                for tab in TrendsNames.glgnTabTypes:
                    clause = where_clause + " and ChangePeriod = '" + interval + "'" + \
                             " and Source = '" + source + "'" + " and Glgn = \'" + tab + "\'" 
                    getrows( sourceName, clause, fields, offset, sa.study[ecoNum].aggGlgn[interval][source][tab] )

            sourceC = 'conversion'
            sourceA = 'addgross'
            sourceM = 'multichange'
            if tableType == "Trends":
                sourceName = TrendsNames.dbLocation + "TrendsAllChangeStats"
            else:
                sourceName = TrendsNames.dbLocation + "CustomAllChangeStats"
            fields, offset = getoffset( sourceName )
            for interval in sa.intervals:
                clause = where_clause + " and ChangePeriod = '" + interval + "'" + \
                        " and Source = '" + sourceC + "'"
                getrows( sourceName, clause, fields, offset, sa.study[ecoNum].allchg[interval][sourceC] )

            for interval in sa.aggIntervals:
                clause = where_clause + " and ChangePeriod = '" + interval + "'" + \
                        " and Source = '" + sourceA + "'"
                getrows( sourceName, clause, fields, offset, sa.study[ecoNum].allchg[interval][sourceA] )

            for interval in sa.multiIntervals:
                clause = where_clause + " and ChangePeriod = '" + interval + "'" + \
                        " and Source = '" + sourceM + "'"
                getrows( sourceName, clause, fields, offset, sa.study[ecoNum].allchg[interval][sourceM] )
            
    except arcgisscripting.ExecuteError:
        # Get the geoprocessing error messages
        msgs = gp.GetMessage(0)
        msgs += gp.GetMessages(2)
        trlog.trwrite(msgs)
        raise            
    except TrendsUtilities.TrendsErrors, Terr:
        #Get errors specific to Trends execution
        trlog.trwrite( Terr.message )
        raise
Example #27
def CalcBasinStats(doctbl_d, doc_d):

    import os, os.path, shutil

    # Initialize the gp object to work between versions (9x vs 8x)
    try:
        import arcgisscripting
        gp = arcgisscripting.create()
    except:
        import win32com.client
        gp = win32com.client.Dispatch("esriGeoprocessing.GpDispatch.1")

    #try:
    gp.CheckOutExtension("Spatial")

    gp.Workspace = FPathTmpSpace
    ValGrid = "valgridtmp"
    ValGridCF = "valgridtmp0"
    ZoneGrid = doctbl_d['grdbasins']['filepath']
    ClAreaGrid = doctbl_d['grdarea']['filepath']
    print("ZONALSTATS 0:", FPathTmpSpace, ZoneGrid, ClAreaGrid)

    for var in doc_d:
        # perform zonal stats on "value" grids only
        grd_key = doc_d[var]['srctable'][0]
        ValRawGrid = doctbl_d[grd_key]['filepath']
        ValGridItem = doc_d[var]['fieldname']
        # write zonalstats dbf table to the Workspace
        # (is this path-joining step needed? Isn't the Workspace used?)
        outTbl = os.path.join(FPathTmpSpace, var + ".dbf")
        if os.path.exists(outTbl):
            os.remove(outTbl)

        print("ZONALSTATS:", var, ValRawGrid, ValGridItem, outTbl)

        # Check for and include ValRawGrid item names other than "VALUE"
        if ValGridItem.upper() != "VALUE":
            ValRawGrid += '.' + ValGridItem

        # If CREATEBASAREA is requested, no area scaling is needed
        if BASAREA['BasinAreas']['FLAG']:
            # Perform zonal statistics
            gp.ZonalStatisticsAsTable_sa(ZoneGrid, "Value", ValRawGrid, outTbl,
                                         "DATA")
        else:
            # create a temporary (on-the-fly) grid that's the product of
            # the raw ValueGrid and one or two area scaling grids.
            # This temporary grid will be deleted once the zonal table is created
            gp.Times_sa(ValRawGrid, ClAreaGrid, ValGrid)
            if doc_d[var]['fieldtype'] == 'all':
                ValGridRun = ValGrid
            else:
                CFAreaGrid = doctbl_d[doc_d[var]['fieldtype']]['filepath']
                print("ZONALSTATS Cell Fr grid:", ValGrid, CFAreaGrid)
                gp.Times_sa(ValGrid, CFAreaGrid, ValGridCF)
                # delete the temporary grid
                gp.Delete_management(ValGrid)
                ValGridRun = ValGridCF

            # Perform zonal statistics
            gp.ZonalStatisticsAsTable_sa(ZoneGrid, "Value", ValGridRun, outTbl,
                                         "DATA")

            # delete the temporary grid
            gp.Delete_management(ValGridRun)

    # Final GIS cleanup: delete info directory & log file
    # Otherwise the model run folder retains this litter...
    shutil.rmtree(os.path.join(".", "info"))
    logfile = os.path.join(".", "log")
    if os.path.exists(logfile):
        os.remove(logfile)

    #except:
    #    print gp.GetMessages()

    # delete geoprocessing object, just to be safe
    del gp
    def ConvertoGrid(self):

        # Create the Geoprocessor object
        gp = arcgisscripting.create()

        # Check out any necessary licenses
        gp.CheckOutExtension("spatial")

        gp.overwriteoutput = 1

        # Local directory with NetCDF files
        NetCDFdir = self.cdfPath
        # Local directory to store GRID files
        Griddir = self.gridPath

        # List all the files in this folder
        Filelist = os.listdir(NetCDFdir)

        try:

            for Filename in Filelist:

                # Process the subset of the list fname that matches the description
                if fnmatch.fnmatch(Filename, '*.nc'):
                    # Split the .nc filename
                    Splitfname = Filename.split(".")
                    Mdataset = Splitfname[0]
                    Myear = Splitfname[1]
                    Mmon = Splitfname[2]
                    Mday = Splitfname[3]
                    Mver = Splitfname[4]
                    Mext = Splitfname[5]

                    imageInfo = Myear + Mmon + Mday

                    if ((int(imageInfo) > int(self.ReadLog()))):

                        NetCDFfile = NetCDFdir + Filename

                        #Set local variables
                        InNetCDFFile = NetCDFfile
                        InVariable = "hrf"
                        InXDimension = "longitude"
                        InYDimension = "latitude"
                        OutRasterLayer = "hrf_Layer"

                        # Process: MakeNetCDFRasterLayer
                        gp.MakeNetCDFRasterLayer(InNetCDFFile, InVariable,
                                                 InXDimension, InYDimension,
                                                 OutRasterLayer)

                        # convert to GRID
                        OutName = Griddir + "//" + "TRMM" + "_" + Myear + Mmon + Mday
                        gp.SingleOutputMapalgebra(OutRasterLayer, OutName)

                        self.WriteLog(imageInfo)
                        print "Successfully converted to Grid " + str(Filename)
                        #raise exit
                    else:
                        print "Already Converted From NetCDF to Grid -->" + str(
                            Filename)
                    print "Conversion completed..."
        except Exception, e:
            print "Could not get the NetCDF file list: " + str(e)
Example #29
def loadSummary( sa ):
    try:
        gp = arcgisscripting.create(9.3)    
        gp.OverwriteOutput = True
        trlog = TrendsUtilities.trLogger()
        where_clause = "AnalysisNum = " + str(sa.analysisNum) + \
                       " and Resolution = '" + str(sa.resolution) + "'"

        sourceName = "SummaryChangeStats"
        dbReader = databaseReadClass.changeRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in sa.intervals:
            dbReader.databaseRead( gp, sa.analysisNum, 0, interval,
                                    sa.resolution, sa.summary[interval][1],
                                    "stats", TrendsNames.sumStatsNames)
                
        sourceName = "SummaryCompStats"
        dbReader = databaseReadClass.compositionRead( gp, TrendsNames.dbLocation + sourceName)
        for year in sa.years:
            dbReader.databaseRead( gp, sa.analysisNum, 0, year, sa.resolution,
                                    sa.sumcomp[year][1], "stats", TrendsNames.sumStatsNames)

        sourceName = "SummaryMultichangeStats"
        dbReader = databaseReadClass.multichangeRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in sa.multiIntervals:
            datafound = dbReader.databaseRead( gp, sa.analysisNum, 0, interval, sa.resolution,
                                   sa.summulti[interval][1],"stats", TrendsNames.sumStatsNames)

        sourceName = "SummaryGlgnStats"
        dbReader = databaseReadClass.glgnRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in sa.intervals:
            dbReader.databaseRead( gp, sa.analysisNum, 0, interval, sa.resolution,
                                    sa.sumglgn[interval],
                                    "stats", TrendsNames.sumStatsNames)

        source = 'gross'                    
        sourceName = "SummaryAggregateStats"
        dbReader = databaseReadClass.aggregateRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in sa.aggIntervals:
            dbReader.databaseRead( gp, sa.analysisNum, 0, interval, sa.resolution, source,
                                    sa.sumaggregate[interval][source][1],
                                    "stats", TrendsNames.sumStatsNames)

        sourceName = "SummaryAggGlgnStats"
        dbReader = databaseReadClass.aggGlgnRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in sa.aggIntervals:
            dbReader.databaseRead( gp, sa.analysisNum, 0, interval, sa.resolution, source,
                                    sa.sumaggglgn[interval][source],
                                    "stats", TrendsNames.sumStatsNames)

        sourceC = 'conversion'
        sourceA = 'addgross'
        sourceM = 'multichange'
        sourceName = "SummaryAllChangeStats"
        dbReader = databaseReadClass.allChangeRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in sa.intervals:
            dbReader.databaseRead( gp, sa.analysisNum, 0, interval, sa.resolution,
                                    sourceC, sa.sumallchange[interval][sourceC][1],
                                    "stats", TrendsNames.sumStatsNames)
        for interval in sa.aggIntervals:
            dbReader.databaseRead( gp, sa.analysisNum, 0, interval, sa.resolution,
                                       sourceA, sa.sumallchange[interval][sourceA][1],
                                       "stats", TrendsNames.sumStatsNames)

        for interval in sa.multiIntervals:
            dbReader.databaseRead( gp, sa.analysisNum, 0, interval, sa.resolution,
                                    sourceM, sa.sumallchange[interval][sourceM][1],
                                   "stats", TrendsNames.sumStatsNames)
            
    except arcgisscripting.ExecuteError:
        # Get the geoprocessing error messages
        msgs = gp.GetMessage(0)
        msgs += gp.GetMessages(2)
        trlog.trwrite(msgs)
        raise            
    except TrendsUtilities.TrendsErrors, Terr:
        #Get errors specific to Trends execution
        trlog.trwrite( Terr.message )
        raise
Example #30
    def EtaCalculation(self):
       
        # Create Geoprocessing object

        gp = arcgisscripting.create()

        # Check out any License

        gp.CheckOutExtension("spatial")

        # Overwriting the Output

        gp.OverwriteOutput =1

        #Define output path
        outRaster=self.etaPath

        #Creating Lists
        fList=list() #Read etf Rasters
        oList=list() # Read eto Rasters

        etfList=list() # To add etf Rasters
        etoList=list() # To add eto Rasters

        #change the work space and starting to read ETF files 
        gp.workspace=self.etfPath
        fList=gp.ListRasters("*")
        etf=fList.Next()
        etfList.append(etf)

        while etf:
            etf=fList.Next()
            etfList.append(etf)

        print "\t ETf List \t"
        for etf in etfList:
           
            print etf
            
        print "-------------"


        #change the work space and starting to read reprojected ETO files

        roList=list() # Read resampled eto Rasters
        retoList=list() # To add resampled eto Rasters

        gp.workspace=self.etoPath
        roList=gp.ListRasters("*")
        reto=roList.Next()
        retoList.append(reto)

        while reto:
            reto=roList.Next()
            retoList.append(reto)
        
        print"\t ETo List \t"
        for eto in retoList:
           
            print eto
            
        print "-------------"


        # Using times tool to multiply the eto and etf rasters.
        for j in range(0,(len(etfList)-1),1):
            i=0
            #print "j--->" + str(etfList[j])
            while(i<(len(retoList)-1)):
                #print "i--->"+str(i)               
                dayEto=retoList[i][-3:]
                dayEtf=etfList[j][-3:]    
                yearEto=retoList[i][-7:-3]
                yearEtf=etfList[j][-7:-3]

                etoInfo=int(yearEto+dayEto)
                etfInfo=int(yearEtf+dayEtf)
                etaInfo=self.ReadLog()

                #print dayEto,dayEtf,yearEto,yearEtf
                if((etoInfo<=etaInfo) and (etfInfo<=etaInfo)):
                    print "ETa already computed for-"
                    print "ETf: "+str(etfList[j])
                    print "ETo: "+str(retoList[i])
                    print "\n"
                    
                elif(dayEto==dayEtf and yearEto==yearEtf):
                    
                    print "Multiply \t"+retoList[i] +"\t and \t "+ etfList[j]
                    gp.Times_sa(self.etfPath+etfList[j],self.etoPath+retoList[i],(outRaster+"ETa"+str(yearEto)+str(dayEto).zfill(3)))
                    
                    print "Successfully Completed"
                    self.WriteLog(str(etoInfo))
                    break
                
                else:
                    
                    print "Unmatched ETf -->"+self.etfPath+etfList[j]+" and ETo -->"+self.etoPath+retoList[i]
                    
                i=i+1
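
# A minimal sketch (not part of the original tool): the nested loop above pairs
# ETf and ETo rasters by comparing a trailing YYYYDDD date parsed from each
# name (year = name[-7:-3], day = name[-3:]). Indexing the ETo names by that
# 7-character suffix first avoids the quadratic scan; raster names are assumed
# to end in a 4-digit year plus a zero-padded 3-digit day, e.g. 'etf2008123'.
def pair_rasters_by_date(etf_names, eto_names):
    eto_by_date = dict((name[-7:], name) for name in eto_names if name)
    for etf in etf_names:
        if etf and etf[-7:] in eto_by_date:
            # Yield each (ETf, ETo) pair whose YYYYDDD suffixes match.
            yield etf, eto_by_date[etf[-7:]]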
Exemplo n.º 31
 def __init__(self):
     """Initialize class and create single geoprocessor object"""
     self.gp = arcgisscripting.create(9.3)
     self.gp.CheckOutExtension("Spatial")
     self.gp.OverwriteOutput = True
     self.lm_configured = False
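
# Usage sketch (illustrative only; the enclosing class name is not shown in
# this snippet, so 'LinkageMapperTool' here is a placeholder):
#   tool = LinkageMapperTool()   # runs the __init__ above
#   tool.gp.AddMessage("geoprocessor ready, Spatial extension checked out")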
Exemplo n.º 32
def main(env):
    arcpy.CheckOutExtension('Spatial')
    gp = arcgisscripting.create()

    if env:
        gdb_path = arcpy.GetParameterAsText(0)
        dem_path = arcpy.GetParameterAsText(1)
        batch_points_path = arcpy.GetParameterAsText(2)
        drain_network_path = arcpy.GetParameterAsText(3)
        uttl = arcpy.GetParameterAsText(4)
        epsg = arcpy.GetParameterAsText(5)
        fac_path = arcpy.GetParameterAsText(6)

        mrvbf = gp.GetParameterAsText(7)
        par1 = gp.GetParameterAsText(8)
        par2 = gp.GetParameterAsText(9)
        par3 = gp.GetParameterAsText(10)
        par4 = gp.GetParameterAsText(11)
        par5 = gp.GetParameterAsText(12)
        show = gp.GetParameterAsText(13)
        par9 = gp.GetParameterAsText(14)
        par10 = gp.GetParameterAsText(15)

        save_mxd()
    else:
        gdb_path = r'C:\Users\jchav\AH_01\CATATUMBO\results\UTTL.gdb'
        dem_path = r'C:\Users\jchav\AH_01\CATATUMBO\data\DEM_Raw_Init_Catatumbo_Plus_750_3116.tif'
        batch_points_path = r'C:\Users\jchav\AH_01\CATATUMBO\results\UTTL.gdb\BatchPoints'
        drain_network_path = r'C:\Users\jchav\AH_01\CATATUMBO\results\UTTL.gdb\drainage_line'
        epsg = 3116
        uttl = r'C:\Users\jchav\AH_01\CATATUMBO\results\UTTL.gdb\UTTL_Basins'
        fac_path = r'C:\Users\jchav\AH_01\CATATUMBO\results\UTTL.gdb\fac'

        mrvbf = 'mrvbf'
        par1 = 8
        par2 = 0.4
        par3 = 0.35
        par4 = 4
        par5 = 3
        show = True
        par9 = r'C:\Users\jchav\AH_01\CATATUMBO\data\Qmax_Regional_UPME_CTr.tif'
        par10 = r'C:\Users\jchav\AH_01\CATATUMBO\data\Qmax_Regional_UPME_qTr.tif'

    gp.AddMessage('FLORES ...')
    slope_calc(batch_point=batch_points_path,
               workspace=gdb_path,
               drain=drain_network_path,
               epsg=epsg,
               dem=dem_path,
               uttl=uttl)
    areas_watershed(workspace=gdb_path, fac=fac_path, uttl=uttl)
    flores(workspace=gdb_path, uttl=uttl)
    gp.AddMessage('FLORES: completed successfully')

    par6 = os.path.join(gdb_path, 'Drain_UTTL')

    gp.AddMessage('BEECHIE ...')
    temp_folder = r'{}\temp'.format(os.path.dirname(os.path.abspath(gdb_path)))
    if os.path.exists(temp_folder):
        gp.AddMessage('folder temp already exists')
    else:
        os.mkdir(temp_folder)
    gp.AddMessage('Running SAGA - MRVBF')
    import_data_to_saga(dem_path, temp_folder)
    run_mrvbf(temp=temp_folder,
              t_slope=par1,
              tv=par2,
              tr=par3,
              p_slope=par4,
              p=par5)
    export_data_from_saga(temp_folder, out_grid=mrvbf, gdb=gdb_path, show=show)
    gp.AddMessage('Running Beechie')
    fn_beechie(raster='{}/{}'.format(gdb_path, mrvbf),
               drain_shape=par6,
               uttl_basins=uttl,
               fac=fac_path,
               workspace=gdb_path,
               CTr=par9,
               qTr=par10)
    if show:
        show_things(uttl, 'UTTL', os.path.dirname(gdb_path))
Exemplo n.º 33
def execute(self, parameters, messages):

    # Create the Geoprocessor object
    gp = arcgisscripting.create()

    # Check out any necessary licenses
    gp.CheckOutExtension("spatial")

    gp.OverwriteOutput = 1
    gp.LogHistory = 1

    # Load required toolboxes...
    try:
        parentfolder = os.path.dirname(sys.path[0])
        #
        #tbxpath = os.path.join(parentfolder,"arcsdm.pyt")
        tbxpath = os.path.join(parentfolder, "toolbox\\arcsdm.pyt")
        dwrite(tbxpath)
        gp.AddToolbox(tbxpath)
        #gp.addmessage('getting arguments...')
        Grand_WOFE_Name = '_' + parameters[0].valueAsText
        #gp.GetParameterAsText(0)
        Evidence_Rasters = parameters[1].valueAsText.split(';')
        #gp.GetParameterAsText(1).split(';')
        Evidence_Data_Types = parameters[2].valueAsText.lower().split(';')
        #gp.GetParameterAsText(2).lower().split(';')
        Input_Training_Sites_Feature_Class = parameters[3].valueAsText
        #gp.GetParameterAsText(3)
        Ignore_Missing_Data = parameters[4].value
        #gp.GetParameter(4)
        Confidence_Level_of_Studentized_Contrast = parameters[5].value
        #gp.GetParameter(5)
        Unit_Area__sq_km_ = parameters[6].value  #gp.GetParameter(6)
        #gp.addmessage('got arguments')
        #import SDMValues
        arcsdm.sdmvalues.appendSDMValues(gp, Unit_Area__sq_km_,
                                         Input_Training_Sites_Feature_Class)

        # Local variables...
        List_Wts_Tables = []
        suffixes = {
            'Ascending': '_CA',
            'Descending': '_CD',
            'Categorical': '_CT'
        }
        Missing_Data_Value = -99
        Evidence_Raster_Code_Field = ''
        OutSet = []  #List of output datasets
        dwrite('set local variables')

        #Processing...
        # Test for proper table data types:
        if len(Evidence_Data_Types) != len(Evidence_Rasters):
            gp.adderror(
                'Number of evidence layers and weights data types do not match'
            )
            raise Exception
        for evtype in Evidence_Data_Types:
            if not evtype[0] in 'ofc':
                gp.adderror(
                    'Evidence data type %s is not one of %s' %
                    (evtype, ['free', 'categorical', 'ordered']))
                raise TypeError
        # Process: Calculate Weights of Evidence...
        dwrite(str(Evidence_Data_Types))
        dwrite(str(Evidence_Rasters))
        arcpy.AddMessage("========== Starting GrandWofe ====================")

        for Evidence_Raster_Layer, Evidence_Data_Type in zip(
                Evidence_Rasters, Evidence_Data_Types):
            prefix = Evidence_Raster_Layer + Grand_WOFE_Name
            arcpy.AddMessage("Calculating weights for %s (%s)..." %
                             (Evidence_Raster_Layer, Evidence_Data_Type))
            if Evidence_Data_Type.startswith('o'):
                Wts_Table_Types = ['Ascending', 'Descending']
            else:
                Wts_Table_Types = ['Categorical']

            for Wts_Table_Type in Wts_Table_Types:
                suffix = suffixes[Wts_Table_Type]
                filename = prefix + suffix
                # + '.dbf' NO DBF anymore
                unique_name = gp.createuniquename(filename, gp.workspace)
                Output_Weights_Table = unique_name
                dwrite(gp.ValidateTablename(prefix + suffix))

                dwrite('%s Exists: %s' %
                       (Output_Weights_Table, gp.exists(Output_Weights_Table)))
                arcpy.ImportToolbox(tbxpath)

                # Temporarily print directory
                #gp.addmessage(dir(arcpy));
                #gp.addmessage("Calling calculate weights...")
                dwrite(" raster layer name: " + Evidence_Raster_Layer)

                result = arcpy.CalculateWeightsTool_ArcSDM ( Evidence_Raster_Layer, Evidence_Raster_Code_Field, \
                                               Input_Training_Sites_Feature_Class, Wts_Table_Type, Output_Weights_Table, \
                                               Confidence_Level_of_Studentized_Contrast, \
                                               Unit_Area__sq_km_, Missing_Data_Value)
                arcpy.AddMessage("     ...done")
                gp.addwarning('Result: %s\n' % result)
                #gp.addmessage("Done...")
                #gp.addmessage(result);

                #Output, Success = result.split(';')
                Success = "True"  # horrible fix...
                Output = result.getOutput(0)
                if Success.strip().lower() == 'true':
                    List_Wts_Tables.append((Evidence_Raster_Layer, Output))
                    #gp.addmessage('Valid Wts Table: %s'%Output_Weights_Table)
                    OutSet.append(str(
                        Output))  # Save name of output table for display kluge
                else:
                    gp.addmessage('Invalid Wts Table: %s' % Output.strip())
                #arcpy.AddMessage("\n")

        #Get list of valid tables for each input raster
        raster_tables = {}
        #arcpy.AddMessage("     ...done");

        for Evidence_Raster_Layer, Output_Weights_Table in List_Wts_Tables:
            #gp.addmessage(str((evidence_layer, wts_table)))
            if Evidence_Raster_Layer in raster_tables:
                raster_tables[Evidence_Raster_Layer].append(
                    Output_Weights_Table)
            else:
                raster_tables[Evidence_Raster_Layer] = [Output_Weights_Table]

        if len(raster_tables) > 0:
            #Function to do nested "for" statements by recursion
            def nested_fors(ranges, tables, N, tables_out=[], tables_all=[]):
                for n in ranges[0]:
                    tables_out.append(tables[0][n])
                    if len(ranges) > 1:
                        nested_fors(ranges[1:], tables[1:], N, tables_out,
                                    tables_all)
                    if len(tables_out) == N:
                        tables_all.append(tables_out[:])
                    del tables_out[-1]
                return tables_all
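            # For reference (a sketch, not the original code): the same
            # combinations can be produced with itertools.product, which picks
            # one weights table per raster in input order:
            #   from itertools import product
            #   Weights_Tables_Per_Test = [list(c) for c in product(*tables)]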

            #Get per-test lists of tables; each table in a list is in input raster order
            #tables = [raster_tables[Evidence_Raster_Layer] for Evidence_Raster_Layer in Evidence_Rasters]
            tables = []
            valid_rasters = []
            valid_raster_datatypes = []
            for Evidence_Raster_Layer, Evidence_Data_Type in zip(
                    Evidence_Rasters, Evidence_Data_Types):
                if Evidence_Raster_Layer in raster_tables:
                    valid_rasters.append(Evidence_Raster_Layer)
                    valid_raster_datatypes.append(Evidence_Data_Type)
                    tables.append(raster_tables[Evidence_Raster_Layer])
            #Get ranges for number of tables for each evidence layer (in input evidence order)
            ranges = map(range, map(len, tables))
            #gp.addmessage(str(ranges))
            #Get combinations of valid wts table for input evidence_rasters
            Weights_Tables_Per_Test = nested_fors(ranges, tables, len(tables))
            for Testnum, Weights_Tables in enumerate(Weights_Tables_Per_Test):
                gp.addmessage("------ Running tests... (%s) ------" %
                              (Testnum))
                # Process: Calculate Response...
                Test = chr(ord('A') + Testnum)
                dwrite(str(Weights_Tables))
                Weights_Tables = ";".join(Weights_Tables)
                prefix = Grand_WOFE_Name[1:] + Test
                gp.addMessage("%s: Response & Logistic Regression: %s,%s\n" %
                              (Test, ";".join(valid_rasters), Weights_Tables))
                Output_Post_Prob_Raster = gp.createuniquename(
                    prefix + "_pprb", gp.workspace)
                Output_Prob_Std_Dev_Raster = gp.createuniquename(
                    prefix + "_pstd", gp.workspace)
                Output_MD_Variance_Raster = gp.createuniquename(
                    prefix + "_mvar", gp.workspace)
                Output_Total_Std_Dev_Raster = gp.createuniquename(
                    prefix + "_tstd", gp.workspace)
                Output_Confidence_Raster = gp.createuniquename(
                    prefix + "_conf", gp.workspace)
                #gp.AddToolbox(tbxpath)
                #dwrite (str(dir(arcpy)))
                gp.addMessage(" Calculating response... ")
                out_paths = arcpy.CalculateResponse_ArcSDM(";".join(valid_rasters), Weights_Tables, Input_Training_Sites_Feature_Class, \
                                 Ignore_Missing_Data, Missing_Data_Value, Unit_Area__sq_km_, Output_Post_Prob_Raster, Output_Prob_Std_Dev_Raster, \
                                 Output_MD_Variance_Raster, Output_Total_Std_Dev_Raster, Output_Confidence_Raster)
                # Set the actual output parameters
                gp.addMessage("       ...done")

                actualoutput = []
                dwrite(str(out_paths))
                dwrite("Outputcount: " + str(out_paths.outputCount))
                dwrite("Output0: " + str(out_paths.getOutput(0)))
                paths = ""
                for i in range(0, out_paths.outputCount):
                    dwrite("Output: " + str(out_paths.getOutput(i)))
                    paths += out_paths.getOutput(i) + ";"

                #for raspath in out_paths.split(';'):
                for raspath in paths.split(';'):
                    if gp.exists(raspath.strip()):
                        actualoutput.append(raspath.strip())
                out_paths = ';'.join(actualoutput)
                #Append delimited string to list
                OutSet.append(
                    out_paths)  # Save name of output raster dataset for kluge
                dwrite(" Outset: " + str(OutSet))

                # Process: Logistic Regression...
                Output_Polynomial_Table = gp.createuniquename(
                    prefix + "_lrpoly.dbf", gp.workspace)
                Output_Coefficients_Table = gp.createuniquename(
                    prefix + "_lrcoef.dbf", gp.workspace)
                Output_Post_Probability_raster = gp.createuniquename(
                    prefix + "_lrpprb", gp.workspace)
                Output_Standard_Deviation_raster = gp.createuniquename(
                    prefix + "_lrstd", gp.workspace)
                Output_LR_Confidence_raster = gp.createuniquename(
                    prefix + "_lrconf", gp.workspace)
                #gp.AddToolbox(tbxpath)
                gp.addMessage(" Running logistic regression...")

                out_paths = arcpy.LogisticRegressionTool_ArcSDM(
                    ";".join(valid_rasters), ";".join(valid_raster_datatypes),
                    Weights_Tables, Input_Training_Sites_Feature_Class,
                    Missing_Data_Value, Unit_Area__sq_km_,
                    Output_Polynomial_Table, Output_Coefficients_Table,
                    Output_Post_Probability_raster,
                    Output_Standard_Deviation_raster,
                    Output_LR_Confidence_raster)
                dwrite(str(out_paths.status))
                gp.addMessage("     ...done ")

                # Set the output parameters
                #Append delimited string to list
                for i in range(0, out_paths.outputCount):
                    dwrite("Output: " + str(out_paths.getOutput(i)))
                    OutSet.append(out_paths.getOutput(i))
                #OutSet.append(out_paths) # Save name of output raster dataset for kluge
                #Set output parameters
                #gp.addmessage("==== ====")
                dwrite(str(out_paths.status))
            """Kluge because Geoprocessor can't handle variable number of ouputs"""
            dwrite(" Outset: " + str(OutSet))
            OutSet = ';'.join(OutSet)

            gp.addwarning("Copy the following line with ControlC,")
            gp.addwarning(
                "then paste in the Name box of the Add Data command,")
            gp.addwarning("then click Add button")
            dwrite(str(OutSet))
            #stoppitalle();
            gp.addwarning(OutSet)

        else:
            #Stop processing
            gp.AddError('No Valid Weights Tables: Stopped.')

    except:
        # get the traceback object
        tb = sys.exc_info()[2]
        # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n    " + \
            str(sys.exc_type)+ ": " + str(sys.exc_value) + "\n"
        # generate a message string for any geoprocessing tool errors
        if len(gp.GetMessages(2)) > 0:
            msgs = "GP ERRORS:\n" + gp.GetMessages(2) + "\n"
            gp.AddError(msgs)

        # return gp messages for use with a script tool
        gp.AddError(pymsg)

        # print messages for use in Python/PythonWin
        print(pymsg)
        raise
Exemplo n.º 34
def main(env):
    arcpy.CheckOutExtension('Spatial')
    gp = arcgisscripting.create()

    if env:
        # from ArcMap
        # User input data
        dem_path = arcpy.GetParameterAsText(0)
        folder_out_path = arcpy.GetParameterAsText(1)
        gdb_name = arcpy.GetParameterAsText(2)
        drain_burning = arcpy.GetParameterAsText(3)
        threshold = arcpy.GetParameterAsText(4)
        show_layers = arcpy.GetParameterAsText(5)
        epsg = arcpy.GetParameterAsText(6)
        make_fill = arcpy.GetParameterAsText(7)

        equidistant = gp.GetParameter(8)
        knick_name = gp.GetParameterAsText(9)

        hydro_zone = gp.GetParameter(10)
        # mxd_project = gp.GetParameter(11)

    else:
        # from console
        dem_path = r'C:\DIRECTOS\data\HydroDEM_Directos_3116.tif'
        folder_out_path = r'C:\DIRECTOS\results'
        gdb_name = 'UTTL'
        drain_burning = ''
        threshold = 324  # TODO: estimate the threshold from the scale and resolution of dem
        show_layers = False
        epsg = 3116
        make_fill = True

        equidistant = 200
        knick_name = 'knickpoints'

        hydro_zone = 12
        # mxd_project = 'Untitled'

    temp = os.path.join(folder_out_path, 'temp')
    if not os.path.isdir(temp):
        os.mkdir(temp)
    save_mxd(folder=temp, name='Untitled')
    arcpy.env.workspace = folder_out_path
    arcpy.env.overwriteOutput = True
    if not os.path.exists(os.path.join(folder_out_path, '{}.gdb'.format(gdb_name))):
        arcpy.CreateFileGDB_management(folder_out_path, '{}.gdb'.format(gdb_name))

    gdb_path = os.path.join(folder_out_path, '{}.gdb'.format(gdb_name))
    dem_conditioning(dem=dem_path, folder=folder_out_path, gdb=gdb_name, threshold=threshold, show=show_layers, epsg=epsg, fill=make_fill, drain_network=drain_burning)

    drain_network = os.path.join(folder_out_path, '{}.gdb'.format(gdb_name), 'drainage_line')
    extract_hydro_points(drain=drain_network, show=show_layers, folder=folder_out_path, gdb=gdb_name)
    knickpoints_extract(raw_dem=dem_path, shape_out=knick_name, drain_network=drain_network, folder=folder_out_path, eq=equidistant, gdb=gdb_name, epsg=epsg)
    knickpoints_filter(folder=folder_out_path, gdb=gdb_name, knick=knick_name)
    topog_points = os.path.join(folder_out_path, '{}.gdb'.format(gdb_name), 'knickpoints_filter')
    hydro_points = os.path.join(folder_out_path, '{}.gdb'.format(gdb_name), 'hydro_points')
    merge_points(knickpoints=topog_points, hydropoints=hydro_points, folder_out=folder_out_path, gdb=gdb_path, zone=hydro_zone)

    batchpoints_path = os.path.join(gdb_path, 'BatchPoints')
    fdr = os.path.join(gdb_path, 'fdr')
    str_stream = os.path.join(gdb_path, 'Str')
    uttl_maker(flow_grid=fdr, stream_grid=str_stream, batch_point=batchpoints_path, basins='UTTL_Basins', workspace=gdb_path)
    clear_layers()
Exemplo n.º 35
def loadChangeImageData(eco, interval):
    try:
        #Set up a log object
        trlog = TrendsUtilities.trLogger()

        #Create the geoprocessor object
        gp = arcgisscripting.create(9.3)
        gp.OverwriteOutput = True

        trlog.trwrite("Loading data for ecoregion " + str(eco.ecoNum) +
                      " and interval " + interval)

        #Make a query table from the database
        #The table is the changeImage table, and
        # only rows with the given ecoregion number, resolution,
        # and change interval are selected from ChangeImage for this table view
        tableName = TrendsNames.dbLocation + "ChangeImage"
        where_clause = "EcoLevel3ID = " + str(eco.ecoNum) + \
                " and ChangePeriod = \'" + interval + "\'" + \
                " and Resolution = \'" + eco.resolution + "\'"
        sort_fields = "BlkLabel A"
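        # For illustration only (hypothetical values): with ecoNum 17, interval
        # '1973to1980' and resolution '60m', the clause above becomes
        #   EcoLevel3ID = 17 and ChangePeriod = '1973to1980' and Resolution = '60m'
        # and rows come back sorted ascending by BlkLabel.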

        #Step through all the rows and find the block numbers
        # that are in the list of sample blocks
        #For those blocks, get the filename and store in a dictionary
        #  with the filename. format = {block:filename, block:filename, ...}
        changeFiles = {}
        rows = gp.SearchCursor(tableName, where_clause, "", "", sort_fields)
        row = rows.Next()
        while row:
            if row.BlkLabel in eco.stratBlocks:
                changeFiles[row.BlkLabel] = row.ImageLocation
            row = rows.Next()

        if len(changeFiles) > eco.sampleBlks:
            trlog.trwrite("Found " + str(len(changeFiles)) +
                          " change files but expected only " +
                          str(eco.sampleBlks))
            raise TrendsUtilities.TrendsErrors(
                "More block images found than expected - unable to process ecoregion"
            )

        trlog.trwrite("N= " + str(eco.totalBlks) + " n= " +
                      str(eco.sampleBlks))
        if (len(changeFiles) < eco.sampleBlks) and (eco.runType
                                                    == "Full stratified"):
            trlog.trwrite("WARNING: found only " + str(len(changeFiles)) +
                          " images in the change image table")

        #For each filename in the list, open its attribute table and read in the
        # conversion number and count columns. Add this to the array
        for block in changeFiles:
            rows = gp.SearchCursor(changeFiles[block])
            row = rows.Next()
            while row:
                if row.Count > 0 and row.Value > 0 and row.Value <= eco.numCon:
                    eco.ecoData[interval][0][row.Value - 1,
                                             eco.column[block]] = int(
                                                 row.Count)
                row = rows.Next()
        #For each block in the list of split blocks,
        #  read the conversion numbers for each block from the table for this ecoregion and interval
        #The split blocks have already been screened for interval and resolution
        #  If there are blocks in the split list, then they are ready to load (TBD)

        #Send an acknowledgment back that there was data found for
        #  this interval at this resolution
        # If changeFiles or eco.splitBlocks contain block numbers,
        #  the value returned will be TRUE.  If they are both empty, FALSE is returned
        return (changeFiles or eco.splitBlocks)

    except arcgisscripting.ExecuteError:
        msgs = gp.GetMessage(0)
        msgs += gp.GetMessages(2)
        trlog.trwrite(msgs)
        raise
    except TrendsUtilities.TrendsErrors, Terr:
        trlog.trwrite(Terr.message)
        raise
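
# A minimal sketch of the 9.3-style cursor idiom used above: wrapping the
# Next()-terminated loop in a generator. (The workarounds_93.rowgen helper used
# elsewhere in these examples presumably does something along these lines.)
def rowgen_sketch(cursor):
    """Yield rows from an arcgisscripting cursor until Next() returns None."""
    row = cursor.Next()
    while row:
        yield row
        row = cursor.Next()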
Exemplo n.º 36
def loadMultiChangeData(eco, multiInterval):
    try:
        #Set up a log object
        trlog = TrendsUtilities.trLogger()
        trlog.trwrite("Loading multichange data for ecoregion " + str(eco.ecoNum) + \
                      " and interval " + multiInterval)

        #Create the geoprocessor object
        gp = arcgisscripting.create(9.3)
        gp.OverwriteOutput = True

        #Make a query from the database
        #The table is the MultiChangeImage table, and
        # only rows with the given ecoregion number and resolution
        # are selected from ChangeImage for this table view
        tableName = TrendsNames.dbLocation + "MultiChangeImage"
        where_clause = "EcoLevel3ID = " + str(eco.ecoNum) + \
                " and ChangePeriod = \'" + multiInterval + "\'" + \
                " and Resolution = \'" + eco.resolution + "\'"
        sort_fields = "BlkLabel A"

        #Step through all the rows and find the block numbers
        # that are in the list of sample blocks
        #For those blocks, get the filename and store in a dictionary
        #  with the filename. format = {block:filename, block:filename, ...}
        changeFiles = {}
        rows = gp.SearchCursor(tableName, where_clause, "", "", sort_fields)
        row = rows.Next()
        while row:
            if row.BlkLabel in eco.stratBlocks:
                changeFiles[row.BlkLabel] = row.ImageLocation
            row = rows.Next()

        #Find the number of change intervals for this interval
        changes = TrendsNames.MultiMap[multiInterval]

        #For each filename in the list, open its attribute table and read in the
        # multichange number and count columns. Add this to the array
        for block in changeFiles:
            rows = gp.SearchCursor(changeFiles[block])
            row = rows.Next()
            while row:
                if row.Count > 0 and row.Value >= 0 and row.Value <= changes:
                    eco.ecoMulti[multiInterval][0][row.Value,
                                                   eco.column[block]] = int(
                                                       row.Count)
                row = rows.Next()
        #For each block in the list of split blocks,
        #  read the conversion numbers for each block from the table for this ecoregion and interval
        #The split blocks have already been screened for interval and resolution
        #  If there are blocks in the split list, then they are ready to load (TBD)

        #Send an acknowledgment back that there was data found at this resolution
        # If changeFiles or eco.splitBlocks contain block numbers,
        #  the value returned will be TRUE.  If they are both empty, FALSE is returned
        return (changeFiles or eco.splitBlocks)

    except arcgisscripting.ExecuteError:
        msgs = gp.GetMessage(0)
        msgs += gp.GetMessages(2)
        trlog.trwrite(msgs)
        raise
    except TrendsUtilities.TrendsErrors, Terr:
        trlog.trwrite(Terr.message)
        raise
Exemplo n.º 37
def Execute(self, parameters, messages):
    if PY2:
        reload(arcsdm.common)
    if PY34:
        importlib.reload(arcsdm.common)
    gp = arcgisscripting.create()
    # Check out any necessary licenses
    gp.CheckOutExtension("spatial")

    if arcsdm.common.testandwarn_filegeodatabase_environment():
        return;

    gp.OverwriteOutput = 1
    gp.LogHistory = 1

    # Load required toolboxes...

    # Script arguments...
    try:
        unitCell = parameters[5].value
        CheckEnvironment();
        if unitCell < (float(gp.CellSize)/1000.0)**2:
            unitCell = (float(gp.CellSize)/1000.0)**2
            gp.AddWarning('Unit Cell area is less than area of Study Area cells.\n'+
                        'Setting Unit Cell to area of study area cells: %.0f sq km.'%unitCell)
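        # Worked example of the check above (illustrative numbers): with 100 m
        # cells, (float(gp.CellSize)/1000.0)**2 = 0.01 sq km, so any smaller
        # user-supplied Unit Cell is raised to 0.01.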

        #Get evidence layer names
        Input_Rasters = parameters[0].valueAsText.split(';')
        #Remove group layer names 
        for i, s in enumerate(Input_Rasters):
            Input_Rasters[i] = s.strip("'"); #arcpy.Describe( s.strip("'")).file;
            dwrite (arcpy.Describe( s.strip("'")).file);
            dwrite (Input_Rasters[i]);
            if arcsdm.common.testandwarn_filegeodatabase_source(s):
                return;
        gp.AddMessage("Input rasters: " + str(Input_Rasters))

        #Get evidence layer types
        Evidence_types = parameters[1].valueAsText.lower().split(';')
        gp.AddMessage('Evidence_types: %s'%(str(Evidence_types)))
        if len(Evidence_types) != len(Input_Rasters):
            gp.AddError("Not enough Evidence types!")
            raise Exception
        for evtype in Evidence_types:
            if not evtype[0] in 'ofcad':
                gp.AddError("Incorrect Evidence type: %s"%evtype)
                raise Exception
        #Get weights tables names
        Wts_Tables = parameters[2].valueAsText.split(';')
        gp.AddMessage('Wts_Tables: %s'%(str(Wts_Tables)))
        for i, s in enumerate(Wts_Tables):                  
            arcpy.AddMessage(s);
            if arcsdm.common.testandwarn_filegeodatabase_source(s):
                return;
        if len(Wts_Tables) != len(Input_Rasters):
            gp.AddError("Not enough weights tables!")
            raise Exception
        #Get Training sites feature layer
        TrainPts = parameters[3].valueAsText
        gp.AddMessage('TrainPts: %s'%(str(TrainPts)))
        #Get missing data values
        MissingDataValue = parameters[4].valueAsText
        lstMD = [MissingDataValue for ras in Input_Rasters]
        gp.AddMessage('MissingDataValue: %s'%(str(MissingDataValue)))
        #Get output raster name
        thmUC = gp.createscratchname("tmp_UCras", '', 'raster',   gp.scratchworkspace)

        #Print out SDM environmental values
        sdmvalues.appendSDMValues(gp, unitCell, TrainPts)

        #Create Generalized Class tables
        Wts_Rasters = []
        mdidx = 0
        gp.AddMessage("Creating Generalized Class rasters.")
        for Input_Raster, Wts_Table in zip(Input_Rasters, Wts_Tables):
            Output_Raster = gp.CreateScratchName(os.path.basename(Input_Raster[:9]) + "_G", '', 'rst', gp.scratchworkspace)            
            gp.AddMessage('Output_Raster: %s'%(str(Output_Raster)))
        #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
            #++ Need to create in-memory Raster Layer for AddJoin
            #RasterLayer = arcpy.env.workspace + "\\OutRas.lyr"
            #Table_temp = "temp_tab"
        #=========================================================
            #temp_gdb = arcpy.env.scratchGDB
            #asterGDB = temp_gdb + "\\" + RasterLayer
            #TableGDB = temp_gdb + "\\" + Table_temp
            #gp.RasterToGeodatabase_conversion(Input_Raster, temp_gdb)
            #gp.CopyRaster_management(Input_Raster, RasterGDB)
            #gp.CopyRows_management(Wts_Table, TableGDB)
            #gp.AddMessage('RasterGDB: %s'%(str(RasterGDB)))

            #arcpy.MakeRasterCatalogLayer_management(RasterGDB,RasterLayer)
            #gp.BuildRasterAttributeTable_management(Input_Raster, "OVERWRITE") 
            #arcpy.MakeRasterLayer_management(Input_Raster, RasterLayer)
            #arcpy.MakeTableView_management(Input_Raster,RasterLayer)


            #arcpy.SaveToLayerFile_management(Input_Raster, RasterLayer)
        #=========================================================
            #gp.AddMessage('Input_Raster: %s'%(str(Input_Raster)))
            #gp.AddMessage('Wts_Table: %s'%(str(Wts_Table)))
            #gp.makerasterlayer(Input_Raster, RasterLayer)
            
            #gp.AddJoin_management(RasterLayer, "Value", Wts_Table, "CLASS")
            Temp_Raster = gp.CreateScratchName('tmp_rst', '', 'rst',  gp.scratchworkspace)
            gp.copyraster_management(Input_Raster, Temp_Raster)
            gp.JoinField_management(Temp_Raster, 'Value', Wts_Table, 'CLASS')
            
            gp.AddMessage('Temp_Raster: %s'%(str(Temp_Raster)))
            #gp.CopyRaster_management(RasterLayer, Temp_Raster)
            gp.Lookup_sa(Temp_Raster, "GEN_CLASS", Output_Raster)
            #gp.delete(RasterLayer)
            #gp.delete(TableGDB)
        #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            #gp.AddMessage(Output_Raster + " exists: " + str(gp.Exists(Output_Raster)))
            if not gp.Exists(Output_Raster):
                gp.AddError(Output_Raster + " does not exist.")
                raise Exception
            Wts_Rasters.append(gp.Describe(Output_Raster).CatalogPath)
        #Create the Unique Conditions raster from Generalized Class rasters
    ##    #>>>> Comment out for testing >>>>>>>>>>>>>>>>>>>>>>>>>>
        Input_Combine_rasters = ";".join(Wts_Rasters)
        #Combine created Wts_Rasters and add to TOC
        #gp.AddMessage('Combining...%s'%Input_rasters)
        if gp.exists(thmUC): gp.delete_management(thmUC)
        gp.Combine_sa(Input_Combine_rasters, thmUC)
    ##    #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        #gp.AddMessage('Combine done...')

        #Get UC lists from combined raster
        UCOIDname = gp.describe(thmUC).OIDfieldname
        #First get names of evidence fields in UC raster
        evflds = []
        ucflds = gp.ListFields(thmUC)
        ucfld = ucflds.Next()
        while ucfld:
            if (ucfld.Name == UCOIDname) or (ucfld.Name.upper() in ('VALUE', 'COUNT')):
            #if ucfld.Name == UCOIDname or ucfld.Name == 'Value' or ucfld.Name == 'Count':
                pass
            else:
                evflds.append(ucfld.Name)
            ucfld = ucflds.Next()
        #gp.AddMessage('evflds: %s'%str(evflds))
        #Set up empty list of lists
        lstsVals = [[] for fld in evflds]
        #gp.AddMessage('lstsVals: %s'%str(lstsVals))
        #Put UC vals and areas for each evidence layer in lstsVals
        cellSize = float(gp.CellSize)
        lstAreas = [[] for fld in evflds]
        if gp.describe(thmUC).datatype == 'RasterLayer':
            thmUCRL = gp.describe(thmUC).catalogpath
        else:
            thmUCRL = thmUC
        ucrows = workarounds_93.rowgen(gp.SearchCursor(thmUCRL))
        for ucrow in ucrows:
            for i, fld in enumerate(evflds):
                lstsVals[i].append(ucrow.GetValue(fld))
                lstAreas[i].append(ucrow.Count * cellSize * cellSize / (1000000.0 * unitCell))
        #gp.AddMessage('lstsVals: %s'%(str(lstsVals)))
        #gp.AddMessage('lstAreas: %s'%(str(lstAreas)))

        #Check Maximum area of conditions so not to exceed 100,000 unit areas
        #This is a limitation of Logistic Regression: sdmlr.exe
        maxArea = max(lstAreas[0])
        if (maxArea/unitCell)/100000.0 > 1:
            unitCell = math.ceil(maxArea/100000.0)
            gp.AddWarning('UnitCell is set to minimum %.0f sq. km. to avoid area limit in Logistic Regression!'%unitCell)

        #Get Number of Training Sites per UC Value
        #First Extract RasterValues to Training Sites feature layer
        #ExtrTrainPts = os.path.join(gp.ScratchWorkspace, "LRExtrPts.shp")
        #ExtrTrainPts = gp.CreateScratchName('LRExtrPts', 'shp', 'shapefile', gp.scratchworkspace)
        #gp.ExtractValuesToPoints_sa(TrainPts, thmUC, ExtrTrainPts, "NONE", "VALUE_ONLY")
        ExtrTrainPts = workarounds_93.ExtractValuesToPoints(gp, thmUC, TrainPts, "TPFID")
        #Make dictionary of Counts of Points per RasterValue
        CntsPerRasValu = {}
        tpFeats = workarounds_93.rowgen(gp.SearchCursor(ExtrTrainPts))
        for tpFeat in tpFeats:
            if tpFeat.RasterValu in CntsPerRasValu.keys():
                CntsPerRasValu[tpFeat.RasterValu] += 1
            else:
                CntsPerRasValu[tpFeat.RasterValu] = 1
        #gp.AddMessage('CntsPerRasValu: %s'%(str(CntsPerRasValu)))
        #Make Number of Points list in RasterValue order
        #Some rastervalues can have no points in them
        lstPnts = []
        numUC = len(lstsVals[0])
        for i in range(1, numUC+1): #Combined raster values start at 1
            if i in CntsPerRasValu.keys():
                lstPnts.append(CntsPerRasValu.get(i))
            else:
                lstPnts.append(0)
        #gp.AddMessage('lstPnts: %s'%(lstPnts))
        lstsMC = []
        mcIndeces = []
        for et in Evidence_types:
            if et.startswith('o') or et.startswith('a') or et.startswith('d'):
                mcIndeces.append(-1)
            elif et.startswith('f') or et.startswith('c'):
                mcIndeces.append(1)
            else:
                gp.AddError('Incorrect evidence type')
                raise Exception
        if len(mcIndeces) != len(Input_Rasters):
            gp.AddError("Incorrect number of evidence types.")
            raise Exception
        #gp.AddMessage('mcIndeces: %s'%(str(mcIndeces)))
        catMCLists = [[], mcIndeces]
        evidx = 0
        for mcIdx in mcIndeces:
            catMCLists[0].append([])
            if mcIdx<0:
                pass
            else:
                #Make a list of free raster values
                #evidx = len(catMCLists[0]) - 1
                #gp.AddMessage(Wts_Rasters[evidx])
                wts_g = gp.createscratchname("Wts_G")
                gp.MakeRasterLayer_management(Wts_Rasters[evidx], wts_g)
                #evrows = gp.SearchCursor("Wts_G")
                evrows = FloatRasterSearchcursor(gp, wts_g)
                #evrow = evrows.next()
                for evrow in evrows:
                    #gp.AddMessage("Value: %s"%evrow.value)
                    if evrow.Value not in catMCLists[0][evidx]:
                        catMCLists[0][evidx].append(evrow.Value)
                    #evrow = evrows.next()
            evidx += 1
        #gp.AddMessage('catMCLists: %s'%(catMCLists))
        lstWA = CalcVals4Msng(lstsVals, lstAreas[0], lstMD, catMCLists)
        #gp.AddMessage('lstWA: %s'%(str(lstWA)))
        ot = [['%s, %s'%(Input_Rasters[i], Wts_Tables[i])] for i in range(len(Input_Rasters))]
        #gp.AddMessage("ot=%s"%ot)
        strF2 = "case.dat"
        fnCase = os.path.join(arcpy.env.scratchFolder, strF2)
        fCase = open(fnCase, 'w')
        if not fCase :
            gp.AddError("Can't create 'case.dat'.")
            raise Exception
        nmbUC = len(lstsVals[0])
        getNmbET = True # True when first line of case.dat
        nmbET = 0 # Number of ET values in a line of case.dat
        #gp.AddMessage("Writing Logistic Regression input files...")
        ''' Reformat the labels for free evidence '''
        for j in range(len(lstsVals)):
            mcIdx = mcIndeces[j]
            if mcIdx > -1:
                listVals = catMCLists[0][j]
                #gp.AddMessage('listVals: %s'%(listVals))
                lstLV = listVals[:]
                lstLV = RemoveDuplicates(lstLV)
                elOT = ot[j]
                tknTF = elOT[0].split(',')
                strT = tknTF[0].strip()
                strF = tknTF[1].strip()
                first = True
               #gp.AddMessage("lstLV=%s"%lstLV)
                #gp.AddMessage("elOT=%s"%elOT)
                for lv in lstLV:
                    if lv == lstMD[j]: continue
                    if first:
                        elOT = ["%s (%s)"%(elOT[0], lv)]
                        first = False
                    else:
                        elOT.append("%s, %s (%s)"%(strT, strF, lv))
                    #gp.AddError("elOT=%s"%elOT)
                ot[j] = elOT
        #gp.AddMessage('ot=%s'%(str(ot)))
    ##' Loop through the unique conditions, substituting
    ##' the weighted average of known classes for missing data
    ##' and 'expanding' multi-class free data themes to
    ##' a series of binary themes
    ##'----------------------------------------------
        #gp.AddMessage('lstWA: %s'%lstWA)
        for i in range(nmbUC):
            numPoints = lstPnts[i]
    ##        #>>> This is a kluge for problem in case.dat for sdmlr.exe
    ##        if numPoints == 0: continue
    ##        #Fractional numpoints is not accepted
    ##        #This means that UC area had no extracted points,
    ##        #and should not be a case here.
    ##        #<<< End kluge for case.dat
            wLine = ""
            wLine = wLine + ('%-10d'%(i+1))
            j = 0
            for lst in lstsVals:
                missing = lstMD[j]
                theVal = lst[i]
                mcIdx = mcIndeces[j]
                if mcIdx < 0: #ordered evidence
                    if getNmbET: nmbET = nmbET + 1
                    if theVal == missing:
                        theVal = lstWA[j][0] #avgweighted
                    wLine = wLine + '%-20s'%theVal
                else: #free evidence
                    listVals = catMCLists[0][j]
                    #gp.AddMessage('catMCLists[%d]: %s'%(j, catMCLists[0][j]))
                    OFF = 0
                    ON = 1
                    if theVal == missing:
                        m=0
                        for v in listVals:
                            if v == missing:
                                continue
                            else:
                                #gp.AddMessage('lstWA[%d][%d]=%s'%(j, m, lstWA[j]))
                                valWA = lstWA[j][m]
                                wLine = wLine + '%-20s'%valWA
                                m += 1
                                if getNmbET: nmbET += 1
                    else:
                        for v in listVals:
                            if v == missing:
                                continue
                            elif getNmbET: nmbET += 1
                            if theVal == v:
                                wLine = wLine + '%-20s'%ON
                            else:
                                wLine = wLine + '%-20s'%OFF
                j += 1
            wLine = wLine + '%-10d'%numPoints
            theArea = lstAreas[0][i] / unitCell
            wLine = wLine + '%-20s' %theArea
            fCase.write(wLine + '\n')
            getNmbET = False
        fCase.close()
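        # Shape of one case.dat record, per the format strings above: a
        # '%-10d' unique-condition id, one '%-20s' column per expanded
        # evidence value (weighted averages substituted for missing data),
        # a '%-10d' training-site count, and a '%-20s' area in unit cells.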
    ##' Write a parameter file to the ArcView extension directory
    ##'----------------------------------------------
        strF1 = "param.dat"
        fnParam = os.path.join(arcpy.env.scratchFolder, strF1) #param.dat file
        fParam = open(fnParam, 'w')
        if not fParam:
            gp.AddError("Error writing logistic regression parameter file.")
            raise Exception
        fParam.write('%s\\\n' %(arcpy.env.scratchFolder))
        fParam.write('%s\n' %strF2)
        fParam.write("%d %g\n" %(nmbET, unitCell))
        fParam.close()
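        # Resulting param.dat layout (values illustrative): line 1 is the
        # scratch folder path ending in '\', line 2 the case file name, and
        # line 3 the evidence-value count and unit cell area, e.g.
        #   C:\...\scratch\
        #   case.dat
        #   12 1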

    ### RunLR ------------------------------------------------------------------------------------------------------------
    #Check input files
        #Check input files exist
        #Paramfile = os.path.join(gp.scratchworkspace, 'param.dat')
        Paramfile = os.path.join(arcpy.env.scratchFolder, 'param.dat')
        if gp.exists(Paramfile):
            pass
            #gp.AddMessage("\nUsing the following input file in Logistic Regression: %s"%(Paramfile))
        else:
            gp.AddError("Logistic regression parameter file does not exist: %s"%Paramfile)
            raise Exception
        #Place input files folder in batch file
        #sdmlr.exe starts in input files folder.
        sdmlr = os.path.join(sys.path[0], 'bin', 'sdmlr.exe')
        if not os.path.exists(sdmlr):
            gp.AddError("Logistic regression executable file does not exist: %s"%sdmlr)
            raise Exception
        os.chdir(arcpy.env.scratchFolder)
        if os.path.exists('logpol.tba'): os.remove('logpol.tba')
        if os.path.exists('logpol.out'): os.remove('logpol.out')
        if os.path.exists('cumfre.tba'): os.remove('cumfre.tba')
        if os.path.exists('logco.dat'): os.remove('logco.dat')
        fnBat = os.path.join(arcpy.env.scratchFolder, 'sdmlr.bat')
        #fnBat = os.path.join( sys.path[0], 'sdmlr.bat')
        fBat = open(fnBat, 'w')
        #fBat.write("%s\n"%os.path.splitdrive(gp.ScratchWorkspace)[0])
        fBat.write("%s\n"%os.path.splitdrive(arcpy.env.scratchFolder)[0])
        fBat.write("CD %s\n"%os.path.splitdrive(arcpy.env.scratchFolder)[1])
        fBat.write('"%s"\n'%sdmlr)
        fBat.close()
        params = []
        try:
            #os.spawnv(os.P_WAIT, fnBat, params) # <==RDB  07/01/2010  replace with subprocess
            import subprocess
            p = subprocess.Popen([fnBat] + params).wait()
            gp.AddMessage('Running %s: '%fnBat)
        except OSError:
            gp.AddMessage('Execution failed: %s'%fnBat)

        if not os.path.exists('logpol.tba'):
            gp.AddError("Logistic regression output file %s\\logpol.tba does not exist.\n Error in case.dat or param.dat. "%arcpy.env.scratchFolder)
            raise Exception
        #gp.AddMessage("Finished running Logistic Regression")

    ###ReadLRResults -------------------------------------------------------------------------------------------------------

        thmuc = thmUC
        vTabUC = 'thmuc_lr'
        gp.MakeRasterLayer_management(thmuc, vTabUC)
        strFN = "logpol.tba"
        #strFnLR = os.path.join(gp.ScratchWorkspace, strFN)
        strFnLR = os.path.join(arcpy.env.scratchFolder, strFN)

        if not gp.Exists(strFnLR):
            gp.AddError("Reading Logistic Regression Results\nCould not find file: %s"%strFnLR)
            raise Exception('Existence error')
        #gp.AddMessage("Opening Logistic Regression Results: %s"%strFnLR)
        fLR = open(strFnLR, "r")
        if not fLR:
            gp.AddError("Input Error - Unable to open the file: %s for reading." %strFnLR)
            raise Exception('Open error')
        read = 0
        #fnNew = gp.GetParameterAsText(6)
        fnNew = parameters[6].valueAsText
        tblbn = os.path.basename(fnNew)
        [tbldir, tblfn] = os.path.split(fnNew)
        if tbldir.endswith(".gdb"):
            tblfn = tblfn[:-4] if tblfn.endswith(".dbf") else tblfn
            fnNew = fnNew[:-4] if fnNew.endswith(".dbf") else fnNew
            tblbn = tblbn[:-4] if tblbn.endswith(".dbf") else tblbn
        gp.AddMessage("fnNew: %s"%fnNew)
        gp.AddMessage('Making table to hold logistic regression results: %s'%fnNew)
        fnNew = tblbn
        print ("Table dir: ", tbldir);
        gp.CreateTable_management(tbldir, tblfn)
        print('Making table to hold logistic regression results: %s'%fnNew)
        fnNew = tbldir + "/" + fnNew;

        #To point to REAL table

        gp.AddField_management(fnNew, 'ID', 'LONG', 6)
        gp.AddField_management(fnNew, 'LRPostProb', 'Double', "#", "#", "#", "LR_Posterior_Probability")
        gp.AddField_management(fnNew, 'LR_Std_Dev', 'Double', "#", "#", "#", "LR_Standard_Deviation")
        gp.AddField_management(fnNew, 'LRTValue', 'Double', "#", "#", "#", "LR_TValue")
        gp.DeleteField_management(fnNew, "Field1")
        vTabLR = fnNew
        strLine = fLR.readline()
        #vTabUCrows = workarounds_93.rowgen(gp.SearchCursor(vTabUC))
        #vTabUCrow = vTabUCrows.Next()
        #ttl = 0
        #while vTabUCrow:
        #for vTabUCrow in vTabUCrows: ttl += 1
            #vTabUCrow = vTabUCrows.Next()
        #gp.AddMessage("Reading Logistic Regression Results: %s"%strFnLR)
        vTabLRrows = gp.InsertCursor(vTabLR)
        while strLine:
            print (strLine);
            if strLine.strip() == 'DATA':
                read = 1
            elif read:
                vTabLRrow = vTabLRrows.NewRow()
                lstLine = strLine.split()
                if len(lstLine) > 5:
                    #gp.AddMessage('lstLine: %s'%lstLine)
                    vTabLRrow.SetValue("ID", int(lstLine[1].strip()))
                    vTabLRrow.SetValue("LRPostProb", float(lstLine[3].strip()))
                    vTabLRrow.SetValue("LR_Std_Dev", float(lstLine[5].strip()))
                    vTabLRrow.SetValue("LRTValue", float(lstLine[4].strip()))
                    vTabLRrows.InsertRow(vTabLRrow)
            strLine = fLR.readline()
        fLR.close()
        del vTabLRrow, vTabLRrows
        #gp.AddMessage('Created table to hold logistic regression results: %s'%fnNew)

    ##' Get the coefficients file
    ##'----------------------------------------------
        strFN2 = "logco.dat"
        fnLR2 = os.path.join(arcpy.env.scratchFolder, strFN2)
    ##  ' Open file for reading
    ##  '----------------------------------------------
        #gp.AddMessage("Opening Logistic Regression coefficients Results: %s"%fnLR2)
        fLR2 = open(fnLR2, "r")
        read = 0
    ##  ' Expand object tag list of theme, field, value combos
    ##  '----------------------------------------------
        #gp.AddMessage('Expanding object tag list of theme, field, value combos')
        lstLabels = []
        for el in ot:
            for e in el:
                lstLabels.append(e.replace(' ', ''))
        #gp.AddMessage('lstLabels: %s'%lstLabels)
    ##  ' Make vtab to hold theme coefficients
    ##  '----------------------------------------------
        #fnNew2 = gp.GetParameterAsText(7)
        fnNew2 = parameters[7].valueAsText
        tblbn = os.path.basename(fnNew2)
        [tbldir, tblfn] = os.path.split(fnNew2)
        if tbldir.endswith(".gdb"):
            tblfn = tblfn[:-4] if tblfn.endswith(".dbf") else tblfn
            fnNew2 = fnNew2[:-4] if fnNew2.endswith(".dbf") else fnNew2
            tblbn = tblbn[:-4] if tblbn.endswith(".dbf") else tblbn
        fnNew2 = tblbn
        print ("Tabledir: ", tbldir);
        #gp.AddMessage('Making table to hold theme coefficients: %s'%fnNew2)
        print('Making table to hold theme coefficients: %s'%fnNew2)
        #fnNew2 = tbldir + "/" + fnNew2;
        fnNew2 = os.path.join(tbldir, fnNew2)
        gp.AddMessage('Making table to hold theme coefficients: %s'%fnNew2)
        gp.CreateTable_management(tbldir, tblfn)
        gp.AddField_management(fnNew2, "Theme_ID", 'Long', 6, "#", "#", "Theme_ID")
        gp.AddField_management(fnNew2, "Theme", 'text', "#", "#", 256, "Evidential_Theme")
        gp.AddField_management(fnNew2, "Coeff", 'double', "#", "#", "#", 'Coefficient')
        gp.AddField_management(fnNew2, "LR_Std_Dev", 'double', "#", "#", "#", "LR_Standard_Deviation")
        gp.DeleteField(fnNew2, "Field1")
        vTabLR2 = fnNew2
        strLine = fLR2.readline()
        i = 0
        first = 1
        #gp.AddMessage("Reading Logistic Regression Coefficients Results: %s"%fnLR2)
        vTabLR2rows = gp.InsertCursor(vTabLR2)
        print ("Starting to read LR_Coeff")
        while strLine:
            print ("Rdr:" , strLine);
            if len(strLine.split()) > 1:
                if strLine.split()[0].strip() == 'pattern':
                    read = 1
                    strLine = fLR2.readline()
                    continue
            if read:

                lstLine = strLine.split()
                if len(lstLine) > 2:
                    vTabLR2row = vTabLR2rows.NewRow()
                    #vTabLR2row.SetValue('Theme_ID', long(lstLine[0].strip())+1)
                    print ("Theme: ", lstLine[0].strip());
                    vTabLR2row.SetValue('Theme_ID', int(lstLine[0].strip())+1)
                    if not first:
                        try:
                            #For all but first...
                            lbl = lstLabels.pop(0);
                            print ("Lbl:", lbl);
                            vTabLR2row.SetValue('Theme', lbl)
                        except IndexError:
                            gp.AddError('Evidence info not consistent with %s file'%fnLR2)
                        i = i+1
                    else:
                        vTabLR2row.SetValue('Theme', "Constant Value")
                        first = 0
                    print ("Coeff:", lstLine[1].strip());
                    vTabLR2row.SetValue("Coeff", float(lstLine[1].strip()))
                    print ("LR_std_dev:", lstLine[2].strip());
                    vTabLR2row.SetValue("LR_Std_Dev", float(lstLine[2].strip()))
                    vTabLR2rows.InsertRow(vTabLR2row)
                else:
                    break
            strLine = fLR2.readline()
        fLR2.close()
        if len(lstLabels) != 0:
            gp.AddError('Evidence info not consistent with %s file'%fnLR2)
        del vTabLR2row, vTabLR2rows
        #gp.AddMessage('Created table to hold theme coefficients: %s'%fnNew2)

        #Creating LR Response Rasters
        #Join LR polynomial table to unique conditions raster and copy
        #to get a raster with attributes
        cmb = thmUC
        cmbrl = 'cmbrl'
        cmbrl_ = 'cmbrl_lyr'
        
        #gp.makerasterlayer_management(cmb, cmbrl)
        #tbl = gp.GetParameterAsText(6)
        tbl = parameters[6].valueAsText
        tbltv = 'tbltv'
        gp.maketableview_management(tbl, tbltv)
        #gp.addjoin_management(cmbrl, 'Value', tbltv, 'ID')
        cmb_cpy = gp.createscratchname("cmb_cpy", '', 'raster', arcpy.env.scratchFolder)
        gp.copyraster_management(cmb, cmb_cpy)
        gp.JoinField_management(cmb_cpy, 'Value', tbltv, 'ID')
                                                              
        #Make output float rasters from attributes of joined unique conditions raster
        #outRaster1 = gp.GetParameterAsText(8)
        #outRaster2 = gp.GetParameterAsText(9)
        #outRaster3 =  gp.GetParameterAsText(10)
        outRaster1 =  parameters[8].valueAsText
        outRaster2 =  parameters[9].valueAsText
        outRaster3 =  parameters[10].valueAsText
        gp.addmessage("="*41+'\n'+"="*41)
        ##template = {'cmbrl':cmb_cpy}
        ##InExp = "CON(%(cmbrl)s.LRPOSTPROB >= 0, %(cmbrl)s.LRPOSTPROB, 0)"%template
        ##gp.SingleOutputMapAlgebra_sa(InExp, outRaster1)
        ##InExp = "CON(%(cmbrl)s.LR_STD_DEV >= 0, %(cmbrl)s.LR_STD_DEV, 0)"%template
        ##gp.SingleOutputMapAlgebra_sa(InExp, outRaster2)
        ##InExp = "CON(%(cmbrl)s.LRTVALUE >= 0, %(cmbrl)s.LRTVALUE, 0)"%template
        ##gp.SingleOutputMapAlgebra_sa(InExp, outRaster3) # <==RDB  07/01/2010
        # <==RDB  07/01/2010 -  SOMA expression is crashing in version 10. Changed to use Con tool.
        
        #gp.Con_sa(cmb_cpy,cmb_cpy+".LRPOSTPROB",outRaster1,"0","LRPOSTPROB > 0")
        #gp.Con_sa(cmb_cpy,cmb_cpy+".LR_STD_DEV",outRaster2,"0","LR_STD_DEV > 0")
        #gp.Con_sa(cmb_cpy,cmb_cpy+".LRTVALUE",outRaster3,"0","LRTVALUE > 0")
        outcon1 = Con(cmb_cpy, Lookup(cmb_cpy,"LRPOSTPROB"),"0","LRPOSTPROB > 0")
        outcon1.save(outRaster1)
        outcon2 = Con(cmb_cpy,Lookup(cmb_cpy,"LR_STD_DEV"),"0","LR_STD_DEV > 0")
        outcon2.save(outRaster2)
        outcon3 = Con(cmb_cpy,Lookup(cmb_cpy,"LRTVALUE"),"0","LRTVALUE > 0")
        outcon3.save(outRaster3)
        

        #Add t0 display
        #gp.SetParameterAsText(6, tbl)
        arcpy.SetParameterAsText(6,tbl)
        #gp.SetParameterAsText(7, gp.describe(vTabLR2).catalogpath)
        arcpy.SetParameterAsText(7, gp.describe(vTabLR2).catalogpath)
        #gp.SetParameterAsText(8, outRaster1)
        arcpy.SetParameterAsText(8, outRaster1)
        #gp.SetParameterAsText(9, outRaster2)
        arcpy.SetParameterAsText(9, outRaster2)
        #gp.SetParameterAsText(10, outRaster3)
        arcpy.SetParameterAsText(10, outRaster3)
    except arcpy.ExecuteError as e:
        arcpy.AddError("\n");
        arcpy.AddMessage("Caught ExecuteError in logistic regression. Details:");
        args = e.args[0];
        arcpy.AddError(args);
        # get the traceback object
        tb = sys.exc_info()[2]
         # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        msgs = "Traceback\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        arcpy.AddError(msgs)
        raise 
    except:
        # get the traceback object
        tb = sys.exc_info()[2]
         # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        msgs = "Traceback\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n    " +str(sys.exc_type)+ ": " + str(sys.exc_value) + "\n"
        # generate a message string for any geoprocessing tool errors
        msgs = "GP ERRORS:\n" + gp.GetMessages(2) + "\n"
        gp.AddError(msgs)
        # return gp messages for use with a script tool
        gp.AddError(pymsg)
        # print messages for use in Python/PythonWin
        print (pymsg)
        raise
Exemplo n.º 38
def fn_beechie(raster, drain_shape, uttl_basins, fac, workspace, CTr, qTr):
    """
    Esta funcion estima el grado de confinamiento
    correspondiente a un tramo de corriente a
    partir de los resultados del modulo MRVBF
    de Saga y el ancho trenzado de acuerdo con
    el modelo de regresion en funcion del area.
    :param raster: Es el raster reclasificado de MRVBF
    :param drain_shape: Es el vectorial de los tramos de corriente de las UTTL
    :param uttl_basins: Es el vectorial de las UTTL
    :return: Agrega al shape de UTTL un atributo llamado w_valley, area_MRVBF, WB, DoC
    """
    gp = arcgisscripting.create()
    gp.CheckOutExtension("Spatial")
    arcpy.env.workspace = '{}'.format(
        os.path.dirname(os.path.abspath(workspace)))
    arcpy.env.overwriteOutput = True
    arcpy.env.qualifiedFieldNames = False
    temp_folder = '{}/temp'.format(os.path.dirname(workspace))

    gp.AddMessage('Wvalley width ... ')

    arcpy.gp.RasterCalculator_sa(
        'Con(IsNull("{}"),0,"{}")'.format(raster, raster),
        '{}/temp_mrvbf.tif'.format(temp_folder))
    arcpy.gp.ZonalStatisticsAsTable_sa(uttl_basins, "Name",
                                       '{}/temp_mrvbf.tif'.format(temp_folder),
                                       '{}/mrvbf_stats'.format(temp_folder),
                                       "NODATA", "SUM")

    arcpy.TableToTable_conversion('{}/mrvbf_stats'.format(temp_folder),
                                  temp_folder, 'mrvbf_stats.csv')
    arcpy.TableToTable_conversion(drain_shape, temp_folder,
                                  'drain_lengths.csv')

    x_size = float(
        arcpy.GetRasterProperties_management(raster, "CELLSIZEX").getOutput(0))
    y_size = float(
        arcpy.GetRasterProperties_management(raster, "CELLSIZEY").getOutput(0))

    df_stats = pd.read_csv('{}/mrvbf_stats.csv'.format(temp_folder),
                           index_col='NAME')
    df_drain = pd.read_csv('{}/drain_lengths.csv'.format(temp_folder),
                           index_col='Name')

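    # Valley-bottom area per unit: MRVBF cell count (SUM) times cell area; the
    # mean valley width is then that area divided by the reach length in the unit.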
    df_stats['Area_MRVBF'] = df_stats['SUM'] * x_size * y_size
    df_stats['w_valley'] = df_stats['Area_MRVBF'] / df_drain['Shape_Length']
    df_stats.index.name = 'Code'

    gp.AddMessage('Width braided valley  ... ')

    arcpy.TableToTable_conversion(uttl_basins, temp_folder, 'UTTL_table.csv')
    df_uttl = pd.read_csv('{}/UTTL_table.csv'.format(temp_folder),
                          index_col='Name')
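    # Braided width from the area-based regression (WB = 17.748 * A^0.3508, A in
    # km2), then degree of confinement (DoC) as valley width over braided width.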
    df_stats['WB'] = 17.748 * (df_uttl['Areas_Km2']**0.3508)
    df_stats['DoC'] = df_stats['w_valley'] / df_stats['WB']

    gp.AddMessage('Regionalization Q max Discharge UPME Methodology ... ')

    spatial_ref = arcpy.Describe(uttl_basins).spatialReference
    CTr_path = CTr
    qTr_path = qTr

    arcpy.ProjectRaster_management(CTr_path,
                                   '{}/CTr_reproject.tif'.format(temp_folder),
                                   spatial_ref, "NEAREST",
                                   '{} {}'.format(x_size, y_size), "#", "#",
                                   arcpy.Describe(CTr_path).spatialReference)
    arcpy.ProjectRaster_management(qTr_path,
                                   '{}/qTr_reproject.tif'.format(temp_folder),
                                   spatial_ref, "NEAREST",
                                   '{} {}'.format(x_size, y_size), "#", "#",
                                   arcpy.Describe(qTr_path).spatialReference)
    arcpy.ProjectRaster_management(fac,
                                   '{}/fac_reproject.tif'.format(temp_folder),
                                   spatial_ref, "NEAREST",
                                   '{} {}'.format(x_size, y_size), "#", "#",
                                   arcpy.Describe(fac).spatialReference)

    arcpy.MakeRasterLayer_management(
        '{}/CTr_reproject.tif'.format(temp_folder), 'CTr')
    arcpy.MakeRasterLayer_management(
        '{}/qTr_reproject.tif'.format(temp_folder), 'qTr')
    arcpy.MakeRasterLayer_management(
        '{}/fac_reproject.tif'.format(temp_folder), 'fac')

    arcpy.gp.RasterCalculator_sa(
        '"{}" * Power(("{}" * {} * {}) / 1000000,"{}")'.format(
            'CTr', 'fac', x_size, y_size, 'qTr'), "{}/Qmax".format(workspace))

    arcpy.gp.ZonalStatisticsAsTable_sa(uttl_basins, "Name",
                                       '{}/Qmax'.format(workspace),
                                       '{}/Qmax_stats'.format(temp_folder),
                                       "DATA", "MAXIMUM")
    arcpy.TableToTable_conversion('{}/Qmax_stats'.format(temp_folder),
                                  temp_folder, 'Qmax_stats.csv')
    df_stats['Qmax'] = pd.read_csv('{}/Qmax_stats.csv'.format(temp_folder),
                                   index_col='NAME')['MAX']

    # Qmax Classification
    df_stats['Qmax_Class'] = 'Muy Alto'

    # low = np.percentile(df_stats['Qmax'], 25)
    # medium_low = np.percentile(df_stats['Qmax'], 50)
    # medium_high = np.percentile(df_stats['Qmax'], 75)

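    # Fixed class breaks for Qmax; units follow the discharge raster (presumably m3/s).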
    low = 150
    medium_low = 300
    medium = 600
    medium_high = 1200
    high = 2400

    df_stats.loc[df_stats[df_stats['Qmax'] < low].index, 'Qmax_Class'] = 'Bajo'
    df_stats.loc[df_stats[(df_stats['Qmax'] >= low)
                          & (df_stats['Qmax'] < medium_low)].index,
                 'Qmax_Class'] = 'Medio Bajo'
    df_stats.loc[df_stats[(df_stats['Qmax'] >= medium_low)
                          & (df_stats['Qmax'] < medium)].index,
                 'Qmax_Class'] = 'Medio'
    df_stats.loc[df_stats[(df_stats['Qmax'] >= medium)
                          & (df_stats['Qmax'] < medium_high)].index,
                 'Qmax_Class'] = 'Medio Alto'
    df_stats.loc[df_stats[(df_stats['Qmax'] >= medium_high)
                          & (df_stats['Qmax'] < high)].index,
                 'Qmax_Class'] = 'Alto'

    df_stats['Slope'] = df_uttl['Slope']

    gp.AddMessage(
        'Classification of streams alignment based on slope-flow thresholds ... '
    )

    df_stats['Beechie'] = None
    df_stats['BeechieNew'] = 'Inconfinados'
    df_stats['Smax'] = 0.1 * (df_stats['Qmax']**-0.42)
    df_stats['Smin'] = 0.05 * (df_stats['Qmax']**-0.61)

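    # Smax/Smin are slope thresholds from slope-discharge power laws; the
    # classification below types unconfined reaches (DoC > 4) by slope and Qmax.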
    df_stats.loc[df_stats[df_stats['DoC'] < 4.].index, 'Beechie'] = 'Confinados'
    df_stats.loc[df_stats[(df_stats['Slope'] > df_stats['Smax'])
                          & (df_stats['DoC'] > 4.)].index,
                 'Beechie'] = 'Trenzados'
    df_stats.loc[df_stats[(df_stats['Slope'] < df_stats['Smax'])
                          & (df_stats['Qmax'] < 15.) &
                          (df_stats['DoC'] > 4.)].index, 'Beechie'] = 'Rectos'
    df_stats.loc[df_stats[(df_stats['Slope'] < df_stats['Smin'])
                          & (df_stats['Qmax'] > 15.) &
                          (df_stats['DoC'] > 4.)].index,
                 'Beechie'] = 'Meandricos'
    df_stats.loc[df_stats[(df_stats['Slope'] > df_stats['Smin'])
                          & (df_stats['Slope'] < df_stats['Smax']) &
                          (df_stats['Qmax'] > 15.) &
                          (df_stats['DoC'] > 4.)].index,
                 'Beechie'] = 'Trenzados-Islas'

    # Beechie Reclass
    df_stats.loc[df_stats[df_stats['Beechie'] == 'Confinados'].index,
                 'BeechieNew'] = 'Confinados'

    df_stats.index = [str(i) for i in df_stats.index]
    df_stats.index.name = 'Code'
    df_stats[[
        'w_valley', 'WB', 'DoC', 'Qmax', 'Qmax_Class', 'Smax', 'Smin',
        'Beechie', 'BeechieNew'
    ]].to_csv('{}/Beechie_Table.csv'.format(temp_folder), index_label='Code')

    arcpy.TableToTable_conversion('{}/Beechie_Table.csv'.format(temp_folder),
                                  workspace, 'Beechie')

    expression = 'str(!Code!)'
    code_block = ''
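    # The UTTL join field ('Name') is text, so mirror the numeric Code into a
    # text field (STRCODE) before joining.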
    arcpy.AddField_management(os.path.join(workspace,
                                           'Beechie'), 'STRCODE', 'TEXT', '',
                              '', '10', '', 'NULLABLE', 'NON_REQUIRED', '')
    arcpy.CalculateField_management(os.path.join(workspace,
                                                 'Beechie'), 'STRCODE',
                                    expression, 'PYTHON', code_block)

    # Join Table to UTTL Segmentation Polygons
    arcpy.MakeFeatureLayer_management(uttl_basins, 'UTTL')
    arcpy.AddJoin_management('UTTL', 'Name',
                             os.path.join(workspace, 'Beechie'), 'STRCODE')
    arcpy.CopyFeatures_management(
        'UTTL', os.path.join(temp_folder, r'UTTL_Beechie.shp'))
    arcpy.DeleteField_management(
        os.path.join(temp_folder, r'UTTL_Beechie.shp'),
        ['Shape_Leng', 'Shape_Area', 'OBJECTID_1', 'Code', 'STRCODE'])

    arcpy.DeleteFeatures_management(os.path.join(workspace, r'UTTL_Basins'))
    arcpy.CopyFeatures_management(
        os.path.join(temp_folder, r'UTTL_Beechie.shp'),
        os.path.join(workspace, r'UTTL_Basins'))
Example no. 39
0
def execute(self, parameters, messages):

    # Create the Geoprocessor object
    gp = arcgisscripting.create()

    # Check out any necessary licenses
    gp.CheckOutExtension("spatial")

    gp.OverwriteOutput = 1
    gp.LogHistory = 1

    # Logistic Regression doesn't work in ArcGIS Pro 2.5 when the workspace is File System, but works in 2.6! #AL 140820
    desc = arcpy.Describe(gp.workspace)
    install_version = str(arcpy.GetInstallInfo()['Version'])
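    # NOTE: the version check below compares strings lexicographically; that is
    # fine for single-digit minors like "2.5" but would misorder e.g. "2.10".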
    if str(
            arcpy.GetInstallInfo()['ProductName']
    ) == "ArcGISPro" and install_version <= "2.5" and desc.workspaceType == "FileSystem":
        arcpy.AddError("ERROR: Logistic Regression does not work in ArcGIS Pro " +
                       install_version + " when the workspace is File System!")
        raise

    # Load required toolboxes...
    try:
        parentfolder = os.path.dirname(sys.path[0])
        #
        #tbxpath = os.path.join(parentfolder,"arcsdm.pyt")
        tbxpath = os.path.join(parentfolder, "toolbox\\arcsdm.pyt")
        dwrite(tbxpath)
        gp.AddToolbox(tbxpath)
        #gp.addmessage('getting arguments...')
        Grand_WOFE_Name = '_' + parameters[0].valueAsText
        #gp.GetParameterAsText(0)
        # Grand Wofe Name cannot be longer than 7 characters #AL 090620
        if (len(Grand_WOFE_Name) > 7):
            arcpy.AddError(
                "ERROR: Grand Wofe Name cannot be longer than 7 characters.")
            raise
        Evidence_Rasters = parameters[1].valueAsText.split(';')
        #gp.GetParameterAsText(1).split(';')
        Evidence_Data_Types = parameters[2].valueAsText.lower().split(';')
        #gp.GetParameterAsText(2).lower().split(';')
        Input_Training_Sites_Feature_Class = parameters[3].valueAsText
        #gp.GetParameterAsText(3)
        trainingDescr = arcpy.Describe(
            Input_Training_Sites_Feature_Class)  #AL 180520
        trainingCoord = trainingDescr.spatialReference.name  #AL 180520
        Ignore_Missing_Data = parameters[4].value
        #gp.GetParameter(4)
        Confidence_Level_of_Studentized_Contrast = parameters[5].value
        #gp.GetParameter(5)
        Unit_Area__sq_km_ = parameters[6].value  #gp.GetParameter(6)
        Missing_Data_Value = -99
        #gp.addmessage('got arguments')
        #import SDMValues
        arcsdm.sdmvalues.appendSDMValues(gp, Unit_Area__sq_km_,
                                         Input_Training_Sites_Feature_Class)

        # Local variables...
        List_Wts_Tables = []
        suffixes = {
            'Ascending': '_CA',
            'Descending': '_CD',
            'Categorical': '_CT'
        }
        Missing_Data_Value = -99
        Evidence_Raster_Code_Field = ''
        OutSet = []  #List of output datasets
        dwrite('set local variables')

        #Processing...
        # Test for proper table data types:
        if len(Evidence_Data_Types) != len(Evidence_Rasters):
            gp.adderror(
                'Number of evidence layers and weights data types do not match'
            )
            raise
        for evtype in Evidence_Data_Types:
            if not evtype[0] in 'ofcad':
                gp.adderror('Evidence data type %s is not one of %s' %
                            (evtype, [
                                'free', 'categorical', 'ordered', 'ascending',
                                'descending'
                            ]))
                raise TypeError
        # Process: Calculate Weights of Evidence...
        dwrite(str(Evidence_Data_Types))
        dwrite(str(Evidence_Rasters))
        arcpy.AddMessage("========== Starting GrandWofe ====================")

        for Evidence_Raster_Layer, Evidence_Data_Type in zip(
                Evidence_Rasters, Evidence_Data_Types):
            # Check Evidence Raster datatype and Coordinate System #AL 180520
            evidenceDescr = arcpy.Describe(Evidence_Raster_Layer)
            evidenceCoord = evidenceDescr.spatialReference.name
            arcpy.AddMessage("Data type of Evidence Layer " +
                             Evidence_Raster_Layer + " is " +
                             evidenceDescr.datatype +
                             " and Coordinate System " + evidenceCoord)
            if (evidenceCoord != trainingCoord):
                arcpy.AddError(
                    "ERROR: Coordinate System of Evidence Layer is " +
                    evidenceCoord + " and Training points it is " +
                    trainingCoord + ". These must be same.")
                raise

            splitted_evidence = os.path.split(
                Evidence_Raster_Layer)  #AL 090620
            eviname = os.path.splitext(splitted_evidence[1])  #AL 090620
            #prefix = Evidence_Raster_Layer + Grand_WOFE_Name
            prefix = gp.workspace + "\\" + eviname[
                0] + Grand_WOFE_Name  #AL 090620
            arcpy.AddMessage("Calculating weights for %s (%s)..." %
                             (Evidence_Raster_Layer, Evidence_Data_Type))
            if Evidence_Data_Type.startswith('o'):
                Wts_Table_Types = ['Ascending', 'Descending']
            elif Evidence_Data_Type.startswith('a'):
                Wts_Table_Types = ['Ascending']
            elif Evidence_Data_Type.startswith('d'):
                Wts_Table_Types = ['Descending']
            else:
                Wts_Table_Types = ['Categorical']

            for Wts_Table_Type in Wts_Table_Types:
                suffix = suffixes[Wts_Table_Type]
                filename = prefix + suffix
                # + '.dbf' NO DBF anymore
                desc = arcpy.Describe(gp.workspace)

                if desc.workspaceType == "FileSystem":
                    if not (filename.endswith('.dbf')):
                        filename = filename + ".dbf"
                    dwrite("Filename is a file - adding dbf")

                unique_name = gp.createuniquename(filename, gp.workspace)
                Output_Weights_Table = unique_name
                #dwrite("Validate: " + gp.ValidateTablename(prefix + suffix) )

                arcpy.ImportToolbox(tbxpath)

                # Temporarily print directory
                #gp.addmessage(dir(arcpy));
                gp.addmessage("Calling calculate weights...")
                dwrite("Evidence raster layer name: " + Evidence_Raster_Layer)
                dwrite(' Output table name: %s Exists already: %s' %
                       (Output_Weights_Table, gp.exists(Output_Weights_Table)))

                result = arcpy.CalculateWeightsTool_ArcSDM ( Evidence_Raster_Layer, Evidence_Raster_Code_Field, \
                                               Input_Training_Sites_Feature_Class, Wts_Table_Type, Output_Weights_Table, \
                                               Confidence_Level_of_Studentized_Contrast, \
                                               Unit_Area__sq_km_, Missing_Data_Value)
                arcpy.AddMessage("     ...done")
                gp.AddMessage('Result: %s\n' % result)
                #gp.addmessage("Done...")
                #gp.addmessage(result);

                #Output, Success = result.split(';')
                Success = "True"  # horrible fix...
                outputfilename = result.getOutput(0)
                tmp = result.getOutput(1)
                dwrite("Result: " + str(tmp))
                warning = result.getMessages(1)

                dwrite(warning)
                if (len(warning) > 0):
                    arcpy.AddWarning(warning)
                    #Success = "False"; #AL 180520 removed
                    #Should stop here?

                #TODO: filegeodatabase support! No .dbf there.
                #dbf file-extension fix
                # Testing workspace

                Output = outputfilename

                # A geodatabase table needs no file extension; a file-system
                # workspace stores the weights table as a dBASE (.dbf) file.
                if desc.workspaceType == "FileSystem":
                    if not outputfilename.endswith('.dbf'):
                        Output = outputfilename + ".dbf"
                    dwrite("Workspace is filesystem - adding dbf")

                if Success.strip().lower() == 'true':
                    List_Wts_Tables.append((Evidence_Raster_Layer, Output))
                    #gp.addmessage('Valid Wts Table: %s'%Output_Weights_Table)
                    OutSet.append(str(
                        Output))  # Save name of output table for display kluge
                else:
                    #gp.addmessage('Invalid Wts Table: %s'%Output.strip())
                    gp.AddWarning('Invalid Wts Table: %s' %
                                  Output.strip())  #AL 040520
                #arcpy.AddMessage("\n")

        #Get list of valid tables for each input raster
        raster_tables = {}
        #arcpy.AddMessage("     ...done");

        for Evidence_Raster_Layer, Output_Weights_Table in List_Wts_Tables:
            #gp.addmessage(str((evidence_layer, wts_table)))
            if Evidence_Raster_Layer in raster_tables:
                raster_tables[Evidence_Raster_Layer].append(
                    Output_Weights_Table)
            else:
                raster_tables[Evidence_Raster_Layer] = [Output_Weights_Table]

        if len(raster_tables) > 0:
            #Function to do nested "for" statements by recursion
            def nested_fors(ranges, tables, N, tables_out=[], tables_all=[]):
                for n in ranges[0]:
                    tables_out.append(tables[0][n])
                    if len(ranges) > 1:
                        nested_fors(ranges[1:], tables[1:], N, tables_out,
                                    tables_all)
                    if len(tables_out) == N:
                        tables_all.append(tables_out[:])
                    del tables_out[-1]
                return tables_all
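            # For reference, itertools.product(*tables) would yield these same
            # combinations; the recursive helper enumerates the Cartesian
            # product of valid weights tables across evidence layers.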

            #Get per-test lists of tables; each table in a list is in input raster order
            #tables = [raster_tables[Evidence_Raster_Layer] for Evidence_Raster_Layer in Evidence_Rasters]
            tables = []
            valid_rasters = []
            valid_raster_datatypes = []
            for Evidence_Raster_Layer, Evidence_Data_Type in zip(
                    Evidence_Rasters, Evidence_Data_Types):
                if Evidence_Raster_Layer in raster_tables:
                    valid_rasters.append(Evidence_Raster_Layer)
                    valid_raster_datatypes.append(Evidence_Data_Type)
                    tables.append(raster_tables[Evidence_Raster_Layer])

            #Get ranges for number of tables for each evidence layer (in input evidence order)
            # Py 3.4 fixes here:
            ranges = list(map(range, list(map(len, tables))))
            dwrite("Tables: " + str(tables))
            dwrite("Ranges: " + str(ranges))
            #ranges = map(range,map(len, tables))
            #gp.addmessage(str(ranges))
            #Get combinations of valid wts table for input evidence_rasters
            Weights_Tables_Per_Test = nested_fors(ranges, tables, len(tables))
            for Testnum, Weights_Tables in enumerate(Weights_Tables_Per_Test):
                Test = Testnum + 1
                gp.addmessage("------ Running tests... (%s) ------" % (Test))
                # Process: Calculate Response...
                dwrite("Weight tables: " + str(Weights_Tables))
                Weights_Tables = ";".join(Weights_Tables)
                prefix = Grand_WOFE_Name[1:] + str(Test)
                gp.addMessage("%s: Response & Logistic Regression: %s,%s\n" %
                              (Test, ";".join(valid_rasters), Weights_Tables))
                Output_Post_Prob_Raster = gp.createuniquename(
                    prefix + "_pprb", gp.workspace)
                Output_Prob_Std_Dev_Raster = gp.createuniquename(
                    prefix + "_pstd", gp.workspace)
                Output_MD_Variance_Raster = gp.createuniquename(
                    prefix + "_mvar", gp.workspace)
                Output_Total_Std_Dev_Raster = gp.createuniquename(
                    prefix + "_tstd", gp.workspace)
                Output_Confidence_Raster = gp.createuniquename(
                    prefix + "_conf", gp.workspace)
                gp.AddToolbox(tbxpath)
                #dwrite (str(dir(arcpy)))
                gp.addMessage(" Calculating response... ")
                out_paths = arcpy.CalculateResponse_ArcSDM(";".join(valid_rasters), Weights_Tables, Input_Training_Sites_Feature_Class, \
                                 Ignore_Missing_Data, Missing_Data_Value, Unit_Area__sq_km_, Output_Post_Prob_Raster, Output_Prob_Std_Dev_Raster, \
                                 Output_MD_Variance_Raster, Output_Total_Std_Dev_Raster, Output_Confidence_Raster)
                # Set the actual output parameters
                gp.addMessage("       ...done")

                actualoutput = []
                dwrite(str(out_paths))
                dwrite("Outputcount: " + str(out_paths.outputCount))
                dwrite("Output0: " + str(out_paths.getOutput(0)))
                paths = ""
                for i in range(0, out_paths.outputCount):
                    dwrite("Output: " + str(out_paths.getOutput(i)))
                    paths += out_paths.getOutput(i) + ";"

                #for raspath in out_paths.split(';'):
                for raspath in paths.split(';'):
                    if gp.exists(raspath.strip()):
                        actualoutput.append(raspath.strip())
                out_paths = ';'.join(actualoutput)
                #Append delimited string to list
                OutSet.append(
                    out_paths)  # Save name of output raster dataset for kluge
                dwrite(" Outset: " + str(OutSet))

                # Process: Logistic Regression...
                Output_Polynomial_Table = gp.createuniquename(
                    prefix + "_lrpoly.dbf", gp.workspace)
                Output_Coefficients_Table = gp.createuniquename(
                    prefix + "_lrcoef.dbf", gp.workspace)
                Output_Post_Probability_raster = gp.createuniquename(
                    prefix + "_lrpprb", gp.workspace)
                Output_Standard_Deviation_raster = gp.createuniquename(
                    prefix + "_lrstd", gp.workspace)
                Output_LR_Confidence_raster = gp.createuniquename(
                    prefix + "_lrconf", gp.workspace)
                #gp.AddToolbox(tbxpath)
                gp.addMessage(" Running logistic regression...")
                dwrite("valid_rasters = " + str(valid_rasters))
                dwrite("valid_raster_datatypes = " +
                       str(valid_raster_datatypes))
                dwrite(
                    "0 Input Raster Layer(s) (GPValueTable: GPRasterLayer) = ';'.join(valid_rasters)"
                )
                dwrite(
                    "1 Evidence type (GPValueTable: GPString) = ';'.join(valid_raster_datatypes)"
                )
                dwrite("w Input weights tables (GPValueTable: DETable) = " +
                       str(Weights_Tables))
                dwrite("2 Training sites (GPFeatureLayer) = " +
                       str(Input_Training_Sites_Feature_Class))
                dwrite("3 Missing data value (GPLong) = " +
                       str(Missing_Data_Value))
                dwrite("4 Unit area (km^2) (GPDouble) = " +
                       str(Unit_Area__sq_km_))
                dwrite("5 Output polynomial table (DEDbaseTable) = " +
                       str(Output_Polynomial_Table))
                dwrite("52 Output coefficients table (DEDbaseTable) = " +
                       str(Output_Coefficients_Table))
                dwrite("6 Output post probablity raster (DERasterDataset) = " +
                       str(Output_Post_Probability_raster))
                dwrite(
                    "62 Output standard deviation raster (DERasterDataset) = "
                    + str(Output_Standard_Deviation_raster))
                dwrite("63 Output confidence raster (DERasterDataset) = " +
                       str(Output_LR_Confidence_raster))

                out_paths = arcpy.LogisticRegressionTool_ArcSDM(
                    ";".join(valid_rasters), ";".join(valid_raster_datatypes),
                    Weights_Tables, Input_Training_Sites_Feature_Class,
                    Missing_Data_Value, Unit_Area__sq_km_,
                    Output_Polynomial_Table, Output_Coefficients_Table,
                    Output_Post_Probability_raster,
                    Output_Standard_Deviation_raster,
                    Output_LR_Confidence_raster)
                dwrite(str(out_paths.status))
                gp.addMessage("     ...done ")

                # Set the output parameters
                #Append delimited string to list
                for i in range(0, out_paths.outputCount):
                    dwrite("Output: " + str(out_paths.getOutput(i)))
                    OutSet.append(out_paths.getOutput(i))
                #OutSet.append(out_paths) # Save name of output raster dataset for kluge
                #Set output parameters
                #gp.addmessage("==== ====")
                dwrite(str(out_paths.status))
            """Kluge because Geoprocessor can't handle variable number of outputs"""
            dwrite(" Outset: " + str(OutSet))
            OutSet = ';'.join(OutSet)

            gp.addwarning("Copy the following line with Ctrl+C,")
            gp.addwarning(
                "then paste it in the Name box of the Add Data command,")
            gp.addwarning("then click the Add button")
            dwrite(str(OutSet))
            #stoppitalle();
            gp.addwarning(OutSet)

        else:
            #Stop processing
            gp.AddError('No Valid Weights Tables: Stopped.')

    except:
        # get the traceback object
        tb = sys.exc_info()[2]
        #e = sys.exc_info()[1]
        #dwrite(e.args[0])

        # If using this code within a script tool, AddError can be used to return messages
        #   back to a script tool.  If not, AddError will have no effect.
        #arcpy.AddError(e.args[0])
        ## Begin old.

        # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n    " + \
            str(sys.exc_info()) + "\n"    #AL 040520
        #    str(sys.exc_type)+ ": " + str(sys.exc_value) + "\n"
        # generate a message string for any geoprocessing tool errors
        if len(gp.GetMessages(2)) > 0:
            msgs = "GP ERRORS:\n" + gp.GetMessages(2) + "\n"
            gp.AddError(msgs)
            gp.AddMessage(msgs)

        # return gp messages for use with a script tool
        gp.AddError(pymsg)

        raise
Example no. 40
0
        def EightDayComposite(self):
                
                print "Please wait..........10 sec"

                # Create Geoprocessing object
                gp = arcgisscripting.create()

                # Check out any License
                gp.CheckOutExtension("spatial")

                # Overwriting the Output
                gp.OverwriteOutput =1

                #Define Workspace
                temppath=self.tempPath # Temporary path for raster computation
                temppath1=self.tempPath1
                finalpath=self.finalPath # Final Destination of raster files
                gp.workspace=self.workSpace

                listArray= list()
                overArray= list()

                # Reading all raster files
                rsList=gp.ListRasters("*")  

                # Raster Counter
                # global counter
                counter=1

                # Use tracking to calculate day of the year
                yearDay=self.ReadLog("yearday.txt")
                

                rs=rsList.Next()
                       
                print "Be patient; this may take hours to run..."

                try:                        
                        while rs:
                                rastInfo=self.ReadLog("etolog.txt")
                                print int(rastInfo)
                                print int(rs[-6:])

                                if(int(rastInfo)<int(rs[-6:])):                                                
                                        gp.workspace=self.workSpace
                                        print "Available raster:-: "+str(rs)
                                        # Making group of 8 files 
                                        if(counter<=8):                       
                                                listArray.append(rs)
                                                print listArray
                                                if((len(listArray)==8)and(yearDay<=353)):
                                                        #print listArray

                                                        #Creating list to store temporary paths
                                                        outraster = [temppath + str(i) for i in range(8)]
                                                        #print outraster[0],outraster[1],outraster[2]

                                                        # Make final raster name as julian day date        
                                                        finalraster=finalpath+"ETo"+str(self.year)+str(yearDay).zfill(3)
                                                        #offset=(counter-1)
                                                        offset=counter
                                                        
                                                        counter=0 # reset counter                        
                                                        print "offset "+str(offset)
                                                        
                                                        # Add 8 rasters together
                                                        print "Raster Addition Progressing ........."
                                                        gp.Plus_sa(listArray[0],0,outraster[0])
                                                        print listArray[0]+" :Added with: "+"0"+" :Temp output: "+outraster[0]
                                                        print "Complete Step\t"+ str(0)                        
                                                        for i in range(1,offset):
                                                                                           
                                                                try:
                                                                        print "Continue Progressing ........."
                                                                        gp.Plus_sa((listArray[i]),outraster[i-1],outraster[i])
                                                                        print listArray[i]+" :Added with: "+outraster[i-1]+" :Temp output: "+outraster[i]
                                                                        # print listArray[i]+ outraster[i-1]+ outraster[i]
                                                                        print "Complete Step\t"+ str(i)
                                                                except:
                                                                        print "Raster Addition Error"
                                                                        raise
                                                           
                                                        # Calculate Average raster value
                                                        print "Calculating Average Raster "
                                                        gp.Divide_sa(outraster[i],offset,finalraster)

                                                        # Display Program Status in the IDLE Screen
                                                        print "output raster-->"+outraster[i]
                                                        print "final raster -->"+finalraster

                                                        # Reproject ETo raster into AEA
                                                        ObjEtoReprojection=EtoProjection.EtoReprojection(finalraster,yearDay,self.year,self.datum,self.projection,self.sampleSize,self.finalPath,self.reprojectDir)
                                                        ObjEtoReprojection.Reproject()
                                                        
                                                        print "Deleting intermediate Rasters........"                                        

                                                        # Delete Intermediate rasters
                                                        for k in range(8):
                                                                gp.Delete_management(outraster[k], "Raster Dataset")

                                                        print "Raster yearday:-: "+str(yearDay)+" completed!!."
                                                        print "\n"
                                                        print "\n"

                                                        # write to log
                                                        
                                                        self.WriteLog("etolog.txt",listArray[7][-6:])

                                                        # Empty list increment day and ready for next Cycle
                                                        yearDay=yearDay+offset

                                                        #          
                                                        self.WriteLog("yearday.txt",str(yearDay))
                                                        listArray=list()


                                                elif(yearDay>353):
                                                        overCounter=0
                                                        print "upto here----->1"
                                                        listArray=list()                                             
                                                        
                                                        finalraster=finalpath+"ETo"+str(self.year)+str(361).zfill(3)
                                                        
                                                        #Creating list to store temporary paths
                                                        outputraster = [temppath1 + str(i) for i in range(8)]
                                                        

                                                        while rs:
                                                                overArray.append(rs)
                                                                rs=rsList.Next()                
                                                                overCounter=overCounter+1
                                                        print "overArray"+str(overArray)
                                                        print "Raster Addition Progressing beyond "+str(353)
                                                        print "Total no. of files after 353:-: "+str(overCounter) 
                                                        gp.Plus_sa(overArray[0],0,outputraster[0])
                                                        print "Complete Step\t"+ str(0)
                                                        
                                                        for x in range(1,overCounter):
                                                                try:
                                                                        print "Continue Progressing ........."
                                                                        gp.Plus_sa((overArray[x]),outputraster[x-1],outputraster[x]) ##########
                                                                        print overArray[x]+" :Added with: "+outputraster[x-1]+" :Temp output: "+outputraster[x]
                                                                        print "Complete Step\t"+ str(x)
                                                                except:
                                                                        print gp.GetMessages()
                                                        # Calculate Average raster value
                                                        print "Calculating Average Raster "+str(overCounter)
                                                        gp.Divide_sa(outputraster[x],overCounter,finalraster) ########

                                                        # Display Program Status in the IDLE Screen
                                                        print "output raster-->"+outputraster[x]
                                                        print "final raster -->"+finalraster

                                                        # Reproject ETo raster into AEA (same arguments as the earlier call, with yearDay fixed at 361)
                                                        ObjEtoReprojection=EtoProjection.EtoReprojection(finalraster,361,self.year,self.datum,self.projection,self.sampleSize,self.finalPath,self.reprojectDir)
                                                        ObjEtoReprojection.Reproject()

                                                        print "Deleting intermediate Rasters........"
                                                        self.WriteLog("yearday.txt","361")
                                                        self.WriteLog("etolog.txt","0")
                                                
                                                        # Delete Intermediate rasters
                                                        for k in range(overCounter):
                                                                gp.Delete_management(outputraster[k], "Raster Dataset")

                                                        print "upto here------>2"
                                                        print "ETO composite finished !!!"
                                                else:
                                                        if(counter<8):
                                                                print "Wait for new rasters to make 8 days Composite...."
                                                        elif(counter==8):
                                                                print "Ready for rasters composite........."
                                                        
                                                        
                                                rs=rsList.Next()
                                               
                                                counter=counter+1
                                                                
                                        else:
                                               print "Error !!!........Counter > 8. "
                                else:
                                        print "Eto already computed...."
                                        rs=rsList.Next()
         

                except:
                        print gp.GetMessages()
Example no. 41
0
def removeStatName(name):
    try:
        gp = arcgisscripting.create(9.3)
        deleteAnalysisName(name)
    except Exception:
        gp.AddMessage(traceback.format_exc())
Example no. 42
0
def displayStatName():
    try:
        gp = arcgisscripting.create(9.3)
        displayAnalysisNames()
    except Exception:
        gp.AddMessage(traceback.format_exc())
Example no. 43
0
def makeTrendsTables():
    try:
        gp = arcgisscripting.create(9.3)
        gp.OverwriteOutput = True

        #Trends Change Data table
        tableName = "TrendsChangeData"
        gp.CreateTable(TrendsNames.dbTemplate[:-1], tableName)
        tableLoc = os.path.join(TrendsNames.dbTemplate, tableName)

        gp.AddField(tableLoc, "AnalysisNum", "long")
        gp.AddField(tableLoc, "EcoLevel3ID", "long")
        gp.AddField(tableLoc, "ChangePeriod", "text", "15")
        gp.AddField(tableLoc, "Resolution", "text", "10")
        gp.AddField(tableLoc, "BlkLabel", "long")

        for trans in range(TrendsNames.numConversions):
            gp.AddField(tableLoc, "CT_" + str(trans + 1), "long")

        #Trends Change Stats table
        tableName = "TrendsChangeStats"
        gp.CreateTable(TrendsNames.dbTemplate[:-1], tableName)
        tableLoc = os.path.join(TrendsNames.dbTemplate, tableName)

        gp.AddField(tableLoc, "AnalysisNum", "long")
        gp.AddField(tableLoc, "EcoLevel3ID", "long")
        gp.AddField(tableLoc, "ChangePeriod", "text", "15")
        gp.AddField(tableLoc, "Resolution", "text", "10")
        gp.AddField(tableLoc, "Statistic", "text", "20")

        for trans in range(TrendsNames.numConversions):
            gp.AddField(tableLoc, "CT_" + str(trans + 1), "double")

        #Trends Glgn Data table
        tableName = "TrendsGlgnData"
        gp.CreateTable(TrendsNames.dbTemplate[:-1], tableName)
        tableLoc = os.path.join(TrendsNames.dbTemplate, tableName)

        gp.AddField(tableLoc, "AnalysisNum", "long")
        gp.AddField(tableLoc, "EcoLevel3ID", "long")
        gp.AddField(tableLoc, "ChangePeriod", "text", "15")
        gp.AddField(tableLoc, "Resolution", "text", "10")
        gp.AddField(tableLoc, "Glgn", "text", "10")
        gp.AddField(tableLoc, "BlkLabel", "long")

        for trans in range(TrendsNames.numLCtypes):
            gp.AddField(tableLoc, "LC_" + str(trans + 1), "long")

        #Trends Glgn Stats table
        tableName = "TrendsGlgnStats"
        gp.CreateTable(TrendsNames.dbTemplate[:-1], tableName)
        tableLoc = os.path.join(TrendsNames.dbTemplate, tableName)

        gp.AddField(tableLoc, "AnalysisNum", "long")
        gp.AddField(tableLoc, "EcoLevel3ID", "long")
        gp.AddField(tableLoc, "ChangePeriod", "text", "15")
        gp.AddField(tableLoc, "Resolution", "text", "10")
        gp.AddField(tableLoc, "Glgn", "text", "10")
        gp.AddField(tableLoc, "Statistic", "text", "20")

        for trans in range(TrendsNames.numLCtypes):
            gp.AddField(tableLoc, "LC_" + str(trans + 1), "double")

        #Trends Composition Data table
        tableName = "TrendsCompData"
        gp.CreateTable(TrendsNames.dbTemplate[:-1], tableName)
        tableLoc = os.path.join(TrendsNames.dbTemplate, tableName)

        gp.AddField(tableLoc, "AnalysisNum", "long")
        gp.AddField(tableLoc, "EcoLevel3ID", "long")
        gp.AddField(tableLoc, "CompYear", "text", "10")
        gp.AddField(tableLoc, "Resolution", "text", "10")
        gp.AddField(tableLoc, "BlkLabel", "long")

        for trans in range(TrendsNames.numLCtypes):
            gp.AddField(tableLoc, "LC_" + str(trans + 1), "long")

        #Trends Composition Stats table
        tableName = "TrendsCompStats"
        gp.CreateTable(TrendsNames.dbTemplate[:-1], tableName)
        tableLoc = os.path.join(TrendsNames.dbTemplate, tableName)

        gp.AddField(tableLoc, "AnalysisNum", "long")
        gp.AddField(tableLoc, "EcoLevel3ID", "long")
        gp.AddField(tableLoc, "CompYear", "text", "10")
        gp.AddField(tableLoc, "Resolution", "text", "10")
        gp.AddField(tableLoc, "Statistic", "text", "20")

        for trans in range(TrendsNames.numLCtypes):
            gp.AddField(tableLoc, "LC_" + str(trans + 1), "double")

        #Trends Multichange Data table
        tableName = "TrendsMultichangeData"
        gp.CreateTable(TrendsNames.dbTemplate[:-1], tableName)
        tableLoc = os.path.join(TrendsNames.dbTemplate, tableName)

        gp.AddField(tableLoc, "AnalysisNum", "long")
        gp.AddField(tableLoc, "EcoLevel3ID", "long")
        gp.AddField(tableLoc, "ChangePeriod", "text", "15")
        gp.AddField(tableLoc, "Resolution", "text", "10")
        gp.AddField(tableLoc, "BlkLabel", "long")

        for trans in range(TrendsNames.numMulti):
            gp.AddField(tableLoc, "Xchg_" + str(trans), "long")

        #Trends Multichange Stats table
        tableName = "TrendsMultichangeStats"
        gp.CreateTable(TrendsNames.dbTemplate[:-1], tableName)
        tableLoc = os.path.join(TrendsNames.dbTemplate, tableName)

        gp.AddField(tableLoc, "AnalysisNum", "long")
        gp.AddField(tableLoc, "EcoLevel3ID", "long")
        gp.AddField(tableLoc, "ChangePeriod", "text", "15")
        gp.AddField(tableLoc, "Resolution", "text", "10")
        gp.AddField(tableLoc, "Statistic", "text", "20")

        for trans in range(TrendsNames.numMulti):
            gp.AddField(tableLoc, "Xchg_" + str(trans), "double")

    except arcgisscripting.ExecuteError:
        # Get the geoprocessing error messages
        msgs = gp.GetMessage(0)
        msgs += gp.GetMessages(2)
        print(msgs)
        raise

    except Exception:
        #print out the system error traceback
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]
        pymsg = tbinfo + "\n" + str(sys.exc_info()[0]) + ": " + str(sys.exc_info()[1])
        print(pymsg)
        raise  #push the error up to exit
Example no. 44
0
################################################################
# May 3, 2010
# CropRotations.py
#
# Produces crop rotation patterns from CDL data
#################################################################
# Import system modules
import sys, string, os, random, time, pdb, math, operator, arcgisscripting, csv, glob, logging

gp = arcgisscripting.create()   # Create the Geoprocessor object
gp.CheckOutExtension("spatial") # Check out any necessary licenses
gp.AddToolbox("C:/Program Files (x86)/ArcGIS/ArcToolBox/Toolboxes/Spatial Analyst Tools.tbx")
gp.AddToolbox("C:/Program Files (x86)/ArcGIS/ArcToolBox/Toolboxes/Data Management Tools.tbx")
gp.overwriteoutput = True

base_dir     = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CropIntensity\\'
analysis_dir = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CropRotationsUSA\\src\\'

#######################################################################
# USER SPECIFIED PARAMETERS
# The following parameters are user specified and might need to be changed
# prj_dir: Top level directory where all the code, data and outputs are stored
# data_dir: Contains the input data
# out_dir: Contains the output and intermediate analysis stuff
# inp_raster_files: The list of Crop data layer (CDL) files for each year
# maxCropRotations: The total number of crop rotations we want to determine
#######################################################################
# prj_dir defines the project space
#prj_dir    = 'C:\\Documents and Settings\\Ritvik\\My Documents\\Projects\\CropRotations\\'
state = ''
max_crop_rot = 10
Example no. 45
0
        def EtfCalculation(self):
                                                          
                # Create the Geoprocessor object
                gp = arcgisscripting.create()

                # Set the necessary product code
                gp.SetProduct("ArcInfo")

                # Check out any necessary licenses
                gp.CheckOutExtension("spatial")

                #Overwriting the Output
                gp.OverwriteOutput = 1

                # Location for Hot and Cold pixel values
                inTable = self.csvTable

                # Define Workspace
                gp.Workspace=self.lstcPath

                # List lstc Rasters
                rsList=gp.ListRasters("*")
                rsLSTC=rsList.Next()

                # Computation on Hot and Cold pixel values to compute average
                Line = 0

                counter=0
                reader=csv.reader(open(inTable,"rb"))

                for row in reader:
                        # Check for Header Text
                        if(row[1]!="DAY"):
                                #Check for Updated files
                                if(int(self.ReadLog())<int(row[1])):
                                        # Check for Empty temp. Cell in CSV file
                                        if(row[2]==""):
                                                print "Empty temperature value (or raster does not exist) for day ---> "+str(row[1])

                                        else:
                                                # Walk through the New raster to compute Etf
                                                while(int(rsLSTC[-3:])!=int(row[1].zfill(3))):
                                                        print "ETf already computed for :-: "+str(rsLSTC)
                                                        rsLSTC=rsList.Next()

                                                print "\n"
                                                print "New Raster for ETf:-: "+str(rsLSTC)
                                                print "rsLSTC"+str(int(rsLSTC[-3:]))
                                                print "log value:-: "+str(self.ReadLog())
                                                
                                                # New raster date matches the CSV year day
                                                if(int(rsLSTC[-3:].zfill(3))==int(row[1].zfill(3))):
                                                                
                                                        Thot1 =str(row[2])  # Thot1 field in row
                                                        Thot2 =str(row[3])  # Thot2 field in row
                                                        Thot3 =str(row[4])  # Thot3 field in row                    
                                                        Thot = float(Thot1)+float(Thot2)+float(Thot3)  # summed up
                                                        ThotD = Thot/3                                 # average
                                                        
                                                        Tcold1 =str(row[5])  # Tcold1 field in row
                                                        Tcold2 =str(row[6])  # Tcold2 field in row
                                                        Tcold3 =str(row[7])  # Tcold3 field in row
                                                        Tcold = float(Tcold1) +float(Tcold2) +float(Tcold3) # summed up
                                                        TcoldD = Tcold/3 # average
                                                        day=row[1]

                                                        Thot_cold = ThotD - TcoldD 

                                                        # Function to compute ETF
                                                        self.genETF(rsLSTC,ThotD,Thot_cold,day,TcoldD,Thot1,Tcold1)

                                                        # Write State on Log file
                                                        self.WriteLog(str(day))
                                                        print "Successfully Completed: "+"etf2008"+str(day).zfill(3)
                                                        print "\n"
 
                                else:
                                        print "Etf already Computed for day:-: "+str(row[1])
                                        print "\n"

                        else:
                                print "Got CSV header..."
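
# A standalone sketch of the per-day math used above: genETF implements the
# evaporative fraction ETf = (Thot - LST) / (Thot - Tcold), floored at zero.
# Plain Python, assuming the same CSV layout of three hot and three cold pixel
# temperatures per day (names here are illustrative only):
def etf_fraction(lst, hot_temps, cold_temps):
        """Evaporative fraction for one LST value, floored at 0."""
        thot = sum(hot_temps) / 3.0    # average hot pixel temperature
        tcold = sum(cold_temps) / 3.0  # average cold pixel temperature
        return max((thot - lst) / (thot - tcold), 0.0)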
Exemplo n.º 46
0
    def ComputeZonal(self):
        try:
            print "Wait for Program Response ..............."
            # Create the Geoprocessor object
            gp = arcgisscripting.create()
            gp.rasterStatistics = "NONE"
            # Check out any necessary licenses
            gp.CheckOutExtension("spatial")
            gp.overwriteoutput = 1

            # Set up input and output files

            gp.Workspace = self.gridPath

            # OutWorkSpace = r"D:\RSData\TRMM"
            Gisdir = self.znlPath
            Zonal_shape = self.shpPath
            print "zonal_shape-->" + str(Zonal_shape)
            #Zonal_shape = sys.argv[2]
            Zonal_field = "FIPS"

            #Zonal_field = sys.argv[3]
            Zonal_raster = Gisdir + "\\trmm_grid"
            Output_filename = self.outcsvPath
            #Output_filename = sys.argv[6]
            outfile = open(Output_filename, 'a')
            Zonal_output_table = Gisdir + "\\Zonal_output_table"
            Zonal_output_dbf = Gisdir + "\\Zonal_Output.dbf"

            Zonelist = []
            Firstpass = 1

            #Get the raster datasets in the input workspace and loop through them from the start
            InputRasters = gp.ListRasters()
            InputRasters.reset()
            InputRaster = InputRasters.next()
            while InputRaster:
                print InputRaster
                Splitfname1 = InputRaster.split("_")
                Maqdate1 = Splitfname1[1]
                print Maqdate1

                if (int(Maqdate1) > int(self.ReadLog())):

                    Year1 = Maqdate1[0:4]
                    Date1 = Maqdate1[4:8]
                    print "-->" + str(Year1)
                    print "-->" + str(Date1)

                    # When processing the first file, use it as a template to do
                    # the vector-raster conversion for the zonal shapefile
                    if Firstpass == 1:
                        try:
                            gp.FeatureToRaster_conversion(Zonal_shape, Zonal_field,
                                                          Zonal_raster)
                            print "Converted to Raster!"
                        except:
                            print "Cannot convert the zonal shapefile to raster"

                        # Pull a list of the zones out of the zonal shapefile
                        cur = gp.SearchCursor(Zonal_shape)
                        row = cur.Next()
                        while row:
                            # Note - FIPS is hardcoded - will need to be changed for
                            # zones other than counties
                            Zonelist.append(row.FIPS)
                            row = cur.Next()

                        # Write the header for the output file
                        #Header_str = "Year, Date, Zone, Count, Mean, Sum, Stdev\n"
                        #outfile.write(Header_str)

                        Firstpass = 0

                    # Process: Zonal Statistics as Table...
                    try:
                        gp.ZonalStatisticsAsTable_sa(Zonal_raster, "Value",
                                                     InputRaster,
                                                     Zonal_output_table,
                                                     "NODATA")
                        print "Process: Zonal Statistics as Table..."
                        # Process: Convert from ESRI table to .dbf
                        gp.CopyRows_management(Zonal_output_table,
                                               Zonal_output_dbf, "")

                        db = dbf.Dbf(Zonal_output_dbf)
                        # Retrieve data from .dbf file
                        i = 0
                        for rec in db:
                            year = Year1
                            day = Date1
                            valuecode = rec["VALUE"]
                            fipsval = Zonelist[i]
                            i = i + 1
                            zone = fipsval
                            count = rec["COUNT"]
                            mean = rec["MEAN"]
                            summ = rec["SUM"]
                            stddev = rec["STD"]
                            self.InsertDB(year, day, zone, count, mean, stddev,
                                          summ)

                        # Close files and clean up
                        db.close()
                        gp.Delete_management(Zonal_output_table)
                        call_delete = "del " + Zonal_output_dbf
                        os.system(call_delete)
                        InputRaster = InputRasters.next()
                        self.WriteLog(Maqdate1)
                        print "Completed the Zonal Stat for-->" + str(Maqdate1)
                        Firstpass = 0

                    except Exception, e:
                        print "Cannot compute Zonal Statistics: " + str(e)
                        raise
                else:
                    print "Zonal stat is already calculated for -->" + str(
                        InputRaster)
                    InputRaster = InputRasters.next()

            # Remove Temporary files
            outfile.close()
            if (os.path.exists(self.znlPath + "\\trmm_grid.aux")):
                os.remove(self.znlPath + "\\trmm_grid.aux")
            if (os.path.exists(self.znlPath + "\\Zonal_Output.dbf.xml")):
                os.remove(self.znlPath + "\\Zonal_Output.dbf.xml")
            if os.path.exists(self.znlPath + "\\trmm_grid"):
                for files in os.listdir(self.znlPath + "\\trmm_grid"):
                    #print files
                    os.remove(self.znlPath + "\\trmm_grid\\" + files)
                os.removedirs(self.znlPath + "\\trmm_grid")

            print "TRMM Zonal Stat Finished !!!"
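
# The same zonal-table export is far shorter with the arcpy.da cursor API;
# a sketch, assuming ArcGIS 10.1+, a checked-out Spatial Analyst license and
# the statistics fields used above (function name is illustrative only):
import arcpy
def zonal_rows(zone_raster, value_raster, out_table):
    # run zonal statistics, then stream the resulting table rows
    arcpy.sa.ZonalStatisticsAsTable(zone_raster, "Value", value_raster,
                                    out_table, "NODATA")
    fields = ["VALUE", "COUNT", "MEAN", "SUM", "STD"]
    with arcpy.da.SearchCursor(out_table, fields) as rows:
        for value, count, mean, summ, std in rows:
            yield value, count, mean, summ, std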
Exemplo n.º 47
0
 def __init__(self):
     """Initialize class and create single geoprocessor object"""
     self.gp = arcgisscripting.create(9.3)
     self.gp.CheckOutExtension("Spatial")
     self.gp.OverwriteOutput = True
     self.lm_configured = False
Exemplo n.º 48
0
        def export_button_func():
            #merge PSUs
            arcpy.CreateFolder_management(outputpath, tmp_state_folder)
            arcpy.env.workspace = fullpath_tmp_state_folder
            fcList = arcpy.ListFeatureClasses("*psu*", "polygon", "")
            tem_merge_file_no_shp = "PSUmerge_"+statename+"_"+re.sub('-', '_', str(uuid.uuid4()))
            tem_merge_file = tem_merge_file_no_shp+".shp"
            out_merge_file_no_shp = "allMerge_"+statename+"_"+re.sub('-', '_', str(uuid.uuid4()))
            out_merge_file = out_merge_file_no_shp+".shp"
            arcpy.Merge_management(fcList, tem_merge_file)

            #Select counties that overlap PSUs and inverse selection
            arcpy.SelectLayerByLocation_management(name1, "WITHIN", tem_merge_file_no_shp, "", "NEW_SELECTION")
            arcpy.SelectLayerByLocation_management(name1, "WITHIN", tem_merge_file_no_shp, "", "SWITCH_SELECTION")

            #Merge PSUs with Counties
            state_output_path = outputpath+tmp_state_folder+'/'
            arcpy.Merge_management([name1, tem_merge_file_no_shp], state_output_path+out_merge_file)
            arcpy.SelectLayerByAttribute_management(out_merge_file_no_shp, "NEW_SELECTION", """"POPULATION"=0""")
            arcpy.CalculateField_management(out_merge_file_no_shp, "POPULATION", "!SUM_POPULA!", "PYTHON")
            arcpy.CalculateField_management(out_merge_file_no_shp, "ALANDSQM", "!SUM_ALANDS!", "PYTHON")

            #Exporting shapefile to csv
            import arcgisscripting, csv
            gp=arcgisscripting.create(10.2)
            output=open(r""+state_output_path+"tableOutput"+statename+"_"+re.sub('-', '_', str(uuid.uuid4()))+".csv","w")
            linewriter=csv.writer(output,delimiter=',')
            fcdescribe=gp.Describe(r""+state_output_path+out_merge_file)
            flds=fcdescribe.Fields
            header = []
            for fld in flds:
                value=fld.Name
                header.append(value)
            linewriter.writerow(header)
            cursor = gp.searchcursor(r""+state_output_path+out_merge_file)
            row = cursor.Next()
            while row:
                line=[]
                for fld in flds:
                    value=row.GetValue(fld.Name)
                    line.append(value)
                linewriter.writerow(line)
                del line
                row=cursor.Next()

            #Zoom to state layer extent
            #(statelyr variable set to state layer)
            lyr = arcpy.mapping.ListLayers(mxd, out_merge_file_no_shp, df)[0]

            RankSymLayer=r"C:\Users\zwhitman\Documents\census\psu_app\input\RankSymbology.lyr"
            arcpy.ApplySymbologyFromLayer_management(lyr, RankSymLayer)

            #Add SQMI and POPULATION labels
            # expression = """"S:" & [SQMI] & vbCrLf& "P:" & [POPULATION]"""
            # lyr.labelClasses[0].expression=expression
            # for lblClass in lyr.labelClasses:
            #   lblClass.showClassLabels=True
            # lyr.showLabels=True
            # arcpy.RefreshActiveView()

            newExtent=df.extent
            statelyr_extent=lyr.getExtent()
            newExtent.XMin=statelyr_extent.XMin
            newExtent.YMin=statelyr_extent.YMin
            newExtent.XMax=statelyr_extent.XMax
            newExtent.YMax=statelyr_extent.YMax
            df.extent=newExtent

            #Clear Selection
            arcpy.SelectLayerByAttribute_management(name1,"CLEAR_SELECTION")

            #Export map to JPEG
            arcpy.mapping.ExportToJPEG(mxd, r""+state_output_path+"mapOutput.jpg")

            # do something
            return
Exemplo n.º 49
0
# ----------------------------------------------------------------
#   CreateDTEDCatalog_ma
# ----------------------------------------------------------------

# Run_CreateDTEDCatalog.py
# Description:
#   Creates an MA DTED Catalog
# Requirements: None
# Author: ESRI
# Date: July 31, 2007

# Import system modules
import arcgisscripting

# Create the Geoprocessor object
gp = arcgisscripting.create()

# Input variables
Input_Geodatabase = r"C:\Workspace\Test\Hawaii_Data.mdb"
DTED_Catalog_Name = "Hawaii_DTED"

try:
    # Create the catalog
    gp.CreateDTEDCatalog(Input_Geodatabase,DTED_Catalog_Name)

except:
    # If an error occurred while running a tool, then print the messages.
    print gp.GetMessages()
    
    
# ----------------------------------------------------------------
# feature_class_to_shapefile_conversion.py
# 
# > Use FeatureClassToShapefile_conversion from ArcGIS python library (arcpy)
#    to convert pre-v.10.x ESRI File Geodatabases (.gdb) to ESRI shape files (.shp, etc.)
#    ; for use in e.g. Alteryx
#    ; see also: http://resources.arcgis.com/en/help/main/10.1/index.html#//00120000003m000000
#
# cgutierrez

## import system & geoprocessing modules
import os
import arcgisscripting as ags
import arcpy
from arcpy import env

my_env = ags.create(9.3)

def list_fcs_in_fgdb(gdb):
    ''' list all Feature Classes in a geodatabase, including inside Feature Datasets
        parm : gdb : your arcpy.env.workspace '''
    print 'Environment : ', my_env
    my_env.workspace = gdb
    print 'Processing workspace : ', my_env.workspace
    fcs = [] 
    for fds in my_env.ListDatasets('','feature') + ['']:
        print fds
        for fc in my_env.ListFeatureClasses('','',fds):
            print fc
            #yield os.path.join(env.workspace, fds, fc)
            fcs.append(os.path.join(my_env.workspace, fds, fc))
    return fcs
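
# A usage sketch completing the conversion described in the header comment,
# assuming a source .gdb and an existing output folder (both paths are
# illustrative only):
if __name__ == '__main__':
    source_gdb = r'C:\data\example.gdb'   # assumed input geodatabase
    out_folder = r'C:\data\shapefiles'    # assumed output folder
    fcs = list_fcs_in_fgdb(source_gdb)
    if fcs:
        arcpy.FeatureClassToShapefile_conversion(fcs, out_folder)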
Exemplo n.º 51
0
# globSelectFiles ----------------------------------------------------------------
# returns a list of files matching a glob search pattern
def globSelectFiles (pattern="*"):
	import glob
	return(glob.glob(pattern))
	

# globSearchList ----------------------------------------------------------------
# returns a list of files from a list of glob search patterns
def globSearchList (extensions = ["*.jp2","*.img"]):
	# make list of files with the following extensions
	import glob
	files = list()
	for ext in extensions: files = files + glob.glob(ext)
	return(files)


# >>> ARCPY FUNCTIONS.............................................................
# http://pro.arcgis.com/en/pro-app/tool-reference/introduction-anatomy/anatomy-of-a-tool-reference-page.htm
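
# arcpyWorkspace -----------------------------------------------------------
# sets (and returns) the arcpy workspace; the MAIN PROGRAM below calls this
# helper but its definition is not part of this listing, so a minimal assumed
# sketch is provided here
def arcpyWorkspace(path=None):
	import arcpy, os
	if path != None:
		# point both arcpy and the process working directory at the folder
		arcpy.env.workspace = path
		os.chdir(path)
	return(arcpy.env.workspace)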


# arcDescribeData -----------------------------------------------------------
# returns a dictionary of for each file with data descriptions and projections
def arcDescribeData(fileList):
	import arcpy
	descriptions = dict()
	for f in fileList:
		print(f)
		dsc = arcpy.Describe(f)
		prj = dsc.spatialreference
		descriptions[str(f)]=[dsc,prj] #dictionary
	return(descriptions)


# arcReprojectData -----------------------------------------------------------------
# reprojects a list of data files to a specified output coordinate reference system
def arcReprojectData(fileList,outCRS):
	import arcpy
	import os
	for f in fileList:
		# get the file extension to pick the right projection tool
		ext = os.path.splitext(f)[1]
		print(ext)

		# run the reprojection tool
		try:
			if ext=='.shp': arcpy.Project_management(f,"prj"+f,outCRS)
			else: arcpy.ProjectRaster_management(f,"prj"+f,outCRS)
			# print messages when the tool runs successfully
			print(arcpy.GetMessages(0))
		except arcpy.ExecuteError:
			print(arcpy.GetMessages(2))
		except Exception as ex:
			print(ex.args[0])
			

# arcSplitShapefileByFeature --------------------------------------------------------
# separate one shapefile in multiple ones by one specific attribute
# Need to change Name to a field
# debugged? 
def arcSplitShapefileByFeature (inDir = "C:/Temp/data/",inFile="Points.shp",outDir = "C:/Temp/data/idPoints/",field="Name"):
	import arcgisscripting
	# Starts Geoprocessing
	gp = arcgisscripting.create(9.3)
	gp.OverWriteOutput = 1
	
	# set input file path
	inputFile = inDir + inFile

	# Make the outDir if it doesn't already exist using os
	import os 
	if not os.path.isdir(outDir):
		os.mkdir(outDir)

	# Read Shapefile for different values in the attribute
	rows = gp.searchcursor(inputFile)
	row = rows.next()
	attribute_types = set([])

	while row:
		attribute_types.add(row.GetValue(field))
		row = rows.next()
	# Output a Shapefile for each different attribute
	for each_attribute in attribute_types:
		outSHP = outDir + each_attribute + u".shp"
		print outSHP
		gp.Select_analysis (inputFile, outSHP, "\""+field+"\" = '" + each_attribute + "'")
	del rows, row, attribute_types, gp


# arcReformatRaster ---------------------------------------------------
# converts list of raster files to a single format in specified path
# inFilesList: list of files e.g.
#                       files = ['file.img',file2.jpg','file3.gif']
#http://resources.arcgis.com/en/help/main/10.1/index.html#//001200000032000000
def arcReformatRaster (FileList=["file1.tif","file2.tif"],
	inPath=None,outPath=None,Format="TIFF"):
	import arcpy, os
	if inPath==None: inPath=os.getcwd()
	if outPath==None: outPath=os.getcwd()
	for f in FileList:
		inFilePath = inPath+"\\"+f
		try:
			# convert each raster to the specified output format
			arcpy.RasterToOtherFormat_conversion(inFilePath,outPath,Format)
		except:
			print "Raster To Other Format Failed."
			print arcpy.GetMessages()
		

# arcResampleRasters -----------------------------------------------------------------
# takes a list of file names sets cell size to specified scale 
# FileList: list of strings containing raster file names ["file1.tif","file2.tif"]
# inPath(optional): path where input files are located, default is os.getcwd()
# outPath(optional): path where output files are created, default is os.getcwd()
# scale: You can specify the cell size in 3 different ways: 
#			1. Using a single number specifying a square cell size
#			2. Using two numbers that specify the X and Y cell size, which is space delimited
#			3. Using the path of a raster dataset from which the square cell size will be imported
# http://pro.arcgis.com/en/pro-app/tool-reference/data-management/resample.htm
def arcResampleRasters (FileList=["file1.tif","file2.tif"],
	inPath=None,outPath=None,resolution="C:\\templateRaster.tif"):
	import arcpy, os
	if inPath==None: inPath=os.getcwd()
	if outPath==None: outPath=os.getcwd()
	for f in FileList:
		inFilePath = inPath+'\\'+f
		outFilePath = outPath+"\\scaled_"+f
		#Rescale Function Call 
		arcpy.Resample_management(inFilePath, outFilePath, resolution,"CUBIC")
	print("completed rescale of files @ location: " + outPath)

	
# arcSaveRasterBands -----------------------------------------------------------------
# saves raster individual bands from a multiband raster file 
def arcSaveRasterBands (InRaster='C:\\Users\\awiegman\\Downloads\\OtterData\\n_4307316_nw_1_20030912.jp2'):
	import arcpy, os
	# get a list of the bands that make up the raster
	arcpy.env.workspace = InRaster
	Bands = arcpy.ListRasters()
	for Bnd in Bands:
		# loop through the bands and export each one with CopyRaster
		InBand  = '{}\\{}'.format(InRaster,Bnd)
		bndDesc = arcpy.Describe(InBand)
		NoData  = bndDesc.noDataValue 
		InSplit = os.path.splitext(InRaster) # split the image name and extension
		# output file name is c:\\some\\path\\raster_Band_X.ext
		OutRaster  = '{}_{}{}'.format(InSplit[0],Bnd,InSplit[1])
		arcpy.CopyRaster_management(InBand,OutRaster,nodata_value = NoData)

		
# >>> ARCPY SPATIAL ANALYST FUNCTIONS................................................
# http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/an-overview-of-the-spatial-analyst-toolbox.htm

		
# arcExportNDVI ---------------------------------------------------------------------
# export normalized vegetation difference index from multispectral raster image
# CURRENTLY CRASHING ArcMap
# inRaster: multiband raster image
# redBand: band Number where red reflectance is stored 
# NIRBand: band Number where near infrared reflectance is stored 
def arcExportNDVI (inRaster,outPath,redBand=3,NIRBand=4):
	import arcpy, os
	from arcpy.sa import*
	red = arcpy.sa.Raster(inRaster+'\\Band_'+str(redBand))
	NIR = arcpy.sa.Raster(inRaster+'\\Band_'+str(NIRBand))

	# Calculate NDVI as (NIR-red)/(NIR+red)
	numerator = arcpy.sa.Float(NIR-red)
	denom = arcpy.sa.Float(NIR+red)
	NDVI = arcpy.sa.Divide(numerator, denom)

	# output file name is <input name>_NDVI.<ext>
	InSplit = os.path.splitext(os.path.basename(inRaster))
	outRaster = '{}_{}{}'.format(InSplit[0],"NDVI",InSplit[1])
	NDVI.save(os.path.join(outPath,outRaster))

# >>> >>> HYDROLOGY FUNCTIONS...
# http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/an-overview-of-the-hydrology-tools.htm

# basin ------------------------------------------------------------------
# Creates a raster delineating all drainage basins.

# fill -------------------------------------------------------------------
# Fills sinks in a surface raster to remove small imperfections in the data.
# http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/fill.htm
def archyFill(inDEM):
	return(arcpy.sa.Fill(inDEM))
	

# flowAccum -----------------------------------------------------------------
# Creates a raster of accumulated flow into each cell. A weight factor can optionally be applied.
# http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/flow-accumulation.htm
def archyFlowAccum(inFlowDir):
	return(arcpy.sa.FlowAccumulation(inFlowDir))
	

# flowDir --------------------------------------------------------------------
# Creates a raster of flow direction from each cell to its downslope neighbor, or neighbors, using D8, Multiple Flow Direction (MFD) or D-Infinity (DINF) methods.
# http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/flow-direction.htm
def archyFlowDir(inDEM):
	return(arcpy.sa.FlowDirection(inDEM))
	
	
# archyFlowDist -------------------------------------------------------------------
# Computes, for each cell, the horizontal or vertical component of minimum downslope distance, following the flow path(s), to cell(s) on a stream into which they flow.
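# (body not in the original; a minimal sketch, assuming arcpy.sa.FlowDistance,
# which is available only in newer ArcGIS releases)
def archyFlowDist(inStream,inDEM,inFlowDir):
	import arcpy
	return(arcpy.sa.FlowDistance(inStream, inDEM, inFlowDir))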



# archyFlowLen --------------------------------------------------------------------
# Calculates the upstream or downstream distance, or weighted distance, along the flow path for each cell.
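# (body not in the original; a minimal sketch, assuming arcpy.sa.FlowLength)
def archyFlowLen(inFlowDir,direction="DOWNSTREAM"):
	import arcpy
	return(arcpy.sa.FlowLength(inFlowDir, direction))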



# archySink -----------------------------------------------------------------------
# Creates a raster identifying all sinks or areas of internal drainage.
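# (body not in the original; a minimal sketch, assuming arcpy.sa.Sink)
def archySink(inFlowDir):
	import arcpy
	return(arcpy.sa.Sink(inFlowDir))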



# archySnapPrPnt --------------------------------------------------------------------
# Snaps pour points to the cell of highest flow accumulation within a specified distance.
def archySnapPrPnt(inPoint,inFlowAccum, searchDist=30, pourField="Name"):
	return(arcpy.sa.SnapPourPoint(inPoint, inFlowAccum, searchDist, pourField))


	
# archyStreamLink ---------------------------------------------------------------
# Assigns unique values to sections of a raster linear network between intersections.
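# (body not in the original; a minimal sketch, assuming arcpy.sa.StreamLink)
def archyStreamLink(inStream,inFlowDir):
	import arcpy
	return(arcpy.sa.StreamLink(inStream, inFlowDir))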



# archyStreamOrder ---------------------------------------------------------------
# Assigns a numeric order to segments of a raster representing branches of a linear network.
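# (body not in the original; a minimal sketch, assuming arcpy.sa.StreamOrder)
def archyStreamOrder(inStream,inFlowDir,orderMethod="STRAHLER"):
	import arcpy
	return(arcpy.sa.StreamOrder(inStream, inFlowDir, orderMethod))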



# archyStreamToFeature -----------------------------------------------------------
# Converts a raster representing a linear network to features representing the linear network.
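# (body not in the original; a minimal sketch, assuming arcpy.sa.StreamToFeature)
def archyStreamToFeature(inStream,inFlowDir,outFeatures,simplify="SIMPLIFY"):
	import arcpy
	return(arcpy.sa.StreamToFeature(inStream, inFlowDir, outFeatures, simplify))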



# archyWatershed ------------------------------------------------------------------
# Determines the contributing area above a set of cells in a raster.
def archyWatershed(inPoint,inFlowDir,pourField):
	return(arcpy.sa.Watershed(inFlowDir, inPoint, pourField))
	

	
		
# MAIN PROGRAM ###########################################################

# load modules
import os

# set workspace
arcpyWorkspace("C:\\Users\\awiegman\\Downloads\\OtterData\\")

# resample DEM rasters to 0.7 m resolution 
files = globSearchList(["*.jp2"]) # select raster files 
resolution = 0.7 
arcResampleRasters(FileList=files,inPath="C:\\Users\\awiegman\\Downloads\\OtterData",outPath="C:\\Temp\\Rescaled",resolution=resolution)

# reformat the files to GeoTiff
files = globSearchList(["*.jp2"])
arcReformatRaster(FileList=files,outPath="C:\\Temp\\Rescaled",Format="TIFF")

# resample color imagery rasters to 5 m resolution 
arcpyWorkspace("C:\\Users\\awiegman\\Downloads\\OtterData\\NAIP_imagery")
files = globSearchList(["*.jp2"]) # select raster files 
resolution = 10 
arcResampleRasters(FileList=files,inPath="C:\\Users\\awiegman\\Downloads\\OtterData\\NAIP_imagery",outPath="C:\\Temp\\Rescaled",resolution=resolution)

# save Raster Bands
arcpyWorkspace("C:\\Temp\\Rescaled")
files = globSearchList(["*.jp2"])
for f in files:
	arcSaveRasterBands(os.getcwd()+"\\"+f)
	
# calculate NDVI from raster bands and export result
arcpyWorkspace("C:\\Temp\\Rescaled")
files = globSearchList(["*.jp2"])
for f in files:
	raster = os.getcwd()+ "\\" + f
	arcExportNDVI(inRaster=raster,outPath=os.getcwd())
Exemplo n.º 52
0
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# Import system modules...
import sys, string, os, arcgisscripting

# Create the Geoprocessor object...
gp = arcgisscripting.create()

# Path to custom toolbox...
scriptdir = os.path.dirname(sys.argv[0])
toolboxpath = scriptdir + "\\..\\toolbox\\LandAdvisor-ITCP.tbx"
gp.AddToolbox(toolboxpath)

# Check out any necessary licenses
gp.CheckOutExtension("spatial")

# Script Arguments...
ProtectedAreas = sys.argv[1]
ProtectedAreasAdjacencyImportanceRaster = sys.argv[2]
PropertiesRaster = sys.argv[3]
RasterMaskSingleValue = sys.argv[4]
AdjacentOutputRaster = sys.argv[5]
Exemplo n.º 54
0
    )
    records = data.fetchall()
    toExcel.write_to_xlsx(records)
    cur.close()
    conn.close()


def main():
    overlay()
    create_table()
    data_statistic()


if __name__ == '__main__':
    input_dltb = "DLTB"
    input_xzdw = "XZDW"
    input_lxdw = "LXDW"
    output_dltb = "PDLTB"
    output_xzdw = "PXZDW"
    output_lxdw = "PLXDW"
    GP = arc.create(9.3)
    # Input feature class
    location = GP.GetParameterAsText(0)
    input_dk = GP.GetParameterAsText(1)
    input_ptb = GP.GetParameterAsText(2)
    is_hectare = GP.GetParameterAsText(3)
    # Set Workspace
    GP.Workspace = location
    toExcel = ManipulateExcel("土地利用现状统计表", location)  # workbook title: "Land use status statistics table"
    main()
Exemplo n.º 55
0
def Execute(self, parameters, messages):

    try:
        # Import system modules
        import sys, os, math, traceback, gc, importlib
        import arcpy
        import arcsdm.sdmvalues
        import arcsdm.workarounds_93
        try:
            importlib.reload(arcsdm.sdmvalues)
            importlib.reload(arcsdm.workarounds_93)
        except:
            reload(arcsdm.sdmvalues)
            reload(arcsdm.workarounds_93)
        # Create the Geoprocessor object
        #Todo: Refactor to arcpy.
        import arcgisscripting

        gp = arcgisscripting.create()

        # Check out any necessary licenses
        gp.CheckOutExtension("spatial")
        ''' Parameters '''

        Evidence = parameters[0].valueAsText  #gp.GetParameterAsText(0)
        Wts_Tables = parameters[1].valueAsText  #gp.GetParameterAsText(1)
        Training_Points = parameters[2].valueAsText  #gp.GetParameterAsText(2)
        IgnoreMsgData = parameters[3].value  #gp.GetParameter(3)
        MissingDataValue = parameters[4].value  #gp.GetParameter(4)
        #Cleanup extramessages after stuff
        #gp.AddMessage('Got arguments' )
        if IgnoreMsgData:  # for nodata argument to CopyRaster tool
            NoDataArg = MissingDataValue
        else:
            NoDataArg = '#'
        UnitArea = parameters[5].value  #gp.GetParameter(5)

        arcsdm.sdmvalues.appendSDMValues(gp, UnitArea, Training_Points)
        # Local variables...

        #Getting Study Area in counts and sq. kilometers

        Counts = arcsdm.sdmvalues.getMaskSize(
            arcsdm.sdmvalues.getMapUnits(True))
        gp.AddMessage("\n" + "=" * 21 + " Starting calculate response " +
                      "=" * 21)
        #gp.AddMessage(str(gp.CellSize))
        #CellSize = float(gp.CellSize)
        Study_Area = Counts / UnitArea  # getMaskSize returns mask size in sqkm now - TODO: WHy is this divided with UnitArea? (Counts * CellSize * CellSize / 1000000.0) / UnitArea
        gp.AddMessage(("%-20s %s" % ("Study Area:", str(Study_Area))))

        #Get number of training points
        numTPs = gp.GetCount_management(Training_Points)
        gp.AddMessage("%-20s %s" % ("# training points:", str(numTPs)))

        #Prior probability
        Prior_prob = float(numTPs) / Study_Area
        gp.AddMessage("%-20s %s" % ("Prior_prob:", str(Prior_prob)))

        #Get input evidence rasters
        Input_Rasters = Evidence.split(";")

        # Strip grouplayer names and remove the EXTRA ' ' quotes around spaced grouplayer names
        gp.AddMessage("Input rasters: " + str(Input_Rasters))
        for i, s in enumerate(Input_Rasters):
            Input_Rasters[i] = arcpy.Describe(s.strip("'")).file
        gp.AddMessage("Input rasters: " + str(Input_Rasters))

        #Get input weight tables
        Wts_Tables = Wts_Tables.split(";")
        #gp.AddMessage("Wts_Tables = " + str(Wts_Tables))

        #Create weight raster from raster's associated weights table
        #gp.AddMessage("Getting Weights rasters...")
        gp.OverwriteOutput = 1
        gp.LogHistory = 1
        Wts_Rasters = []
        i = 0
        # Create a list for the Missing Data Variance tool.
        rasterList = []
        # Evidence rasters should have missing data values, where necessary, for
        # NoData cell values within study area.
        # For each input_raster create a weights raster from the raster and its weights table.
        mdidx = 0
        ''' Weight rasters '''

        gp.AddMessage("\nCreating weight rasters ")
        arcpy.AddMessage("=" * 41)

        for Input_Raster in Input_Rasters:
            #<== RDB
            #++ Needs to be able to extract input raster name from full path.
            #++ Can't assume only a layer from ArcMap.
            ##        Output_Raster = os.path.join(gp.ScratchWorkspace,Input_Raster[:11] + "_W")
            ##Output_Raster = os.path.basename(Input_Raster)[:11] + "_W"
            #outputrastername = (Input_Raster[:9]) + "_W";
            #TODO: Do we need to consider if the file names collide with shapes? We got collision with featureclasses
            desc = arcpy.Describe(Input_Raster)

            Wts_Table = Wts_Tables[i]

            #Compare workspaces to make sure they match
            desc2 = arcpy.Describe(Wts_Table)

            #arcpy.AddMessage(desc.workspaceType);
            #arcpy.AddMessage(desc2.workspaceType);

            arcpy.AddMessage("Processing " + Input_Raster)

            outputrastername = Input_Raster.replace(".", "_")

            outputrastername = outputrastername[:10] + "_W"
            #outputrastername = desc.nameString + "_W2";
            # Create _W raster
            Output_Raster = gp.CreateScratchName(outputrastername, '',
                                                 'raster', gp.scratchworkspace)
            #gp.AddMessage("\n");
            #gp.AddMessage(" Outputraster: " + outputrastername);

            #Increase the count for next round
            i += 1

            #arcpy.AddMessage("WtsTable: " + Wts_Table);
            #Wts_Table = gp.Describe(Wts_Table).CatalogPath
            ## >>>>> Section replaced by join and lookup below >>>>>
            ##        try:
            ##            gp.CreateRaster_sdm(Input_Raster, Wts_Table, "CLASS", "WEIGHT", Output_Raster, IgnoreMsgData , MissingDataValue)
            ##        except:
            ##            gp.AddError(gp.getMessages(2))
            ##            raise
            ##        else:
            ##            gp.AddWarning(gp.getMessages(1))
            ##            gp.AddMessage(gp.getMessages(0))
            ## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

            #<==RDB Updated code
            #Same as CreateRaster above
            #++ Removed try: finally: statment as logic did not create any added effect.
            #++ only forced the remove join but what happens if join fails?
            #++ Need to create in-memory Raster Layer for Join
            #Check for unsigned integer raster; cannot have negative missing data
            if NoDataArg != '#' and gp.describe(
                    Input_Raster).pixeltype.upper().startswith('U'):
                NoDataArg2 = '#'
            else:
                NoDataArg2 = NoDataArg
            #Create new rasterlayer from input raster -> Result RasterLayer
            RasterLayer = "OutRas_lyr"
            arcpy.MakeRasterLayer_management(Input_Raster, RasterLayer)

            #++ AddJoin requires and input layer or tableview not Input Raster Dataset.
            #Join result layer with weights table
            arcpy.AddJoin_management(RasterLayer, "VALUE", Wts_Table, "CLASS")
            # This is where it crashes on Issue 44: https://github.com/gtkfi/ArcSDM/issues/44
            #return;

            # These are born in wrong place when the scratch workspace is filegeodatabase
            #Temp_Raster = os.path.join(arcpy.env.scratchFolder,'temp_raster')
            # Note! ScratchFolder doesn't seem to work
            #Note Scratch these:
            #Temp_Raster = os.path.join(arcpy.env.scratchWorkspace,'temp_raster')
            Temp_Raster = gp.CreateScratchName('temp_raster', '', 'raster',
                                               gp.scratchworkspace)
            #gp.AddMessage(" RasterLayer=" + RasterLayer);
            gp.AddMessage(" Temp_Raster=" + Temp_Raster)
            gp.AddMessage(" Wts_Table=" + Wts_Table)

            #Delete old temp_raster
            if gp.exists(Temp_Raster):
                arcpy.Delete_management(Temp_Raster)
                gc.collect()
                arcpy.ClearWorkspaceCache_management()

                gp.AddMessage("Deleted tempraster")

            #Copy created and joined raster to temp_raster
            gp.CopyRaster_management(RasterLayer, Temp_Raster, '#', '#',
                                     NoDataArg2)
            gp.AddMessage(" Output_Raster: " + Output_Raster)

            gp.Lookup_sa(Temp_Raster, "WEIGHT", Output_Raster)
            #return;
            #gp.addwarning(gp.getmessages())
            # ISsue 44 fix
            #arcpy.ClearWorkspaceCache_management()
            #arcpy.Delete_management(RasterLayer)

            #++ Optionally you can remove join but not necessary because join is on the layer
            #++ Better to just delete layer
            ##        #++ get name of join from the input table (without extention)
            ##        join = os.path.splitext(os.path.basename(Wts_Table))
            ##        join_name = join[0]
            ##        gp.RemoveJoin_management(RasterLayer,join_name)
            #<==

            #gp.AddMessage(Output_Raster + " exists: " + str(gp.Exists(Output_Raster)))
            if not gp.Exists(Output_Raster):
                gp.AddError(" " + Output_Raster + " does not exist.")
                raise
            #Output_Raster = gp.Describe(Output_Raster).CatalogPath
            Wts_Rasters.append(Output_Raster)
            #Check for Missing Data in raster's Wts table
            if not IgnoreMsgData:
                # Update the list for Missing Data Variance Calculation
                #gp.addMessage("Debug: Wts_Table = " + Wts_Table);
                tblrows = gp.SearchCursor(Wts_Table,
                                          "Class = %s" % MissingDataValue)
                tblrow = tblrows.Next()
                if tblrow:
                    rasterList.append(gp.Describe(Output_Raster).CatalogPath)
            arcpy.AddMessage(" ")  #Cycle done - add ONE linefeed
        #Get Post Logit Raster
        ''' Post Logit Raster '''

        gp.AddMessage("\n" + "Getting Post Logit raster...\n" + "=" * 41)
        # This used to be comma separated, now +
        Input_Data_Str = ' + '.join(
            '"{0}"'.format(w)
            for w in Wts_Rasters)  # plus-delimited list of quoted raster names
        arcpy.AddMessage(" Input_data_str: " + Input_Data_Str)
        Constant = math.log(Prior_prob / (1.0 - Prior_prob))
        if len(Wts_Rasters) == 1:
            InExpressionPLOG = "%s + %s" % (Constant, Input_Data_Str)
        else:
            InExpressionPLOG = "%s + (%s)" % (Constant, Input_Data_Str)
        #gp.AddMessage("="*41);
        gp.AddMessage(" InexpressionPlog: " + InExpressionPLOG)
        #gp.AddMessage("InExpression = " + str(InExpression))
        ##    PostLogit = os.path.join(gp.Workspace, OutputPrefix + "_PLOG")
        ##    try:
        ##        pass
        ##        gp.SingleOutputMapAlgebra_sa(InExpression, PostLogit)
        ##    except:
        ##        gp.AddError(gp.getMessages(2))
        ##        raise
        ##    else:
        ##        gp.AddWarning(gp.getMessages(1))
        ##        gp.AddMessage(gp.getMessages(0))

        #gp.AddMessage(" Wts_rasters: " + str(Wts_Rasters));

        #Get Post Probability Raster
        #gp.AddMessage("Exists(PostLogit) = " + str(gp.Exists(PostLogit)))

        gp.AddMessage("\nCreating Post Probability Raster...\n" + "=" * 41)
        try:
            #pass
            #PostLogitRL = os.path.join( gp.Workspace, "PostLogitRL")
            #gp.MakeRasterLayer_management(PostLogit,PostLogitRL)
            #InExpression = "EXP(%s) / ( 1.0 + EXP(%s))" %(PostLogitRL,PostLogitRL)
            PostProb = parameters[6].valueAsText  #gp.GetParameterAsText(6)
            ##InExpression = "EXP(%s) / (1.0 + EXP(%s))" %(InExpressionPLOG,InExpressionPLOG)

            #Pre arcgis pro expression
            #InExpression = "%s = EXP(%s) / (1.0 + EXP(%s))" %(PostProb,InExpressionPLOG,InExpressionPLOG)  # <==RDB update to MOMA  07/01/2010
            InExpression = "Exp(%s) / (1.0 + Exp(%s))" % (InExpressionPLOG,
                                                          InExpressionPLOG)
            #gp.AddMessage("InExpression = " + str(InExpression))
            gp.addmessage("InExpression 1 ====> " +
                          InExpression)  # <==RDB  07/01/2010
            # Fix: This is obsolete
            #gp.MultiOutputMapAlgebra_sa(InExpression)  # <==RDB  07/01/2010
            gp.AddMessage("Postprob: " + PostProb)
            output_raster = gp.RasterCalculator(InExpression, PostProb)
            #output_raster.save(postprob)
            #gp.SingleOutputMapAlgebra_sa(InExpression, PostProb)
            #gp.SetParameterAsText(6, PostProb)
        except:
            gp.AddError(gp.getMessages(2))
            raise
        else:
            gp.AddWarning(gp.getMessages(1))
            gp.AddMessage(gp.getMessages(0))
        #gp.AddMessage("Exists(PostProb) = " + str(gp.Exists(PostProb)))

        #Create STD raster from raster's associated weights table
        gp.AddMessage("\nCreating STD rasters...\n" + "=" * 41)
        Std_Rasters = []
        i = 0
        mdidx = 0
        for Input_Raster in Input_Rasters:
            arcpy.AddMessage(" Processing " + Input_Raster)
            #<== RDB
            #++ Needs to be able to extract input raster name from full path.
            #++ Can't assume only a layer from ArcMap.
            ##Output_Raster = Input_Raster[:11] + "_S"
            ##Output_Raster = os.path.basename(Input_Raster)[:11] + "_S"
            stdoutputrastername = os.path.basename(Input_Raster[:9]).replace(
                ".", "_") + "_S"
            # No . allowed in filegeodatgabases

            #arcpy.AddMessage("Debug:" + stdoutputrastername);

            Output_Raster = gp.CreateScratchName(stdoutputrastername, '',
                                                 'raster', gp.scratchworkspace)
            #print ("DEBUG STD1");
            Wts_Table = Wts_Tables[i]

            i += 1
            #Wts_Table = gp.Describe(Wts_Table).CatalogPath
            ##        gp.CreateRaster_sdm(Input_Raster, Wts_Table, "CLASS", "W_STD", Output_Raster, IgnoreMsgData, MissingDataValue)
            gp.AddMessage("OutputRaster:" + Output_Raster + " exists: " +
                          str(gp.Exists(Output_Raster)))

            #<== Updated RDB
            #++ Same as calculate weight rasters above
            #++ Need to create in-memory Raster Layer for Join
            #Check for unsigned integer raster; cannot have negative missing data

            if NoDataArg != '#' and gp.describe(
                    Input_Raster).pixeltype.upper().startswith('U'):
                NoDataArg2 = '#'
            else:
                NoDataArg2 = NoDataArg
            #arcpy.AddMessage("Debug: " + str(NoDataArg));
            RasterLayer = "OutRas_lyr2"
            gp.makerasterlayer(Input_Raster, RasterLayer)
            #++ Input to AddJoin must be a Layer or TableView
            gp.AddJoin_management(RasterLayer, "Value", Wts_Table, "CLASS")
            # Folder doesn't seem to do the trick...
            #Temp_Raster = os.path.join(arcpy.env.scratchFolder,'temp_raster')
            #Temp_Raster = os.path.join(arcpy.env.scratchWorkspace,'temp_raster2')
            Temp_Raster = gp.CreateScratchName('temp_raster', '', 'raster',
                                               gp.scratchworkspace)

            if gp.exists(Temp_Raster):
                arcpy.Delete_management(Temp_Raster)
                gc.collect()
                arcpy.ClearWorkspaceCache_management()
                gp.AddMessage("Tmpraster deleted.")
            gp.AddMessage("RasterLayer=" + RasterLayer)
            gp.AddMessage("Temp_Raster=" + Temp_Raster)

            arcpy.CopyRaster_management(RasterLayer, Temp_Raster, "#", "#",
                                        NoDataArg2)
            #gp.AddMessage("DEBUG STD1");
            gp.Lookup_sa(Temp_Raster, "W_STD", Output_Raster)
            # Issue 44 fix - no delete on temprasters
            #arcpy.Delete_management(RasterLayer)
            #gp.AddMessage("DEBUG STD1");
            #++ Optionally you can remove join but not necessary because join is on the layer
            #++ Better to just delete layer
            ##        #get name of join from the input table (without extenstion)
            ##        join = os.path.splitext(os.path.basename(Wts_Table))
            ##        join_name = join[0]
            ##        gp.RemoveJoin_management(RasterLayer,join_name)
            #<==

            if not gp.Exists(Output_Raster):
                gp.AddError(Output_Raster + " does not exist.")
                raise
            #Output_Raster = gp.Describe(Output_Raster).CatalogPath
            Std_Rasters.append(Output_Raster)
            gp.AddMessage(Output_Raster)  # <==RDB 07/01/2010
        #gp.AddMessage("Created Std_Rasters = " + str(Std_Rasters))

        gp.AddMessage("\nCreating Post Probability STD Raster...\n" + "=" * 41)
        #SQRT(SUM(SQR(kbgeol2_STD), SQR(kjenks_Std), SQR(rclssb2_Std)))
        PostProb_Std = parameters[7].valueAsText  #gp.GetParameterAsText(7)

        #TODO: Figure out what this does!? TR
        #TODO: This is always false now
        if len(Std_Rasters) == 1:  #If there is only one input... ??? TR
            InExpression = '"%s"' % (Std_Rasters[0])
        else:
            SUM_args_list = []
            for Std_Raster in Std_Rasters:
                SUM_args_list.append("Square(\"%s\")" % Std_Raster)
            #SUM_args = ",".join(SUM_args_list)
            SUM_args = " + ".join(SUM_args_list)
            gp.AddMessage("Sum_args: " + SUM_args + "\n" + "=" * 41)

            #Input_Data_Str = ' + '.join('"{0}"'.format(w) for w in Wts_Rasters) #must be comma delimited string list

            Constant = 1.0 / float(numTPs)
            ##InExpression = "SQRT(SQR(%s) * (%s + SUM(%s)))" %(PostProb,Constant,SUM_args)
            #InExpression = "SQRT(SQR(%s) * (%s + SUM(%s)))" %(PostProb,Constant,SUM_args)  # PRe ARcGis pro
            InExpression = "SquareRoot(Square(\"%s\") * (%s +(%s)))" % (
                PostProb, Constant, SUM_args)
            gp.AddMessage("InExpression = " + str(InExpression))
        #SQRT(SUM(SQR(rclssb2_md_S),SQR(kbgeol2_md_S)))
        try:
            gp.addmessage("InExpression 2 ====> " + InExpression)  # <==RDB
            #gp.MultiOutputMapAlgebra_sa(InExpression)   # <==RDB  07/01/2010
            output_raster = gp.RasterCalculator(InExpression, PostProb_Std)
            #gp.SingleOutputMapAlgebra_sa(InExpression, PostProb_Std)
            #gp.SetParameterAsText(7,PostProb_Std)
        except:
            gp.AddError(gp.getMessages(2))
            raise
        else:
            gp.AddWarning(gp.getMessages(1))
            gp.AddMessage(gp.getMessages(0))
        #gp.AddMessage("Exists(PostProb_Std) = " + str(gp.Exists(PostProb_Std)))

        #Create Variance of missing data here and create totVar = VarMD + SQR(VarWts)
        if not IgnoreMsgData:
            #Calculate Missing Data Variance
            #gp.AddMessage("RowCount=%i"%len(rasterList))
            if len(rasterList) > 0:
                import missingdatavar_func
                gp.AddMessage("Calculating Missing Data Variance...")
                ##            MDRasters=[]
                ##            for i in range(len(rasterList)):
                ##                MDRasters.append(str(rasterList[i]))
                MDRasters = rasterList
                #gp.AddMessage("MissingDataRasters = " + str(MDRasters))
                try:
                    MDVariance = parameters[
                        8].valueAsText  #gp.GetParameterAsText(8)
                    if gp.exists(MDVariance):
                        arcpy.Delete_management(MDVariance)
                    #<== Tool DOES NOT EXIST = FAIL
                    #gp.MissingDataVariance_sdm(rasterList,PostProb,MDVariance)
                    missingdatavar_func.MissingDataVariance(
                        gp, rasterList, PostProb, MDVariance)
                    Total_Std = parameters[
                        9].valueAsText  #gp.GetParameterAsText(9)
                    ##InExpression = 'SQRT(SUM(SQR(%s),%s))' % (PostProb_Std, MDVariance)
                    # OBsolete, replaced with raster calc
                    #InExpression = "\"%s\" = SQRT(SUM(SQR(\"%s\"),\"%s\"))" % (Total_Std, PostProb_Std, MDVariance)  # <==RDB update to MOMA
                    #InExpression = "SquareRoot(SUM ( Square (\"%s\"),\"%s\"))" % ( PostProb_Std, MDVariance)  # <==RDB update to MOMA
                    InExpression = "SquareRoot( Square (\"%s\") + \"%s\")" % (
                        PostProb_Std, MDVariance)  # <==RDB update to MOMA
                    #gp.SetParameterAsText(8,MDVariance)
                    #gp.AddMessage(InExpression)
                    gp.AddMessage("Calculating Total STD...")
                    gp.addmessage("InExpression 3 ====> " +
                                  InExpression)  # <==RDB
                    #gp.MultiOutputMapAlgebra_sa(InExpression)  # <==RDB
                    output_raster = gp.RasterCalculator(
                        InExpression, Total_Std)
                    #gp.SingleOutputMapAlgebra_sa(InExpression, Total_Std)
                    #gp.SetParameterAsText(9,Total_Std)
                except:
                    gp.AddError(gp.getMessages(2))
                    raise
            else:
                gp.AddWarning(
                    "No evidence with missing data. Missing Data Variance not calculated."
                )
                MDVariance = None
                Total_Std = PostProb_Std
        else:
            gp.AddWarning(
                "Missing Data Ignored. Missing Data Variance not calculated.")
            MDVariance = None
            Total_Std = PostProb_Std
        #Confidence is PP / sqrt(totVar)
        gp.AddMessage("\nCalculating Confidence...\n" + "=" * 41)
        #PostProb1 / PP_Std
        ##    PostProbRL = os.path.join( gp.Workspace, "PostProbRL")
        ##    gp.MakeRasterLayer_management(PostProb,PostProbRL)
        PostProbRL = gp.describe(PostProb).catalogpath
        ##    PostProb_StdRL = os.path.join( gp.Workspace, "PostProb_StdRL")
        ##    gp.MakeRasterLayer_management(Total_Std, PostProb_StdRL)
        PostProb_StdRL = gp.describe(Total_Std).catalogpath
        Confidence = parameters[10].valueAsText  #gp.GetParameterAsText(10)
        #InExpression = PostProbRL + " / " + PostProb_StdRL
        #InExpression = "%s = %s / %s" %(Confidence,PostProbRL,PostProb_StdRL)  # PreARcGis pro
        InExpression = '"%s" / "%s"' % (PostProbRL, PostProb_StdRL
                                        )  # <==RDB update to MOMA
        #gp.AddMessage("InExpression = " + str(InExpression))
        gp.addmessage("InExpression 4====> " + InExpression)  # <==RDB
        try:
            #gp.MultiOutputMapAlgebra_sa(InExpression)  # <==RDB
            output_raster = arcpy.gp.RasterCalculator_sa(
                InExpression, Confidence)
            #gp.SingleOutputMapAlgebra_sa(InExpression, Confidence)
            #gp.SetParameterAsText(10,Confidence)
        except:
            gp.AddError(gp.getMessages(2))
            raise
        else:
            gp.AddWarning(gp.getMessages(1))
            gp.AddMessage(gp.getMessages(0))
        #Set derived output parameters
        gp.SetParameterAsText(6, PostProb)
        gp.SetParameterAsText(7, PostProb_Std)
        if MDVariance and (not IgnoreMsgData):
            gp.SetParameterAsText(8, MDVariance)
        else:
            gp.AddWarning('No Missing Data Variance.')
        if not (Total_Std == PostProb_Std): gp.SetParameterAsText(9, Total_Std)
        else: gp.AddWarning('Total STD same as Post Probability STD.')
        gp.SetParameterAsText(10, Confidence)

        gp.addmessage("done\n" + "=" * 41)
    except arcpy.ExecuteError as e:
        #TODO: Clean up all these execute errors in final version
        arcpy.AddError("\n")
        arcpy.AddMessage("Calculate response caught arcpy.ExecuteError ")
        gp.AddError(arcpy.GetMessages())
        if len(e.args) > 0:
            #arcpy.AddMessage("Calculate weights caught arcpy.ExecuteError: ");
            args = e.args[0]
            args.split('\n')
            #arcpy.AddError(args);

        arcpy.AddMessage("-------------- END EXECUTION ---------------")
        raise
    except:
        # get the traceback object
        tb = sys.exc_info()[2]
        # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n    " + \
                str(sys.exc_info()[0]) + ": " + str(sys.exc_info()[1]) + "\n"
        # generate a message string for any geoprocessing tool errors
        msgs = "GP ERRORS:\n" + arcpy.GetMessages(2) + "\n"
        arcpy.AddError(msgs)

        # return gp messages for use with a script tool
        arcpy.AddError(pymsg)

        # print messages for use in Python/PythonWin
        print(pymsg)
        print(msgs)

        raise
Exemplo n.º 56
0
import arcpy
import arcgisscripting
from arcpy.sa import *
import timeit

# Checkout the Spatial Analyst Toolkit
arcpy.CheckOutExtension('Spatial')

# from p281 of python scripting for arcgis
script = arcgisscripting.create(9.3)

start = timeit.default_timer()


def main(ws_path, dem_path, extent_path, clip_dem='clipDEM', cost_raster=''):
    """
    Calculate water depth from a flood extent polygon (e.g. from remote sensing analysis) based on
    an underlying DEM (or HAND).

    Program procedure:
        1. Flood extent polygon to polyline
        2. Polyline to Raster - DEM extent and resolution (Env)
        3. Con - DEM values to Raster
        4. Euclidean Allocation - assign boundary cell elevation to nearest domain cells
        5. Calculate water depth by deducting DEM by Euclidean Allocation
        6. Run low-pass Filter

    Created by Sagy Cohen and Austin Raney, Surface Dynamics Modeling Lab, University of Alabama
    email: [email protected]; [email protected]
    web: http://sdml.ua.edu
    """
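    # The function body is missing from this listing; a minimal sketch of the
    # six documented steps, assuming a checked-out Spatial Analyst license and
    # valid ws_path/dem_path/extent_path inputs (field and dataset names are
    # illustrative only):
    arcpy.env.workspace = ws_path
    arcpy.env.snapRaster = dem_path
    arcpy.env.extent = dem_path
    arcpy.env.cellSize = dem_path
    dem = Raster(dem_path)
    # 1. Flood extent polygon to polyline
    boundary = arcpy.PolygonToLine_management(extent_path, 'extent_line')
    # 2. Polyline to raster at the DEM extent and resolution (set via env above)
    boundary_ras = arcpy.PolylineToRaster_conversion(boundary, 'OBJECTID', 'extent_ras')
    # 3. Con - keep DEM elevations on the boundary cells only
    boundary_elev = Con(IsNull(boundary_ras) == 0, dem)
    # 4. Euclidean Allocation - give every flooded cell the elevation of its
    #    nearest boundary cell (scaled to int: allocation needs integer zones)
    wse = Float(EucAllocation(Int(boundary_elev * 1000))) / 1000
    # 5. Water depth = allocated boundary elevation minus the DEM, inside the extent
    depth = ExtractByMask(Con(wse > dem, wse - dem), extent_path)
    # 6. Low-pass filter to smooth the result
    depth_smooth = Filter(depth, 'LOW')
    depth_smooth.save(clip_dem)
    return depth_smooth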
#This is the output location and name of the seep frequency raster.
Reclass_Output= arcpy.GetParameterAsText(1)
Kmz_Name= arcpy.GetParameterAsText(2)
Kmz_Var= folder_path + "/" + str(Kmz_Name) + ".kmz"


#If the Reclass_Output name is longer than 8 characters, the tool fails with a
#confusing error message that does not explain the problem, so reject long
#names up front.
(path, file_name) = os.path.split(Reclass_Output)
if len(file_name) >= 9:
    arcpy.AddError("The 'Output' name you have entered is too long. "
                   "Please rerun the tool with a name of 8 characters or less.")
    sys.exit(1)

#Workspace setup
arcpy.env.workspace = folder_path
gp = arcgisscripting.create(10.1)
gp.workspace = folder_path

#Iteration counter used by the if/else in the loop below.
x = 0

#Holds the running raster sum so each iteration can add to the previous result.
main_list = list()

#Creates a list of GRIDs in the workspace.
Rasters = gp.ListRasters("", "GRID")

#Loop over every GRID in the workspace, selecting those with 13-character names.
for raster in Rasters:
    if len(raster) == 13:
        #Store the most recent matching raster from the folder.
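        # The original listing breaks off at this point. What follows is a
        # minimal sketch of the accumulation the comments describe, assuming
        # arcpy.sa raster algebra (an editorial assumption, not the original
        # code):
        temp_list = [Raster(raster)]
        if x == 0:
            main_list.append(temp_list[0])                   # first GRID starts the sum
        else:
            main_list.append(main_list[-1] + temp_list[0])   # add to the running sum
        x += 1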
Exemplo n.º 58
0
def loadEcoregion( eco ):
    try:
        gp = arcgisscripting.create(9.3)    
        gp.OverwriteOutput = True
        trlog = TrendsUtilities.trLogger()
        dataAvailable = True
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsChangeData"
        else:
            sourceName = "CustomChangeData"
        nointervals = []
        dbReader = databaseReadClass.changeRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.ecoData:
            dataFound = dbReader.databaseRead( gp, eco.analysisNum, eco, interval,
                                                   eco.resolution, eco.ecoData[interval][0],
                                                   "data", TrendsNames.statisticsNames)
            if not dataFound:
                nointervals.append( interval )
                dataAvailable = False
                trlog.trwrite("No data found for ecoregion " + str(eco.ecoNum) + " and interval " +\
                          str(interval))

        #Now remove intervals where no data was found
        for interval in nointervals:
            trlog.trwrite("Removing interval " + interval + " for ecoregion " + str(eco.ecoNum))
            del eco.ecoData[ interval ]
        #If some but not all intervals removed, reset 'datafound' flag
        if len(eco.ecoData) > 0:
            dataAvailable = True

        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsChangeStats"
        else:
            sourceName = "CustomChangeStats"
        dbReader = databaseReadClass.changeRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.ecoData:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval,
                                        eco.resolution, eco.ecoData[interval][1],
                                       "stats", TrendsNames.statisticsNames)
                
        #Now update the arrays for composition based on the intervals found
        folderList = eco.ecoData.keys()            
        currentyears = TrendsUtilities.getYearsFromIntervals( gp, folderList )
        compyears = eco.ecoComp.keys()
        for date in compyears:
            if not (date in currentyears):
                del eco.ecoComp[ date ]

        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsCompData"
        else:
            sourceName = "CustomCompData"
        dbReader = databaseReadClass.compositionRead( gp, TrendsNames.dbLocation + sourceName)
        for year in eco.ecoComp:
            dbReader.databaseRead( gp, eco.analysisNum, eco, year, eco.resolution,
                                             eco.ecoComp[year][0], "data", "")
                
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsCompStats"
        else:
            sourceName = "CustomCompStats"
        dbReader = databaseReadClass.compositionRead( gp, TrendsNames.dbLocation + sourceName)
        for year in eco.ecoComp:
            dbReader.databaseRead( gp, eco.analysisNum, eco, year, eco.resolution,
                                             eco.ecoComp[year][1], "stats", TrendsNames.statisticsNames)

        #Load multichange data where found.
        nointervals = []
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsMultichangeData"
        else:
            sourceName = "CustomMultichangeData"
        dbReader = databaseReadClass.multichangeRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.ecoMulti:
            dataFound = dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                       eco.ecoMulti[interval][0],"data", "")
            if not dataFound:
                nointervals.append( interval )
                trlog.trwrite("No multichange data found for ecoregion " + str(eco.ecoNum) + " and interval " +\
                          str(interval))

        #Now remove intervals where no data was found
        for interval in nointervals:
            trlog.trwrite("Removing multichange interval " + interval + " for ecoregion " + str(eco.ecoNum))
            del eco.ecoMulti[ interval ]

        #Remove aggregate intervals if this eco isn't within the data boundaries.
        #Iterate over a copy of the key list, since entries are removed inside
        #the loop (deleting by index while iterating the same list skips items).
        keylist = list(eco.aggregate_gross_keys)
        compyears = eco.ecoComp.keys()
        for interval in keylist:
            start, end = interval.split('to')   #interval keys have the form '<start>to<end>'
            if not (start in compyears) or not (end in compyears):
                eco.aggregate_gross_keys.remove(interval)

        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsMultichangeStats"
        else:
            sourceName = "CustomMultichangeStats"
        dbReader = databaseReadClass.multichangeRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.ecoMulti:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                   eco.ecoMulti[interval][1],"stats", TrendsNames.statisticsNames)

        #Load the calculated data for glgn, all change and aggregate
        setUpArrays( eco )
                
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsGlgnData"
        else:
            sourceName = "CustomGlgnData"
        dbReader = databaseReadClass.glgnRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.ecoGlgn:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                             eco.ecoGlgn[interval],"data", "")
                
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsGlgnStats"
        else:
            sourceName = "CustomGlgnStats"
        dbReader = databaseReadClass.glgnRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.ecoGlgn:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                             eco.ecoGlgn[interval],
                                       "stats", TrendsNames.statisticsNames)

        source = 'gross'
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsAggregateData"
        else:
            sourceName = "CustomAggregateData"
        dbReader = databaseReadClass.aggregateRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.aggregate_gross_keys:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,source,
                                             eco.aggregate[interval][source][0],"data", "")
                    
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsAggregateStats"
        else:
            sourceName = "CustomAggregateStats"
        dbReader = databaseReadClass.aggregateRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.aggregate_gross_keys:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution, source,
                                             eco.aggregate[interval][source][1],
                                           "stats", TrendsNames.statisticsNames)

        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsAggGlgnData"
        else:
            sourceName = "CustomAggGlgnData"
        dbReader = databaseReadClass.aggGlgnRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.aggregate_gross_keys:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,source,
                                             eco.aggGlgn[interval][source],
                                       "data", "")
                    
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsAggGlgnStats"
        else:
            sourceName = "CustomAggGlgnStats"
        dbReader = databaseReadClass.aggGlgnRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.aggregate_gross_keys:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution, source,
                                             eco.aggGlgn[interval][source],
                                       "stats", TrendsNames.statisticsNames)

        sourceC = 'conversion'
        sourceA = 'addgross'
        sourceM = 'multichange'
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsAllChangeData"
        else:
            sourceName = "CustomAllChangeData"
        dbReader = databaseReadClass.allChangeRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.ecoData:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                       sourceC,
                                        eco.allChange[interval][sourceC][0],
                                       "data", "")
        for interval in eco.aggregate_gross_keys:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                       sourceA,
                                        eco.allChange[interval][sourceA][0],
                                       "data", "")
        for interval in eco.ecoMulti:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                   sourceM,
                                    eco.allChange[interval][sourceM][0],
                                   "data", "")
                    
        if eco.analysisNum == TrendsNames.TrendsNum:
            sourceName = "TrendsAllChangeStats"
        else:
            sourceName = "CustomAllChangeStats"
        dbReader = databaseReadClass.allChangeRead( gp, TrendsNames.dbLocation + sourceName)
        for interval in eco.ecoData:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                       sourceC, eco.allChange[interval][sourceC][1],
                                       "stats", TrendsNames.statisticsNames)
        for interval in eco.aggregate_gross_keys:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                       sourceA, eco.allChange[interval][sourceA][1],
                                       "stats", TrendsNames.statisticsNames)
        for interval in eco.ecoMulti:
            dbReader.databaseRead( gp, eco.analysisNum, eco, interval, eco.resolution,
                                    sourceM, eco.allChange[interval][sourceM][1],
                                   "stats", TrendsNames.statisticsNames)
        return dataAvailable
    except arcgisscripting.ExecuteError:
        # Get the geoprocessing error messages
        msgs = gp.GetMessage(0)
        msgs += gp.GetMessages(2)
        trlog.trwrite(msgs)
        raise            
    except TrendsUtilities.TrendsErrors, Terr:
        #Get errors specific to Trends execution
        trlog.trwrite( Terr.message )
        raise
Exemplo n.º 59
0
# ---------------------------------------------------------------------------------
# Author: Carlos Navarro
# Date: June 13th, 2011
# Purpose: Extract PRECIS surfaces by mask
# ----------------------------------------------------------------------------------

import arcgisscripting, os, sys, string
gp = arcgisscripting.create(9.3)

#Syntax
if len(sys.argv) < 4:
	os.system('cls')
	print "\n Too few args"
	print "   - ie: python Extract_Mask.py L:\climate_change\RCM_Data\SRES_A1B D:\Masks\ColPlains\ColPlains.shp D:\climate_change\RCM_Data\_extract_ColPlains\SRES_A1B"
	print "   Syntax	: <Extract_MaskGCM.py>, <dirbase>, <mask>, <dirout>"
	print "   dirbase	: Root folder where are storaged the datasets"
	print "	  mask		: shape with full path and extension"
	print "   dirout	: Out folder"
	sys.exit(1)

#Set variables
dirbase = sys.argv[1]
mask = sys.argv[2]
dirout = sys.argv[3]

gp.CheckOutExtension("Spatial")
os.system('cls')

print "~~~~~~~~~~~~~~~~~~~~~~~~"
print " EXTRACT BY MASK PRECIS "
print "~~~~~~~~~~~~~~~~~~~~~~~~"
Exemplo n.º 60
0
def slope_calc(batch_point, workspace, drain, epsg, dem, uttl):

    gp = arcgisscripting.create()
    gp.CheckOutExtension("Spatial")
    gp.SetProgressor('default', 'starting vertex extraction...')
    arcpy.env.overwriteOutput = True

    folder = os.path.dirname(workspace)
    UTTL_Basins = uttl.split('/')[-1]

    arcpy.SplitLineAtPoint_management(
        os.path.join(workspace, 'SmoothDrain3D'), batch_point,
        os.path.join(folder, r'Temp/SmoothDrain3DSplit.shp'))

    arcpy.AddField_management(
        os.path.join(folder, r'Temp/SmoothDrain3DSplit.shp'), 'Slope', 'FLOAT',
        6, 4, "", 'Slope', 'NULLABLE', 'REQUIRED')

    arcpy.AddGeometryAttributes_management(
        os.path.join(folder, r'Temp/SmoothDrain3DSplit.shp'), 'LENGTH')
    arcpy.AddGeometryAttributes_management(
        os.path.join(folder, r'Temp/SmoothDrain3DSplit.shp'),
        'LINE_START_MID_END')
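    # Slope per segment = (start elevation - end elevation) / length, i.e. rise over run (m/m).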
    arcpy.CalculateField_management(
        os.path.join(folder, r'Temp/SmoothDrain3DSplit.shp'), 'Slope',
        '( !START_Z! - !END_Z! ) / !LENGTH!', 'PYTHON', '#')

    keep_field = ['FID', 'Shape', 'LENGTH', 'START_Z', 'END_Z', 'Slope']
    fields = [
        x.name for x in arcpy.ListFields(
            os.path.join(folder, r'Temp/SmoothDrain3DSplit.shp'))
    ]

    arcpy.DeleteField_management(
        os.path.join(folder, r'Temp/SmoothDrain3DSplit.shp'),
        [x for x in fields if x not in keep_field])

    arcpy.MakeFeatureLayer_management(os.path.join(workspace, UTTL_Basins),
                                      'UTTL_Basins')
    arcpy.MakeFeatureLayer_management(
        os.path.join(folder, r'Temp/SmoothDrain3DSplit.shp'),
        'SmoothDrain3DSplit')

    arcpy.Intersect_analysis(in_features='UTTL_Basins #;SmoothDrain3DSplit #',
                             out_feature_class=os.path.join(
                                 folder,
                                 r'Temp/SmoothDrain3DSplitIntersect.shp'),
                             join_attributes='NO_FID',
                             cluster_tolerance='-1 Unknown',
                             output_type='INPUT')

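    # Dissolve back to one record per basin (Name), keeping the longest segment
    # length and area, the steepest slope, and the max start / min end elevations.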
    arcpy.Dissolve_management(
        in_features=os.path.join(folder,
                                 r'Temp/SmoothDrain3DSplitIntersect.shp'),
        out_feature_class=os.path.join(folder, r'Temp/SD3DSIDissolve.shp'),
        dissolve_field='Name',
        statistics_fields=
        'LENGTH MAX;Shape_area MAX;Slope MAX;START_Z MAX;END_Z MIN',
        multi_part='MULTI_PART',
        unsplit_lines='DISSOLVE_LINES')

    arcpy.CopyFeatures_management(
        os.path.join(folder, r'Temp/SD3DSIDissolve.shp'),
        os.path.join(workspace, r'Drain_UTTL'))

    table2csv(os.path.join(folder, r'Temp/SD3DSIDissolve.shp'),
              os.path.join(folder, r'Temp/TableSlope.csv'))
    df_slope = pd.read_csv(os.path.join(folder, r'Temp/TableSlope.csv'),
                           index_col='Name')
    df_slope.drop(['FID', 'Shape', 'MAX_Shape_'], axis=1, inplace=True)
    df_slope.columns = ['Length', 'Slope', 'Start Z', 'End Z']
    df_slope.index.name = 'Name'
    df_slope.to_csv(os.path.join(folder, r'Temp/TableSlope.csv'), header=True)

    # Deleting Temporally Files
    arcpy.Delete_management(os.path.join(folder, r'Temp/SD3DSIDissolve.shp'))
    arcpy.Delete_management(
        os.path.join(folder, r'Temp/SmoothDrain3DSplit.shp'))
    arcpy.Delete_management(
        os.path.join(folder, r'Temp/SmoothDrain3DSplitIntersect.shp'))
    arcpy.Delete_management(os.path.join(workspace, 'Drain3D'))
    arcpy.Delete_management(os.path.join(workspace, 'SmoothDrain3D_UTM'))

    gp.AddMessage('Slope Algorithm was successful')
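
# Example call (hypothetical paths and parameter values, for illustration only):
# slope_calc(batch_point='C:/proj/points.shp', workspace='C:/proj/results.gdb',
#            drain='drain', epsg=3116, dem='C:/proj/dem.tif',
#            uttl='C:/proj/results.gdb/UTTL_Basins')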