def STEP7_calc_centrality():
    """Analyze network centrality using Circuitscape,
    given Linkage Mapper outputs.
    """
    try:
        lu.dashline(0)
        gprint('Running script ' + _SCRIPT_NAME)

        arcpy.env.workspace = cfg.SCRATCHDIR

        # Check for valid LCP shapefile
        prevLcpShapefile = lu.get_lcp_shapefile(None, thisStep=7)
        if not arcpy.Exists(prevLcpShapefile):
            msg = ('Cannot find an LCP shapefile from step 5. Please '
                   'rerun that step and any previous ones if necessary.')
            lu.raise_error(msg)

        # Remove lcp shapefile from this step if run previously
        lcpShapefile = path.join(cfg.DATAPASSDIR, "lcpLines_s7.shp")
        lu.delete_data(lcpShapefile)

        invalidFNs = ['fid', 'id', 'oid', 'shape']
        if cfg.COREFN.lower() in invalidFNs:
            lu.dashline(1)
            msg = ('ERROR: Core area field names ID, FID, SHAPE, and OID'
                   ' are reserved for ArcGIS. \nPlease choose another'
                   ' field; values must be positive integers.')
            lu.raise_error(msg)

        lu.dashline(1)
        gprint('Mapping centrality of network cores and links'
               '\nusing Circuitscape...')
        lu.dashline(0)

        # Set the analysis extent and cell size to that of the resistance
        # surface
        coreCopy = path.join(cfg.SCRATCHDIR, 'cores.shp')
        arcpy.CopyFeatures_management(cfg.COREFC, coreCopy)
        if not arcpy.ListFields(coreCopy, "CF_Central"):
            arcpy.AddField_management(coreCopy, "CF_Central", "DOUBLE")

        inLinkTableFile = lu.get_prev_step_link_table(step=7)
        linkTable = lu.load_link_table(inLinkTableFile)
        numLinks = linkTable.shape[0]
        numCorridorLinks = lu.report_links(linkTable)
        if numCorridorLinks == 0:
            lu.dashline(1)
            msg = '\nThere are no linkages. Bailing.'
            lu.raise_error(msg)

        if linkTable.shape[1] < 16:
            # Linktable has no entries from prior centrality or
            # pinchpoint analyses
            extraCols = npy.zeros((numLinks, 6), dtype=npy.float64)
            linkTable = linkTable[:, 0:10]
            linkTable = npy.append(linkTable, extraCols, axis=1)
            linkTable[:, cfg.LTB_LCPLEN] = -1
            linkTable[:, cfg.LTB_CWDEUCR] = -1
            linkTable[:, cfg.LTB_CWDPATHR] = -1
            linkTable[:, cfg.LTB_EFFRESIST] = -1
            linkTable[:, cfg.LTB_CWDTORR] = -1
            del extraCols

        linkTable[:, cfg.LTB_CURRENT] = -1

        coresToProcess = npy.unique(
            linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1])
        maxCoreNum = max(coresToProcess)
        del coresToProcess

        lu.dashline(0)

        coreList = linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1]
        coreList = npy.sort(coreList)

        # Set up directory for centrality
        INCENTRALITYDIR = cfg.CENTRALITYBASEDIR
        OUTCENTRALITYDIR = path.join(cfg.CENTRALITYBASEDIR,
                                     cfg.CIRCUITOUTPUTDIR_NM)
        CONFIGDIR = path.join(INCENTRALITYDIR, cfg.CIRCUITCONFIGDIR_NM)

        # Set Circuitscape options and write config file
        options = lu.set_cs_options()
        options['data_type'] = 'network'
        options['habitat_file'] = path.join(INCENTRALITYDIR,
                                            'Circuitscape_graph.txt')
        # Setting point file equal to graph to do all pairs in
        # Circuitscape
        options['point_file'] = path.join(INCENTRALITYDIR,
                                          'Circuitscape_graph.txt')
        outputFN = 'Circuitscape_network.out'
        options['output_file'] = path.join(OUTCENTRALITYDIR, outputFN)
        configFN = 'Circuitscape_network.ini'
        outConfigFile = path.join(CONFIGDIR, configFN)
        lu.write_cs_cfg_file(outConfigFile, options)

        delRows = npy.asarray(npy.where(linkTable[:, cfg.LTB_LINKTYPE] < 1))
        delRowsVector = npy.zeros((delRows.shape[1]), dtype="int32")
        delRowsVector[:] = delRows[0, :]
        LT = lu.delete_row(linkTable, delRowsVector)
        del delRows
        del delRowsVector

        graphList = npy.zeros((LT.shape[0], 3), dtype=npy.float64)
        graphList[:, 0] = LT[:, cfg.LTB_CORE1]
        graphList[:, 1] = LT[:, cfg.LTB_CORE2]
        graphList[:, 2] = LT[:, cfg.LTB_CWDIST]

        write_graph(options['habitat_file'], graphList)
        gprint('\nCalculating current flow centrality using '
               'Circuitscape...')
        memFlag = lu.call_circuitscape(cfg.CSPATH, outConfigFile)

        outputFN = 'Circuitscape_network_branch_currents_cum.txt'
        currentList = path.join(OUTCENTRALITYDIR, outputFN)
        if not arcpy.Exists(currentList):
            write_graph(options['habitat_file'], graphList)
            gprint('\nCalculating current flow centrality using '
                   'Circuitscape (2nd try)...')
            memFlag = lu.call_circuitscape(cfg.CSPATH, outConfigFile)
        if not arcpy.Exists(currentList):
            lu.dashline(1)
            msg = ('ERROR: No Circuitscape output found.\n'
                   'It looks like Circuitscape failed.')
            arcpy.AddError(msg)
            lu.write_log(msg)
            exit(1)

        currents = load_graph(currentList, graphType='graph/network',
                              datatype=npy.float64)

        numLinks = currents.shape[0]
        for x in range(0, numLinks):
            corex = currents[x, 0]
            corey = currents[x, 1]
            row = lu.get_links_from_core_pairs(linkTable, corex, corey)
            linkTable[row, cfg.LTB_CURRENT] = currents[x, 2]

        coreCurrentFN = 'Circuitscape_network_node_currents_cum.txt'
        nodeCurrentList = path.join(OUTCENTRALITYDIR, coreCurrentFN)
        nodeCurrents = load_graph(nodeCurrentList,
                                  graphType='graph/network',
                                  datatype=npy.float64)

        numNodeCurrents = nodeCurrents.shape[0]
        # Note: newRow() belongs to InsertCursor, not UpdateCursor, so
        # the spurious call before this loop was removed.
        rows = arcpy.UpdateCursor(coreCopy)
        for row in rows:
            coreID = row.getValue(cfg.COREFN)
            for i in range(0, numNodeCurrents):
                if coreID == nodeCurrents[i, 0]:
                    row.setValue("CF_Central", nodeCurrents[i, 1])
                    break
            rows.updateRow(row)
        del row, rows

        gprint('Done with centrality calculations.')

        finalLinkTable = lu.update_lcp_shapefile(linkTable, lastStep=5,
                                                 thisStep=7)

        linkTableFile = path.join(cfg.DATAPASSDIR, "linkTable_s5_plus.csv")
        lu.write_link_table(finalLinkTable, linkTableFile, inLinkTableFile)
        linkTableFinalFile = path.join(
            cfg.OUTPUTDIR, cfg.PREFIX + "_linkTable_s5_plus.csv")
        lu.write_link_table(finalLinkTable, linkTableFinalFile,
                            inLinkTableFile)
        gprint('Copy of final linkTable written to ' + linkTableFinalFile)

        finalCoreFile = path.join(cfg.CORECENTRALITYGDB,
                                  cfg.PREFIX + '_Cores')

        # Copy core area map to gdb
        if not arcpy.Exists(cfg.CORECENTRALITYGDB):
            arcpy.CreateFileGDB_management(
                cfg.OUTPUTDIR, path.basename(cfg.CORECENTRALITYGDB))
        arcpy.CopyFeatures_management(coreCopy, finalCoreFile)

        gprint('Creating shapefiles with linework for links.')
        lu.write_link_maps(linkTableFinalFile, step=7)

        # Copy final link maps to gdb and clean up
        lu.copy_final_link_maps(step=7)

    # Return GEOPROCESSING specific errors
    except arcpy.ExecuteError:
        lu.dashline(1)
        gprint('****Failed in step 7. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)

    # Return any PYTHON or system specific errors
    except Exception:
        lu.dashline(1)
        gprint('****Failed in step 7. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)

    return
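
# ----------------------------------------------------------------------
# Hedged, illustrative sketch (not the shipped write_graph/load_graph):
# step 7 drives Circuitscape's 'network' data type, which consumes a
# plain-text, whitespace-delimited 3-column edge list (node, node, edge
# value). graphList above already holds (core1, core2, cost-weighted
# distance), so a minimal writer is a single savetxt call. Whether the
# third column is read as resistance or conductance is governed by the
# habitat_map_is_resistances option in the Circuitscape config.
def write_graph_sketch(filename, graphList):
    """Illustrative only: write core pairs + CWD as a CS edge list."""
    import numpy as npy  # this module already imports numpy as npy
    npy.savetxt(filename, graphList, fmt='%i %i %f')
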
def STEP3_calc_cwds():
    """Calculates cost-weighted distances from each core area.

    Uses bounding circles around source and target cores to limit the
    extent of cwd calculations and speed computation.
    """
    try:
        lu.dashline(1)
        gprint('Running script ' + _SCRIPT_NAME)
        lu.dashline(0)

        # Super secret setting to re-start a failed run. Enter 'RESTART'
        # as the name of the pairwise distance table in step 2, and
        # uncheck step 2. We can eventually place this in a .ini file.
        rerun = False
        if cfg.S2EUCDISTFILE is not None:
            if cfg.S2EUCDISTFILE.lower() == "restart":
                rerun = True

        # if cfg.TMAXCWDIST is None:
        #     gprint('NOT using a maximum cost-weighted distance.')
        # else:
        #     gprint('Max cost-weighted distance for CWD calcs set '
        #            'to ' + str(cfg.TMAXCWDIST) + '\n')

        if cfg.BUFFERDIST is not None:
            gprint('Bounding circles plus a buffer of ' +
                   str(float(cfg.BUFFERDIST)) + ' map units will be used'
                   '\nto limit extent of cost distance calculations.')
        elif cfg.TOOL != cfg.TOOL_CC:
            gprint('NOT using bounding circles in cost distance '
                   'calculations.')

        # Set the analysis extent and cell size so we don't extract
        # rasters that go beyond the extent of the original raster
        if arcpy:
            arcpy.env.cellSize = cfg.RESRAST
            arcpy.env.extent = "MINOF"
        else:
            gp.cellSize = gp.Describe(cfg.RESRAST).MeanCellHeight
            gp.Extent = "MINOF"
        gp.mask = cfg.RESRAST
        if arcpy:
            arcpy.env.overwriteOutput = True
            arcpy.env.workspace = cfg.SCRATCHDIR
            arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
        else:
            gp.OverwriteOutput = True
            gp.workspace = cfg.SCRATCHDIR
            gp.scratchWorkspace = cfg.ARCSCRATCHDIR

        # Load linkTable (created in previous script)
        linkTableFile = lu.get_prev_step_link_table(step=3)
        linkTable = lu.load_link_table(linkTableFile)
        lu.report_links(linkTable)

        # Identify cores to map from linkTable
        coresToMap = npy.unique(
            linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1])
        numCoresToMap = len(coresToMap)

        if numCoresToMap < 3:
            # No need to check for intermediate cores, because there
            # aren't any
            cfg.S3DROPLCCSic = False
        else:
            cfg.S3DROPLCCSic = cfg.S3DROPLCCS

        gprint('\nNumber of core areas to connect: ' + str(numCoresToMap))

        if rerun:
            # If picking up a failed run, make sure needed files are there
            lu.dashline(1)
            gprint('\n****** RESTART MODE ENABLED ******\n')
            gprint('**** NOTE: This mode picks up step 3 where a\n'
                   'previous run left off due to a crash or user\n'
                   'abort. It assumes you are using the same input\n'
                   'data used in the terminated run.\n\n')
            lu.warn('IMPORTANT: Your LCP and stick feature classes\n'
                    'will LOSE LCPs that were already created, but\n'
                    'your final raster corridor map should be complete.\n')
            lu.dashline(0)
            lu.snooze(10)
            savedLinkTableFile = path.join(
                cfg.DATAPASSDIR, "temp_linkTable_s3_partial.csv")
            coreListFile = path.join(cfg.DATAPASSDIR,
                                     "temp_cores_to_map.csv")

            if not path.exists(savedLinkTableFile) or not path.exists(
                    coreListFile):
                gprint('No partial results file found from previous '
                       'stopped run. Starting run from beginning.\n')
                lu.dashline(0)
                rerun = False

        # If picking up a failed run, use old folders
        if not rerun:
            startIndex = 0
            if cfg.TOOL != cfg.TOOL_CC:
                lu.make_cwd_paths(max(coresToMap))  # Set up cwd dirs

        # Make a feature layer for input cores to select from
        gp.MakeFeatureLayer(cfg.COREFC, cfg.FCORES)

        # Drop links that are too long
        gprint('\nChecking for corridors that are too long to map.')
        DISABLE_LEAST_COST_NO_VAL = False
        linkTable, numDroppedLinks = lu.drop_links(
            linkTable, cfg.MAXEUCDIST, 0, cfg.MAXCOSTDIST, 0,
            DISABLE_LEAST_COST_NO_VAL)

        # ------------------------------------------------------------------
        # Bounding boxes
        if cfg.BUFFERDIST is not None:
            # Create bounding boxes around cores
            start_time = time.clock()
            gprint('Calculating bounding boxes for core areas.')
            extentBoxList = npy.zeros((0, 5), dtype='float32')
            for x in range(len(coresToMap)):
                core = coresToMap[x]
                boxCoords = lu.get_extent_box_coords(core)
                extentBoxList = npy.append(extentBoxList, boxCoords,
                                           axis=0)
            gprint('\nDone calculating bounding boxes.')
            start_time = lu.elapsed_time(start_time)

        # Bounding circles
        if cfg.BUFFERDIST is not None:
            # Make a set of circles encompassing core areas we'll be
            # connecting
            start_time = time.clock()
            gprint('Calculating bounding circles around potential'
                   ' corridors.')

            # x, y, corex, corey, radius: stores data for bounding
            # circle centroids
            boundingCirclePointArray = npy.zeros((0, 5), dtype='float32')

            circleList = npy.zeros((0, 3), dtype='int32')

            numLinks = linkTable.shape[0]
            for x in range(0, numLinks):
                if (linkTable[x, cfg.LTB_LINKTYPE] == cfg.LT_CORR or
                        linkTable[x, cfg.LTB_LINKTYPE] == cfg.LT_KEEP):
                    # It's a valid corridor link
                    linkId = int(linkTable[x, cfg.LTB_LINKID])
                    # fixme: this code is clumsy; can trim down
                    cores = npy.zeros((1, 3), dtype='int32')
                    cores[0, :] = npy.sort(
                        [0, linkTable[x, cfg.LTB_CORE1],
                         linkTable[x, cfg.LTB_CORE2]])
                    corex = cores[0, 1]
                    corey = cores[0, 2]
                    cores[0, 0] = linkId

                    foundFlag = False
                    for y in range(0, len(circleList)):  # clumsy
                        if (circleList[y, 1] == corex and
                                circleList[y, 2] == corey):
                            foundFlag = True
                    if not foundFlag:
                        circlePointData = lu.get_bounding_circle_data(
                            extentBoxList, corex, corey, cfg.BUFFERDIST)
                        boundingCirclePointArray = npy.append(
                            boundingCirclePointArray, circlePointData,
                            axis=0)
                        # Keep track of which cores we draw bounding
                        # circles around
                        circleList = npy.append(circleList, cores,
                                                axis=0)

            gprint('\nCreating bounding circles using buffer analysis.')
            dir, BNDCIRCENS = path.split(cfg.BNDCIRCENS)
            lu.make_points(cfg.SCRATCHDIR, boundingCirclePointArray,
                           BNDCIRCENS)
            lu.delete_data(cfg.BNDCIRS)
            gp.buffer_analysis(cfg.BNDCIRCENS, cfg.BNDCIRS, "radius")
            gp.deletefield(cfg.BNDCIRS, "BUFF_DIST")

            gprint('Successfully created bounding circles around '
                   'potential corridors using \na buffer of ' +
                   str(float(cfg.BUFFERDIST)) + ' map units.')
            start_time = lu.elapsed_time(start_time)

            gprint('Reducing global processing area using bounding '
                   'circle plus buffer of ' +
                   str(float(cfg.BUFFERDIST)) + ' map units.\n')

            extentBoxList = npy.zeros((0, 5), dtype='float32')
            boxCoords = lu.get_extent_box_coords()
            extentBoxList = npy.append(extentBoxList, boxCoords, axis=0)
            extentBoxList[0, 0] = 0

            boundingCirclePointArray = npy.zeros((0, 5), dtype='float32')
            circlePointData = lu.get_bounding_circle_data(
                extentBoxList, 0, 0, cfg.BUFFERDIST)

            dir, BNDCIRCEN = path.split(cfg.BNDCIRCEN)
            lu.make_points(cfg.SCRATCHDIR, circlePointData, BNDCIRCEN)
            lu.delete_data(cfg.BNDCIR)
            gp.buffer_analysis(cfg.BNDCIRCEN, cfg.BNDCIR, "radius")

            gprint('Extracting raster...')
            cfg.BOUNDRESIS = cfg.BOUNDRESIS + tif
            lu.delete_data(cfg.BOUNDRESIS)
            count = 0
            statement = ('gp.ExtractByMask_sa(cfg.RESRAST, cfg.BNDCIR, '
                         'cfg.BOUNDRESIS)')
            while True:
                try:
                    exec statement
                    randomerror()
                except:
                    count, tryAgain = lu.retry_arc_error(count,
                                                         statement)
                    if not tryAgain:
                        exec statement
                else:
                    break
            gprint('\nReduced resistance raster extracted using '
                   'bounding circle.')

        else:
            # If not using bounding circles, just go with the resistance
            # raster.
            cfg.BOUNDRESIS = cfg.RESRAST

        # ---------------------------------------------------------------------
        # Rasterize core areas to speed cost distance calcs
        gprint("Creating core area raster.")
        gp.SelectLayerByAttribute(cfg.FCORES, "CLEAR_SELECTION")

        if arcpy:
            arcpy.env.cellSize = cfg.BOUNDRESIS
            arcpy.env.extent = cfg.BOUNDRESIS
        else:
            gp.cellSize = gp.Describe(cfg.BOUNDRESIS).MeanCellHeight
            gp.extent = gp.Describe(cfg.BOUNDRESIS).extent

        if rerun:
            # Saved linktable replaces the one now in memory
            linkTable = lu.load_link_table(savedLinkTableFile)
            coresToMapSaved = npy.loadtxt(coreListFile, dtype='Float64',
                                          comments='#', delimiter=',')
            # Index of core where we left off
            startIndex = coresToMapSaved[0]
            del coresToMapSaved
            gprint('\n****** Re-starting run at core area number ' +
                   str(int(coresToMap[startIndex])) + ' ******\n')
            lu.dashline(0)

        if arcpy:
            arcpy.env.extent = "MINOF"
        else:
            gp.extent = "MINOF"

        # ----------------------------------------------------------------------
        # Loop through cores, do cwd calcs for each
        if cfg.TOOL == cfg.TOOL_CC:
            gprint("\nMapping least-cost paths.\n")
        else:
            gprint("\nStarting cost distance calculations.\n")
        lcpLoop = 0
        failures = 0
        x = startIndex
        endIndex = len(coresToMap)
        linkTableMod = linkTable.copy()
        while x < endIndex:
            startTime1 = time.clock()
            # Modification of linkTable in the function was causing
            # problems, so make a copy:
            linkTablePassed = linkTableMod.copy()

            (linkTableReturned, failures, lcpLoop) = do_cwd_calcs(
                x, linkTablePassed, coresToMap, lcpLoop, failures)

            if failures == 0:
                # If iteration was successful, continue with next core
                linkTableMod = linkTableReturned
                sourceCore = int(coresToMap[x])
                gprint('Done with all calculations for core ID #' +
                       str(sourceCore) + '. ' + str(int(x + 1)) +
                       ' of ' + str(endIndex) +
                       ' cores have been processed.')
                start_time = lu.elapsed_time(startTime1)

                outlinkTableFile = path.join(
                    cfg.DATAPASSDIR, "temp_linkTable_s3_partial.csv")
                lu.write_link_table(linkTableMod, outlinkTableFile)
                # Increment loop counter
                x = x + 1
            else:
                # If iteration failed, try again after a wait period
                delay_restart(failures)

        # ----------------------------------------------------------------------
        linkTable = linkTableMod

        # Reinstate temporarily disabled links
        rows = npy.where(linkTable[:, cfg.LTB_LINKTYPE] > 1000)
        linkTable[rows, cfg.LTB_LINKTYPE] = (
            linkTable[rows, cfg.LTB_LINKTYPE] - 1000)

        # Drop links that are too long
        DISABLE_LEAST_COST_NO_VAL = True
        linkTable, numDroppedLinks = lu.drop_links(
            linkTable, cfg.MAXEUCDIST, cfg.MINEUCDIST, cfg.MAXCOSTDIST,
            cfg.MINCOSTDIST, DISABLE_LEAST_COST_NO_VAL)

        # Write link table file
        outlinkTableFile = lu.get_this_step_link_table(step=3)
        gprint('Updating ' + outlinkTableFile)
        lu.write_link_table(linkTable, outlinkTableFile)
        linkTableLogFile = path.join(cfg.LOGDIR, "linkTable_s3.csv")
        lu.write_link_table(linkTable, linkTableLogFile)

        start_time = time.clock()
        gprint('Creating shapefiles with linework for links...')
        try:
            lu.write_link_maps(outlinkTableFile, step=3)
        except:
            lu.write_link_maps(outlinkTableFile, step=3)
        start_time = lu.elapsed_time(start_time)

        gprint('\nIndividual cost-weighted distance layers written '
               'to "cwd" directory.\n')
        gprint(outlinkTableFile +
               '\nupdated with cost-weighted distances between core '
               'areas.')

        # Clean up temporary files for restart code
        tempFile = path.join(cfg.DATAPASSDIR, "temp_cores_to_map.csv")
        lu.delete_file(tempFile)
        tempFile = path.join(cfg.DATAPASSDIR,
                             "temp_linkTable_s3_partial.csv")
        lu.delete_file(tempFile)

        # Check if climate tool is calling linkage mapper
        if cfg.TOOL == cfg.TOOL_CC:
            coreList = npy.unique(
                linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1])
            for core in coreList:
                cwdRaster = lu.get_cwd_path(int(core))
                back_rast = cwdRaster.replace("cwd_", "back_")
                lu.delete_data(back_rast)

    # Return GEOPROCESSING specific errors
    except arcgisscripting.ExecuteError:
        lu.dashline(1)
        gprint('****Failed in step 3. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)

    # Return any PYTHON or system specific errors
    except:
        lu.dashline(1)
        gprint('****Failed in step 3. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)

    return
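
# ----------------------------------------------------------------------
# Hedged sketch: step 3 guards flaky geoprocessing calls with
# exec/lu.retry_arc_error loops, and step 8 uses a @retry(10) decorator
# for the same purpose. A minimal decorator in that spirit (the name,
# try count, and wait time here are illustrative, not shipped values):
def retry_sketch(max_tries, wait_seconds=5):
    """Retry a function that fails transiently (e.g. Arc file locks)."""
    import functools
    import time

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_tries):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == max_tries - 1:
                        raise  # out of tries; surface the real error
                    time.sleep(wait_seconds)  # let Arc release locks
        return wrapper
    return decorator
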
def calc_lccs(normalize):
    try:
        if normalize:
            mosaicBaseName = "_corridors"
            writeTruncRaster = cfg.WRITETRUNCRASTER
            outputGDB = cfg.OUTPUTGDB
            SAVENORMLCCS = cfg.SAVENORMLCCS
        else:
            mosaicBaseName = "_NON_NORMALIZED_corridors"
            SAVENORMLCCS = False
            outputGDB = cfg.EXTRAGDB
            writeTruncRaster = False

        lu.dashline(1)
        gprint('Running script ' + _SCRIPT_NAME)
        linkTableFile = lu.get_prev_step_link_table(step=5)
        arcpy.env.workspace = cfg.SCRATCHDIR
        arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
        arcpy.env.compression = "NONE"

        if cfg.MAXEUCDIST is not None:
            gprint('Max Euclidean distance between cores')
            gprint('for linkage mapping set to ' + str(cfg.MAXEUCDIST))

        if cfg.MAXCOSTDIST is not None:
            gprint('Max cost-weighted distance between cores')
            gprint('for linkage mapping set to ' + str(cfg.MAXCOSTDIST))

        # Set the analysis extent and cell size to that of the resistance
        # surface
        arcpy.env.extent = cfg.RESRAST
        arcpy.env.cellSize = arcpy.Describe(cfg.RESRAST).MeanCellHeight
        arcpy.env.snapRaster = cfg.RESRAST
        arcpy.env.mask = cfg.RESRAST

        linkTable = lu.load_link_table(linkTableFile)
        numLinks = linkTable.shape[0]
        numCorridorLinks = lu.report_links(linkTable)
        if numCorridorLinks == 0:
            lu.dashline(1)
            msg = '\nThere are no corridors to map. Bailing.'
            lu.raise_error(msg)

        if not cfg.STEP3 and not cfg.STEP4:
            # Re-check for links that are too long, in case the script
            # is run out of sequence with more stringent settings
            gprint('Double-checking for corridors that are too long '
                   'to map.')
            DISABLE_LEAST_COST_NO_VAL = True
            linkTable, numDroppedLinks = lu.drop_links(
                linkTable, cfg.MAXEUCDIST, cfg.MINEUCDIST,
                cfg.MAXCOSTDIST, cfg.MINCOSTDIST,
                DISABLE_LEAST_COST_NO_VAL)

        # Added to try to speed up:
        arcpy.env.pyramid = "NONE"
        arcpy.env.rasterStatistics = "NONE"

        # Set up directories for normalized lcc and mosaic grids
        dirCount = 0
        gprint("Creating output folder: " + cfg.LCCBASEDIR)
        lu.delete_dir(cfg.LCCBASEDIR)
        arcpy.CreateFolder_management(path.dirname(cfg.LCCBASEDIR),
                                      path.basename(cfg.LCCBASEDIR))
        arcpy.CreateFolder_management(cfg.LCCBASEDIR, cfg.LCCNLCDIR_NM)
        clccdir = path.join(cfg.LCCBASEDIR, cfg.LCCNLCDIR_NM)
        gprint("")
        if normalize:
            gprint('Normalized least-cost corridors will be written '
                   'to ' + clccdir + '\n')
        PREFIX = cfg.PREFIX

        # Add CWD layers for core area pairs to produce NORMALIZED LCC
        # layers
        numGridsWritten = 0
        coreList = linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1]
        coreList = npy.sort(coreList)

        x = 0
        linkCount = 0
        endIndex = numLinks
        while x < endIndex:
            if linkTable[x, cfg.LTB_LINKTYPE] < 1:  # Not a valid link
                x = x + 1
                continue

            linkCount = linkCount + 1
            start_time = time.clock()

            linkId = str(int(linkTable[x, cfg.LTB_LINKID]))

            # Source and target cores
            corex = int(coreList[x, 0])
            corey = int(coreList[x, 1])

            # Get cwd rasters for source and target cores
            cwdRaster1 = lu.get_cwd_path(corex)
            cwdRaster2 = lu.get_cwd_path(corey)

            if not arcpy.Exists(cwdRaster1):
                msg = '\nError: cannot find cwd raster:\n' + cwdRaster1
                lu.raise_error(msg)
            if not arcpy.Exists(cwdRaster2):
                msg = '\nError: cannot find cwd raster:\n' + cwdRaster2
                lu.raise_error(msg)

            lccNormRaster = path.join(clccdir, str(corex) + "_" +
                                      str(corey))  # + ".tif")
            arcpy.env.extent = "MINOF"

            link = lu.get_links_from_core_pairs(linkTable, corex, corey)
            offset = 10000

            # Normalized lcc rasters are created by adding cwd rasters
            # and subtracting the least cost distance between them.
            lcDist = float(linkTable[link, cfg.LTB_CWDIST]) - offset

            if normalize:
                statement = ('outras = arcpy.sa.Raster(cwdRaster1) '
                             '+ arcpy.sa.Raster(cwdRaster2) - lcDist; '
                             'outras.save(lccNormRaster)')
            else:
                statement = ('outras = arcpy.sa.Raster(cwdRaster1) '
                             '+ arcpy.sa.Raster(cwdRaster2); '
                             'outras.save(lccNormRaster)')
            count = 0
            while True:
                try:
                    exec(statement)
                except Exception:
                    count, tryAgain = lu.retry_arc_error(count,
                                                         statement)
                    if not tryAgain:
                        exec(statement)
                else:
                    break

            if normalize:
                try:
                    minObject = arcpy.GetRasterProperties_management(
                        lccNormRaster, "MINIMUM")
                    rasterMin = float(str(minObject.getOutput(0)))
                except Exception:
                    lu.warn('\n---------------------------------------'
                            '---------')
                    lu.warn('WARNING: Raster minimum check failed in '
                            'step 5. \nThis may mean the output rasters '
                            'are corrupted. Please \nbe sure to check '
                            'for valid rasters in ' + outputGDB)
                    rasterMin = 0
                tolerance = float(arcpy.env.cellSize) * -10
                if rasterMin < tolerance:
                    lu.dashline(1)
                    msg = ('WARNING: Minimum value of corridor #' +
                           str(x + 1) + ' is much less than zero (' +
                           str(rasterMin) + ').\nThis could mean that '
                           'BOUNDING CIRCLE BUFFER DISTANCES were too '
                           'small and a corridor passed outside of a '
                           'bounding circle, or that a corridor passed '
                           'outside of the resistance map.\n')
                    lu.warn(msg)

            arcpy.env.extent = cfg.RESRAST

            mosaicDir = path.join(cfg.LCCBASEDIR, 'mos' + str(x + 1))
            lu.create_dir(mosaicDir)
            mosFN = 'mos'  # fixme: change to .tif and move
            mosaicRaster = path.join(mosaicDir, mosFN)

            if numGridsWritten == 0 and dirCount == 0:
                # If this is the first grid then copy rather than mosaic
                arcpy.CopyRaster_management(lccNormRaster, mosaicRaster)
            else:
                statement = (
                    'arcpy.MosaicToNewRaster_management('
                    'input_rasters=";".join([lccNormRaster, '
                    'lastMosaicRaster]), output_location=mosaicDir, '
                    'raster_dataset_name_with_extension=mosFN, '
                    'pixel_type="32_BIT_FLOAT", '
                    'cellsize=arcpy.env.cellSize, number_of_bands="1", '
                    'mosaic_method="MINIMUM")')
                count = 0
                while True:
                    try:
                        lu.write_log('Executing mosaic for link #' +
                                     str(linkId))
                        exec(statement)
                        lu.write_log('Done with mosaic.')
                    except Exception:
                        count, tryAgain = lu.retry_arc_error(count,
                                                             statement)
                        lu.delete_data(mosaicRaster)
                        lu.delete_dir(mosaicDir)
                        # Try a new directory
                        mosaicDir = path.join(
                            cfg.LCCBASEDIR,
                            'mos' + str(x + 1) + '_' + str(count))
                        lu.create_dir(mosaicDir)
                        mosaicRaster = path.join(mosaicDir, mosFN)
                        if not tryAgain:
                            exec(statement)
                    else:
                        break

            endTime = time.clock()
            processTime = round((endTime - start_time), 2)

            if normalize:
                printText = "Normalized and mosaicked "
            else:
                printText = "Mosaicked NON-normalized "
            gprint(printText + "corridor for link ID #" + str(linkId) +
                   " connecting core areas " + str(corex) + " and " +
                   str(corey) + " in " + str(processTime) +
                   " seconds. " + str(int(linkCount)) + " out of " +
                   str(int(numCorridorLinks)) + " links have been "
                   "processed.")

            # Temporarily disable links in linktable; we don't want to
            # mosaic them twice
            for y in range(x + 1, numLinks):
                corex1 = int(coreList[y, 0])
                corey1 = int(coreList[y, 1])
                if corex1 == corex and corey1 == corey:
                    linkTable[y, cfg.LTB_LINKTYPE] = (
                        linkTable[y, cfg.LTB_LINKTYPE] + 1000)
                elif corex1 == corey and corey1 == corex:
                    linkTable[y, cfg.LTB_LINKTYPE] = (
                        linkTable[y, cfg.LTB_LINKTYPE] + 1000)

            numGridsWritten = numGridsWritten + 1
            if not SAVENORMLCCS:
                lu.delete_data(lccNormRaster)
                lu.delete_dir(clccdir)
                lu.create_dir(clccdir)
            else:
                if numGridsWritten == 100:
                    # We only write up to 100 grids to any one folder
                    # because otherwise Arc slows to a crawl
                    dirCount = dirCount + 1
                    numGridsWritten = 0
                    clccdir = path.join(
                        cfg.LCCBASEDIR, cfg.LCCNLCDIR_NM + str(dirCount))
                    gprint("Creating output folder: " + clccdir)
                    arcpy.CreateFolder_management(
                        cfg.LCCBASEDIR, path.basename(clccdir))

            if numGridsWritten > 1 or dirCount > 0:
                lu.delete_data(lastMosaicRaster)
                lu.delete_dir(path.dirname(lastMosaicRaster))

            lastMosaicRaster = mosaicRaster
            x = x + 1

        # Re-enable rows that were temporarily disabled
        rows = npy.where(linkTable[:, cfg.LTB_LINKTYPE] > 1000)
        linkTable[rows, cfg.LTB_LINKTYPE] = (
            linkTable[rows, cfg.LTB_LINKTYPE] - 1000)

        # ---------------------------------------------------------------------
        # Create output geodatabase
        if not arcpy.Exists(outputGDB):
            arcpy.CreateFileGDB_management(cfg.OUTPUTDIR,
                                           path.basename(outputGDB))
        arcpy.env.workspace = outputGDB

        arcpy.env.pyramid = "NONE"
        arcpy.env.rasterStatistics = "NONE"

        # ---------------------------------------------------------------------
        # Convert mosaic raster to integer
        intRaster = path.join(outputGDB, PREFIX + mosaicBaseName)
        statement = ('outras = arcpy.sa.Int(arcpy.sa.Raster('
                     'mosaicRaster) - offset + 0.5); '
                     'outras.save(intRaster)')
        count = 0
        while True:
            try:
                exec(statement)
            except Exception:
                count, tryAgain = lu.retry_arc_error(count, statement)
                if not tryAgain:
                    exec(statement)
            else:
                break

        # ---------------------------------------------------------------------
        if writeTruncRaster:
            # -----------------------------------------------------------------
            # Set anything beyond cfg.CWDTHRESH to NODATA.
            truncRaster = (outputGDB + '\\' + PREFIX + mosaicBaseName +
                           '_truncated_at_' +
                           lu.cwd_cutoff_str(cfg.CWDTHRESH))
            statement = ('outRas = arcpy.sa.Raster(intRaster) '
                         '* (arcpy.sa.Con(arcpy.sa.Raster(intRaster) '
                         '<= cfg.CWDTHRESH, 1)); '
                         'outRas.save(truncRaster)')
            count = 0
            while True:
                try:
                    exec(statement)
                except Exception:
                    count, tryAgain = lu.retry_arc_error(count,
                                                         statement)
                    if not tryAgain:
                        exec(statement)
                else:
                    break

        # ---------------------------------------------------------------------
        # Check for unreasonably low minimum NLCC values
        try:
            mosaicGrid = path.join(cfg.LCCBASEDIR, 'mos')
            # Copy to grid to test
            arcpy.CopyRaster_management(mosaicRaster, mosaicGrid)
            minObject = arcpy.GetRasterProperties_management(mosaicGrid,
                                                             "MINIMUM")
            rasterMin = float(str(minObject.getOutput(0)))
        except Exception:
            lu.warn('\n------------------------------------------------')
            lu.warn('WARNING: Raster minimum check failed in step 5. \n'
                    'This may mean the output rasters are corrupted. '
                    'Please \nbe sure to check for valid rasters in ' +
                    outputGDB)
            rasterMin = 0
        tolerance = float(arcpy.env.cellSize) * -10
        if rasterMin < tolerance:
            lu.dashline(1)
            msg = ('WARNING: Minimum value of mosaicked corridor map is '
                   'much less than zero (' + str(rasterMin) + ').'
                   '\nThis could mean that BOUNDING CIRCLE BUFFER '
                   'DISTANCES were too small and a corridor passed '
                   'outside of a bounding circle, or that a corridor '
                   'passed outside of the resistance map.\n')
            lu.warn(msg)

        gprint('\nWriting final LCP maps...')
        if cfg.STEP4:
            finalLinkTable = lu.update_lcp_shapefile(linkTable,
                                                     lastStep=4,
                                                     thisStep=5)
        elif cfg.STEP3:
            finalLinkTable = lu.update_lcp_shapefile(linkTable,
                                                     lastStep=3,
                                                     thisStep=5)
        else:
            # Don't know if step 4 was run, since this is started at
            # step 5. Use presence of previous linktable files to figure
            # this out. Linktable name includes step number.
            prevLinkTableFile = lu.get_prev_step_link_table(step=5)
            prevStepInd = len(prevLinkTableFile) - 5
            lastStep = prevLinkTableFile[prevStepInd]
            finalLinkTable = lu.update_lcp_shapefile(linkTable, lastStep,
                                                     thisStep=5)

        outlinkTableFile = lu.get_this_step_link_table(step=5)
        gprint('Updating ' + outlinkTableFile)
        lu.write_link_table(linkTable, outlinkTableFile)

        linkTableLogFile = path.join(cfg.LOGDIR, "linkTable_s5.csv")
        lu.write_link_table(linkTable, linkTableLogFile)

        linkTableFinalFile = path.join(cfg.OUTPUTDIR,
                                       PREFIX + "_linkTable_s5.csv")
        lu.write_link_table(finalLinkTable, linkTableFinalFile)
        gprint('Copy of final linkTable written to ' +
               linkTableFinalFile)

        gprint('Creating shapefiles with linework for links.')
        try:
            lu.write_link_maps(outlinkTableFile, step=5)
        except Exception:
            lu.write_link_maps(outlinkTableFile, step=5)

        # Create final linkmap files in output directory, and remove
        # files from scratch.
        lu.copy_final_link_maps(step=5)

        if not SAVENORMLCCS:
            lu.delete_dir(cfg.LCCBASEDIR)

        # Build statistics for corridor rasters
        arcpy.AddMessage('\nBuilding output statistics and pyramids '
                         'for corridor raster')
        lu.build_stats(intRaster)

        if writeTruncRaster:
            arcpy.AddMessage('Building output statistics '
                             'for truncated corridor raster')
            lu.build_stats(truncRaster)

        save_parameters()
        if cfg.OUTPUTFORMODELBUILDER:
            arcpy.CopyFeatures_management(cfg.COREFC,
                                          cfg.OUTPUTFORMODELBUILDER)

    # Return GEOPROCESSING specific errors
    except arcpy.ExecuteError:
        lu.dashline(1)
        gprint('****Failed in step 5. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)

    # Return any PYTHON or system specific errors
    except Exception:
        lu.dashline(1)
        gprint('****Failed in step 5. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)

    return
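
# ----------------------------------------------------------------------
# Hedged worked example of the normalized LCC formula used above:
# NLCC(cell) = cwd_to_core_x(cell) + cwd_to_core_y(cell) - lcDist.
# Cells on the least-cost path score exactly 0; all others are
# positive. Because the code subtracts (lcDist - offset), the mosaic's
# minimum sits near offset (10000), which the later Int() conversion
# removes again. Toy numbers, illustrative only:
def nlcc_toy_example():
    """Illustrative only: show that least-cost-path cells score 0."""
    import numpy as npy
    cwd1 = npy.array([[0., 3., 7.],
                      [2., 5., 9.]])  # CWD from core x
    cwd2 = npy.array([[7., 4., 0.],
                      [9., 6., 2.]])  # CWD from core y
    lcDist = 7.0                      # least-cost distance x <-> y
    nlcc = cwd1 + cwd2 - lcDist       # [[0, 0, 0], [4, 4, 4]]
    return nlcc                       # top row lies on the LCP
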
def STEP8_calc_pinchpoints():
    """Maps pinch points in Linkage Mapper corridors using Circuitscape,
    given CWD calculations from s3_calcCwds.py.
    """
    try:
        lu.dashline(0)
        gprint('Running script ' + _SCRIPT_NAME)

        restartFlag = False
        if cfg.CWDCUTOFF < 0:
            cfg.CWDCUTOFF = cfg.CWDCUTOFF * -1
            restartFlag = True  # Restart code in progress

        CSPATH = lu.get_cs_path()
        outputGDB = path.join(cfg.OUTPUTDIR, path.basename(cfg.PINCHGDB))

        arcpy.env.overwriteOutput = True
        arcpy.env.workspace = cfg.SCRATCHDIR
        arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
        arcpy.env.pyramid = "NONE"
        arcpy.env.rasterStatistics = "NONE"

        # Set the analysis extent and cell size to that of the resistance
        # surface
        arcpy.env.extent = cfg.RESRAST
        arcpy.env.cellSize = cfg.RESRAST
        arcpy.env.snapRaster = cfg.RESRAST

        resRaster = cfg.RESRAST
        arcpy.env.extent = "MINOF"

        minObject = arcpy.GetRasterProperties_management(resRaster,
                                                         "MINIMUM")
        rasterMin = float(str(minObject.getOutput(0)))
        if rasterMin <= 0:
            msg = ('Error: resistance raster cannot have 0 or negative '
                   'values.')
            lu.raise_error(msg)

        if cfg.DO_ADJACENTPAIRS:
            prevLcpShapefile = lu.get_lcp_shapefile(None, thisStep=8)
            if not arcpy.Exists(prevLcpShapefile):
                msg = ('Cannot find an LCP shapefile from step 5. '
                       'Please rerun that step and any previous ones '
                       'if necessary.')
                lu.raise_error(msg)

            # Remove lcp shapefile
            lcpShapefile = path.join(cfg.DATAPASSDIR, "lcpLines_s8.shp")
            lu.delete_data(lcpShapefile)

        inLinkTableFile = lu.get_prev_step_link_table(step=8)
        linkTable = lu.load_link_table(inLinkTableFile)
        numLinks = linkTable.shape[0]
        numCorridorLinks = lu.report_links(linkTable)
        if numCorridorLinks == 0:
            lu.dashline(1)
            msg = '\nThere are no linkages. Bailing.'
            lu.raise_error(msg)

        if linkTable.shape[1] < 16:
            # Linktable has no entries from prior centrality or
            # pinchpoint analyses
            extraCols = npy.zeros((numLinks, 6), dtype="float64")
            linkTable = linkTable[:, 0:10]
            linkTable = npy.append(linkTable, extraCols, axis=1)
            linkTable[:, cfg.LTB_LCPLEN] = -1
            linkTable[:, cfg.LTB_CWDEUCR] = -1
            linkTable[:, cfg.LTB_CWDPATHR] = -1
            linkTable[:, cfg.LTB_EFFRESIST] = -1
            linkTable[:, cfg.LTB_CWDTORR] = -1
            linkTable[:, cfg.LTB_CURRENT] = -1
            del extraCols

        # Set up directories for circuit and circuit mosaic grids
        # Create output geodatabase
        if not arcpy.Exists(cfg.PINCHGDB):
            arcpy.CreateFileGDB_management(cfg.OUTPUTDIR,
                                           path.basename(cfg.PINCHGDB))

        mosaicRaster = path.join(cfg.CIRCUITBASEDIR, "current_mos" + tif)
        coresToProcess = npy.unique(
            linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1])
        maxCoreNum = max(coresToProcess)
        del coresToProcess

        lu.dashline(0)

        coreList = linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1]
        coreList = npy.sort(coreList)
        # gprint('There are ' + str(len(npy.unique(coreList))) +
        #        ' core areas.')

        INCIRCUITDIR = cfg.CIRCUITBASEDIR
        OUTCIRCUITDIR = path.join(cfg.CIRCUITBASEDIR,
                                  cfg.CIRCUITOUTPUTDIR_NM)
        CONFIGDIR = path.join(INCIRCUITDIR, cfg.CIRCUITCONFIGDIR_NM)

        # Cutoff value text to append to filenames
        cutoffText = str(cfg.CWDCUTOFF)
        if cutoffText[-6:] == '000000':
            cutoffText = cutoffText[0:-6] + 'm'
        elif cutoffText[-3:] == '000':
            cutoffText = cutoffText[0:-3] + 'k'

        if cfg.SQUARERESISTANCES:
            # Square resistance values
            squaredRaster = path.join(cfg.SCRATCHDIR, 'res_sqr')
            arcpy.env.workspace = cfg.SCRATCHDIR
            arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
            outRas = Raster(resRaster) * Raster(resRaster)
            outRas.save(squaredRaster)
            resRaster = squaredRaster

        if cfg.DO_ADJACENTPAIRS:
            linkLoop = 0
            lu.dashline(1)
            gprint('Mapping pinch points in individual corridors\n'
                   'using Circuitscape.')
            lu.dashline(1)
            gprint('If you try to cancel your run and the Arc dialog '
                   'hangs,')
            gprint('you can kill Circuitscape by opening Windows Task '
                   'Manager')
            gprint('and ending the cs_run.exe process.')
            lu.dashline(2)
            for x in range(0, numLinks):
                linkId = str(int(linkTable[x, cfg.LTB_LINKID]))
                if not (linkTable[x, cfg.LTB_LINKTYPE] > 0):
                    continue
                linkLoop = linkLoop + 1
                linkDir = path.join(cfg.SCRATCHDIR, 'link' + linkId)
                if restartFlag and path.exists(linkDir):
                    gprint('continuing')
                    continue
                restartFlag = False
                lu.create_dir(linkDir)
                start_time1 = time.clock()

                # Source and target cores
                corex = int(coreList[x, 0])
                corey = int(coreList[x, 1])

                # Get cwd rasters for source and target cores
                cwdRaster1 = lu.get_cwd_path(corex)
                cwdRaster2 = lu.get_cwd_path(corey)

                lccNormRaster = path.join(linkDir, 'lcc_norm')
                arcpy.env.extent = "MINOF"

                link = lu.get_links_from_core_pairs(linkTable, corex,
                                                    corey)
                lcDist = float(linkTable[link, cfg.LTB_CWDIST])

                # Normalized lcc rasters are created by adding cwd
                # rasters and subtracting the least cost distance
                # between them.
                outRas = Raster(cwdRaster1) + Raster(cwdRaster2) - lcDist
                outRas.save(lccNormRaster)

                # Create raster mask
                resMaskRaster = path.join(linkDir, 'res_mask' + tif)
                outCon = arcpy.sa.Con(
                    Raster(lccNormRaster) <= cfg.CWDCUTOFF, 1)
                outCon.save(resMaskRaster)

                # Convert to poly. Use as mask to clip resistance raster.
                resMaskPoly = path.join(linkDir, 'res_mask_poly.shp')
                arcpy.RasterToPolygon_conversion(resMaskRaster,
                                                 resMaskPoly,
                                                 "NO_SIMPLIFY")
                arcpy.env.extent = resMaskPoly

                # Includes 0 values in some cases with CP LI model if
                # tif, so using ESRI Grid format
                resClipRasterMasked = path.join(linkDir, 'res_clip_m')
                # Extract masked resistance raster.
                # Needs to be float to get export to npy to work.
                outRas = arcpy.sa.ExtractByMask(resRaster,
                                                resMaskPoly) + 0.0
                outRas.save(resClipRasterMasked)

                resNpyFN = 'resistances_link_' + linkId + '.npy'
                resNpyFile = path.join(INCIRCUITDIR, resNpyFN)
                numElements, numResistanceNodes = export_ras_to_npy(
                    resClipRasterMasked, resNpyFile)

                totMem, availMem = lu.get_mem()
                # gprint('Total memory: ' + str(totMem))
                if numResistanceNodes / availMem > 2000000:
                    lu.dashline(1)
                    gwarn('Warning:')
                    gwarn('Circuitscape can only solve 2-3 million '
                          'nodes')
                    gwarn('per gigabyte of available RAM. \nTotal '
                          'physical RAM on your machine is ~' +
                          str(totMem) + ' GB. \nAvailable memory is ~' +
                          str(availMem) + ' GB. \nYour resistance '
                          'raster has ' + str(numResistanceNodes) +
                          ' nodes.')
                    lu.dashline(2)

                corePairRaster = path.join(linkDir, 'core_pairs' + tif)
                arcpy.env.extent = resClipRasterMasked

                # Next result needs to be floating pt for numpy export
                outCon = arcpy.sa.Con(
                    Raster(cwdRaster1) == 0, corex,
                    arcpy.sa.Con(Raster(cwdRaster2) == 0, corey + 0.0))
                outCon.save(corePairRaster)

                coreNpyFN = 'cores_link_' + linkId + '.npy'
                coreNpyFile = path.join(INCIRCUITDIR, coreNpyFN)
                numElements, numNodes = export_ras_to_npy(corePairRaster,
                                                          coreNpyFile)

                arcpy.env.extent = "MINOF"

                # Set circuitscape options and call
                options = lu.setCircuitscapeOptions()
                if cfg.WRITE_VOLT_MAPS:
                    options['write_volt_maps'] = True
                options['habitat_file'] = resNpyFile
                # if int(linkId) > 2:
                #     options['habitat_file'] = 'c:\\test.dummy'
                options['point_file'] = coreNpyFile
                options['set_focal_node_currents_to_zero'] = True
                outputFN = 'Circuitscape_link' + linkId + '.out'
                options['output_file'] = path.join(OUTCIRCUITDIR,
                                                   outputFN)
                if numElements > 250000:
                    options['print_timings'] = True
                configFN = 'pinchpoint_config' + linkId + '.ini'

                outConfigFile = path.join(CONFIGDIR, configFN)
                lu.writeCircuitscapeConfigFile(outConfigFile, options)
                gprint('Processing link ID #' + str(linkId) +
                       '. Resistance map has ' +
                       str(int(numResistanceNodes)) + ' nodes.')

                memFlag = call_circuitscape(CSPATH, outConfigFile)

                currentFN = ('Circuitscape_link' + linkId +
                             '_cum_curmap.npy')
                currentMap = path.join(OUTCIRCUITDIR, currentFN)
                if not arcpy.Exists(currentMap):
                    print_failure(numResistanceNodes, memFlag, 10)
                    numElements, numNodes = export_ras_to_npy(
                        resClipRasterMasked, resNpyFile)
                    memFlag = call_circuitscape(CSPATH, outConfigFile)
                    currentFN = ('Circuitscape_link' + linkId +
                                 '_cum_curmap.npy')
                    currentMap = path.join(OUTCIRCUITDIR, currentFN)

                if not arcpy.Exists(currentMap):
                    msg = ('\nCircuitscape failed. See error '
                           'information above.')
                    arcpy.AddError(msg)
                    lu.write_log(msg)
                    exit(1)

                # Either set core areas to nodata in current map or
                # divide each by its radius
                currentRaster = path.join(linkDir, "current" + tif)
                import_npy_to_ras(currentMap, corePairRaster,
                                  currentRaster)

                if cfg.WRITE_VOLT_MAPS:
                    voltFN = ('Circuitscape_link' + linkId +
                              '_voltmap_' + str(corex) + '_' +
                              str(corey) + '.npy')
                    voltMap = path.join(OUTCIRCUITDIR, voltFN)
                    voltRaster = path.join(
                        outputGDB, cfg.PREFIX + "_voltMap_" +
                        str(corex) + '_' + str(corey))
                    import_npy_to_ras(voltMap, corePairRaster,
                                      voltRaster)
                    gprint('Building output statistics and pyramids '
                           'for voltage raster\n')
                    lu.build_stats(voltRaster)

                arcpy.env.extent = currentRaster

                if SETCORESTONULL:
                    # Set core areas to NoData in current map for color
                    # ramping
                    currentRaster2 = currentRaster + '2' + tif
                    outCon = arcpy.sa.Con(
                        arcpy.sa.IsNull(Raster(corePairRaster)),
                        Raster(currentRaster))
                    outCon.save(currentRaster2)
                    currentRaster = currentRaster2

                arcpy.env.extent = "MAXOF"
                if linkLoop == 1:
                    lu.delete_data(mosaicRaster)

                    @retry(10)
                    def copyRas2():
                        arcpy.CopyRaster_management(currentRaster,
                                                    mosaicRaster)
                    copyRas2()
                else:
                    @retry(10)
                    def mosaicRas():
                        arcpy.Mosaic_management(currentRaster,
                                                mosaicRaster,
                                                "MAXIMUM", "MATCH")
                    mosaicRas()

                resistancesFN = ('Circuitscape_link' + linkId +
                                 '_resistances_3columns.out')
                resistancesFile = path.join(OUTCIRCUITDIR, resistancesFN)
                resistances = npy.loadtxt(resistancesFile,
                                          dtype='Float64', comments='#')

                resistance = (float(str(arcpy.env.cellSize)) *
                              resistances[2])
                linkTable[link, cfg.LTB_EFFRESIST] = resistance

                # Ratio
                if not cfg.SQUARERESISTANCES:
                    linkTable[link, cfg.LTB_CWDTORR] = (
                        linkTable[link, cfg.LTB_CWDIST] /
                        linkTable[link, cfg.LTB_EFFRESIST])

                # Clean up
                if not cfg.SAVE_TEMP_CIRCUIT_FILES:
                    lu.delete_file(coreNpyFile)
                    coreNpyBase, extension = path.splitext(coreNpyFile)
                    lu.delete_data(coreNpyBase + '.hdr')
                    lu.delete_file(resNpyFile)
                    resNpyBase, extension = path.splitext(resNpyFile)
                    lu.delete_data(resNpyBase + '.hdr')
                    lu.delete_file(currentMap)
                    curMapBase, extension = path.splitext(currentMap)
                    lu.delete_data(curMapBase + '.hdr')
                    lu.delete_data(currentRaster)
                    lu.clean_out_workspace(linkDir)
                    lu.delete_dir(linkDir)

                gprint('Finished with link ID #' + str(linkId) + '. ' +
                       str(linkLoop) + ' out of ' +
                       str(numCorridorLinks) + ' links have been '
                       'processed.')
                start_time1 = lu.elapsed_time(start_time1)

            outputRaster = path.join(
                outputGDB,
                cfg.PREFIX + "_current_adjacentPairs_" + cutoffText)
            lu.delete_data(outputRaster)

            @retry(10)
            def copyRas():
                arcpy.CopyRaster_management(mosaicRaster, outputRaster)
            copyRas()

            gprint('Building output statistics and pyramids '
                   'for corridor pinch point raster\n')
            lu.build_stats(outputRaster)

            finalLinkTable = lu.update_lcp_shapefile(linkTable,
                                                     lastStep=5,
                                                     thisStep=8)

            linkTableFile = path.join(cfg.DATAPASSDIR,
                                      "linkTable_s5_plus.csv")
            lu.write_link_table(finalLinkTable, linkTableFile,
                                inLinkTableFile)
            linkTableFinalFile = path.join(
                cfg.OUTPUTDIR, cfg.PREFIX + "_linkTable_s5_plus.csv")
            lu.write_link_table(finalLinkTable, linkTableFinalFile,
                                inLinkTableFile)
            gprint('Copy of linkTable written to ' + linkTableFinalFile)
            # fixme: update sticks?

            gprint('Creating shapefiles with linework for links.')
            lu.write_link_maps(linkTableFinalFile, step=8)

            # Copy final link maps to gdb.
            lu.copy_final_link_maps(step=8)

            lu.delete_data(mosaicRaster)

        if not cfg.DO_ALLPAIRS:
            # Clean up temporary files
            if not cfg.SAVECURRENTMAPS:
                lu.delete_dir(OUTCIRCUITDIR)
            return

        lu.dashline(1)
        gprint('Mapping global pinch points among all\n'
               'core area pairs using Circuitscape.')
        if cfg.ALL_PAIR_SCENARIO == 'pairwise':
            gprint('Circuitscape will be run in PAIRWISE mode.')
        else:
            gprint('Circuitscape will be run in ALL-TO-ONE mode.')
        arcpy.env.workspace = cfg.SCRATCHDIR
        arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
        arcpy.env.extent = cfg.RESRAST
        arcpy.env.cellSize = cfg.RESRAST

        S8CORE_RAS = "s8core_ras"
        s8CoreRasPath = path.join(cfg.SCRATCHDIR, S8CORE_RAS)

        arcpy.FeatureToRaster_conversion(cfg.COREFC, cfg.COREFN,
                                         s8CoreRasPath,
                                         arcpy.env.cellSize)
        binaryCoreRaster = path.join(cfg.SCRATCHDIR, "core_ras_bin")

        # The following commands cause file lock problems on save, so
        # using gp instead:
        # outCon = arcpy.sa.Con(S8CORE_RAS, 1, "#", "VALUE > 0")
        # outCon.save(binaryCoreRaster)
        # gp.Con_sa(s8CoreRasPath, 1, binaryCoreRaster, "#", "VALUE > 0")
        outCon = arcpy.sa.Con(Raster(s8CoreRasPath) > 0, 1)
        outCon.save(binaryCoreRaster)
        s5corridorRas = path.join(cfg.OUTPUTGDB,
                                  cfg.PREFIX + "_corridors")

        if not arcpy.Exists(s5corridorRas):
            s5corridorRas = path.join(cfg.OUTPUTGDB,
                                      cfg.PREFIX + "_lcc_mosaic_int")

        outCon = arcpy.sa.Con(
            Raster(s5corridorRas) <= cfg.CWDCUTOFF, Raster(resRaster),
            arcpy.sa.Con(Raster(binaryCoreRaster) > 0,
                         Raster(resRaster)))

        resRasClipPath = path.join(cfg.SCRATCHDIR, 'res_ras_clip')
        outCon.save(resRasClipPath)

        arcpy.env.cellSize = resRasClipPath
        arcpy.env.extent = resRasClipPath
        s8CoreRasClipped = s8CoreRasPath + '_c'

        # Produce core raster with same extent as clipped resistance
        # raster. Added to ensure correct data type; nodata values were
        # positive for cores otherwise.
        outCon = arcpy.sa.Con(arcpy.sa.IsNull(Raster(s8CoreRasPath)),
                              -9999, Raster(s8CoreRasPath))
        outCon.save(s8CoreRasClipped)

        resNpyFN = 'resistances.npy'
        resNpyFile = path.join(INCIRCUITDIR, resNpyFN)
        numElements, numResistanceNodes = export_ras_to_npy(
            resRasClipPath, resNpyFile)

        totMem, availMem = lu.get_mem()
        # gprint('Total memory: ' + str(totMem))
        if numResistanceNodes / availMem > 2000000:
            lu.dashline(1)
            gwarn('Warning:')
            gwarn('Circuitscape can only solve 2-3 million nodes')
            gwarn('per gigabyte of available RAM. \nTotal physical RAM '
                  'on your machine is ~' + str(totMem) +
                  ' GB. \nAvailable memory is ~' + str(availMem) +
                  ' GB. \nYour resistance raster has ' +
                  str(numResistanceNodes) + ' nodes.')
            lu.dashline(0)

        coreNpyFN = 'cores.npy'
        coreNpyFile = path.join(INCIRCUITDIR, coreNpyFN)
        numElements, numNodes = export_ras_to_npy(s8CoreRasClipped,
                                                  coreNpyFile)

        arcpy.env.extent = "MINOF"

        options = lu.setCircuitscapeOptions()
        options['scenario'] = cfg.ALL_PAIR_SCENARIO
        options['habitat_file'] = resNpyFile
        options['point_file'] = coreNpyFile
        options['set_focal_node_currents_to_zero'] = True
        outputFN = 'Circuitscape.out'
        options['output_file'] = path.join(OUTCIRCUITDIR, outputFN)
        options['print_timings'] = True
        configFN = 'pinchpoint_allpair_config.ini'
        outConfigFile = path.join(CONFIGDIR, configFN)
        lu.writeCircuitscapeConfigFile(outConfigFile, options)
        gprint('\nResistance map has ' + str(int(numResistanceNodes)) +
               ' nodes.')

        lu.dashline(1)
        gprint('If you try to cancel your run and the Arc dialog '
               'hangs,')
        gprint('you can kill Circuitscape by opening Windows Task '
               'Manager')
        gprint('and ending the cs_run.exe process.')
        lu.dashline(0)

        call_circuitscape(CSPATH, outConfigFile)
        # test = subprocess.call(
        #     [CSPATH, outConfigFile],
        #     creationflags=subprocess.CREATE_NEW_CONSOLE)

        if options['scenario'] == 'pairwise':
            rasterSuffix = "_current_allPairs_" + cutoffText
        else:
            rasterSuffix = "_current_allToOne_" + cutoffText

        currentFN = 'Circuitscape_cum_curmap.npy'
        currentMap = path.join(OUTCIRCUITDIR, currentFN)
        outputRaster = path.join(outputGDB, cfg.PREFIX + rasterSuffix)
        currentRaster = path.join(cfg.SCRATCHDIR, "current")

        try:
            import_npy_to_ras(currentMap, resRasClipPath, outputRaster)
        except:
            lu.dashline(1)
            msg = ('ERROR: Circuitscape failed. \n'
                   'Note: Circuitscape can only solve 2-3 million nodes'
                   '\nper gigabyte of available RAM. The resistance '
                   '\nraster for the last corridor had ' +
                   str(numResistanceNodes) + ' nodes.\n\nResistance '
                   'raster values that vary by >6 orders of \nmagnitude'
                   ' can also cause failures, as can a mismatch in '
                   '\ncore area and resistance raster extents.')
            arcpy.AddError(msg)
            lu.write_log(msg)
            exit(1)

        # Set core areas to nodata
        if SETCORESTONULL:
            # Set core areas to NoData in current map for color ramping
            outputRasterND = outputRaster + '_noDataCores'
            outCon = arcpy.sa.SetNull(Raster(s8CoreRasClipped) > 0,
                                      Raster(outputRaster))
            outCon.save(outputRasterND)

        gprint('\nBuilding output statistics and pyramids '
               'for current raster.')
        lu.build_stats(outputRaster)
        lu.build_stats(outputRasterND)

        # Clean up temporary files
        if not cfg.SAVECURRENTMAPS:
            lu.delete_dir(OUTCIRCUITDIR)

    # Return GEOPROCESSING specific errors
    except arcpy.ExecuteError:
        lu.dashline(1)
        gprint('****Failed in step 8. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)

    # Return any PYTHON or system specific errors
    except:
        lu.dashline(1)
        gprint('****Failed in step 8. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)
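
# ----------------------------------------------------------------------
# Hedged helper formalizing the memory rule of thumb printed above:
# Circuitscape solves roughly 2-3 million nodes per GB of available
# RAM. The constant uses the conservative end of that range; the
# function name and signature are illustrative, not part of the module.
def circuitscape_fits_in_ram_sketch(numResistanceNodes, availMemGB):
    """Return True if the raster is likely solvable in available RAM."""
    NODES_PER_GB = 2000000  # conservative end of 2-3 million per GB
    return numResistanceNodes <= NODES_PER_GB * availMemGB
# Example: with ~4 GB free, rasters beyond ~8 million nodes risk
# exhausting memory and trigger the warnings above.
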
def STEP2_build_network():
    """Generates initial version of linkTable.csv based on euclidean
    distances and adjacencies of core areas.
    """
    try:
        lu.dashline(1)
        gprint('Running script ' + _SCRIPT_NAME)

        outlinkTableFile = lu.get_this_step_link_table(step=2)

        # Warning flag for missing distances in conefor file
        # dropFlag = False

        # ------------------------------------------------------------------
        # Adjacency file created from s1_getAdjacencies.py
        if cfg.S2ADJMETH_EU and not path.exists(cfg.EUCADJFILE):
            msg = ('\nERROR: Euclidean adjacency file required from '
                   'Step 1: ' + cfg.EUCADJFILE)
            lu.raise_error(msg)

        # ------------------------------------------------------------------
        # Adjacency file created from s1_getAdjacencies.py
        if cfg.S2ADJMETH_CW and not path.exists(cfg.CWDADJFILE):
            msg = ('\nERROR: Cost-weighted adjacency file required from '
                   'Step 1: ' + cfg.CWDADJFILE)
            lu.raise_error(msg)

        # ----------------------------------------------------------------------
        # Load eucDists matrix from file and sort
        if cfg.S2EUCDISTFILE is None:
            eucdist_file = generate_distance_file()
        else:
            eucdist_file = cfg.S2EUCDISTFILE

        eucDists_in = npy.loadtxt(eucdist_file, dtype='Float64',
                                  comments='#')

        if eucDists_in.size == 3:  # Just one line in file
            eucDists = npy.zeros((1, 3), dtype='Float64')
            eucDists[0, :] = eucDists_in
            numDists = 1
        else:
            eucDists = eucDists_in
            numDists = eucDists.shape[0]
        del eucDists_in

        # Sort eucDists by 1st column, then by 2nd, then by 3rd
        eucDists[:, 0:2] = npy.sort(eucDists[:, 0:2])
        ind = npy.lexsort((eucDists[:, 2], eucDists[:, 1],
                           eucDists[:, 0]))
        eucDists = eucDists[ind]

        gprint('Core area distance list loaded.')
        gprint('number of pairwise distances = ' + str(numDists))

        # ----------------------------------------------------------------------
        # Get rid of duplicate pairs of cores, retaining MINIMUM distance
        # between them
        numDistsOld = numDists
        for x in range(numDists - 2, -1, -1):
            if (eucDists[x, 0] == eucDists[x + 1, 0] and
                    eucDists[x, 1] == eucDists[x + 1, 1]):
                eucDists[x + 1, 0] = 0
        delRows = npy.asarray(npy.where(eucDists[:, 0] == 0))
        delRowsVector = npy.zeros((delRows.shape[1]), dtype="int32")
        delRowsVector[:] = delRows[0, :]
        eucDists = lu.delete_row(eucDists, delRowsVector)
        del delRows
        del delRowsVector
        numDists = eucDists.shape[0]

        lu.dashline(1)
        gprint('Removed ' + str(numDistsOld - numDists) +
               ' duplicate core pairs in Euclidean distance table.\n')
        maxEucDistID = max(eucDists[:, 1])
        gprint('After removing duplicates and distances that exceed'
               ' maximum, \nthere are ' + str(numDists) +
               ' pairwise distances. Max core ID number is ' +
               str(int(maxEucDistID)) + '.')

        # Begin creating and manipulating linktables
        # (zeros and many other array functions are imported from numpy)
        linkTable = npy.zeros((len(eucDists), 10), dtype='int32')
        linkTable[:, 1:3] = eucDists[:, 0:2]
        linkTable[:, cfg.LTB_EUCDIST] = eucDists[:, 2]

        # ----------------------------------------------------------------------
        # Get adjacencies using adj files from step 1.
        if cfg.S2ADJMETH_CW or cfg.S2ADJMETH_EU:  # Keep ALL links
            cwdAdjList = []
            eucAdjList = []
            if cfg.S2ADJMETH_CW:
                cwdAdjTable = get_adj_list(cfg.CWDADJFILE)
                cwdAdjList = []
                for i in range(0, len(cwdAdjTable)):
                    listEntry = (str(cwdAdjTable[i, 0]) + '_' +
                                 str(cwdAdjTable[i, 1]))
                    cwdAdjList.append(listEntry)
                gprint('Cost-weighted adjacency file loaded.')
                maxCwdAdjCoreID = max(cwdAdjTable[:, 1])
                del cwdAdjTable

            if cfg.S2ADJMETH_EU:
                eucAdjTable = get_adj_list(cfg.EUCADJFILE)
                eucAdjList = []
                for i in range(0, len(eucAdjTable)):
                    listEntry = (str(eucAdjTable[i, 0]) + '_' +
                                 str(eucAdjTable[i, 1]))
                    eucAdjList.append(listEntry)
                maxEucAdjCoreID = max(eucAdjTable[:, 1])
                del eucAdjTable

        # maxCoreId = max(maxEucAdjCoreID, maxCwdAdjCoreID, maxEucDistID)

        del eucDists

        gprint('Creating link table')
        linkTable[:, cfg.LTB_CWDADJ] = -1  # Adjacency not evaluated yet
        linkTable[:, cfg.LTB_EUCADJ] = -1
        if cfg.S2ADJMETH_CW or cfg.S2ADJMETH_EU:
            for x in range(0, linkTable.shape[0]):
                listEntry = (str(linkTable[x, cfg.LTB_CORE1]) + '_' +
                             str(linkTable[x, cfg.LTB_CORE2]))
                if listEntry in cwdAdjList:
                    linkTable[x, cfg.LTB_CWDADJ] = 1
                else:
                    linkTable[x, cfg.LTB_CWDADJ] = 0
                if listEntry in eucAdjList:
                    linkTable[x, cfg.LTB_EUCADJ] = 1
                else:
                    linkTable[x, cfg.LTB_EUCADJ] = 0

        if cfg.S2ADJMETH_CW and cfg.S2ADJMETH_EU:
            # "Keep all adjacent links"
            gprint("\nKeeping all adjacent links\n")
            rows = []
            for row in range(0, linkTable.shape[0]):
                if (linkTable[row, cfg.LTB_EUCADJ] == 0 and
                        linkTable[row, cfg.LTB_CWDADJ] == 0):
                    rows.append(row)
            linkTable = lu.delete_row(linkTable, rows)
        elif cfg.S2ADJMETH_CW:
            gprint("\nKeeping cost-weighted adjacent links\n")
            delRows = npy.asarray(
                npy.where(linkTable[:, cfg.LTB_CWDADJ] == 0))
            delRowsVector = npy.zeros((delRows.shape[1]), dtype="int32")
            delRowsVector[:] = delRows[0, :]
            linkTable = lu.delete_row(linkTable, delRowsVector)
        elif cfg.S2ADJMETH_EU:
            gprint("\nKeeping Euclidean adjacent links\n")
            delRows = npy.asarray(
                npy.where(linkTable[:, cfg.LTB_EUCADJ] == 0))
            delRowsVector = npy.zeros((delRows.shape[1]), dtype="int32")
            delRowsVector[:] = delRows[0, :]
            linkTable = lu.delete_row(linkTable, delRowsVector)
        else:  # For Climate Corridor tool
            gprint("\nIgnoring adjacency and keeping all links\n")

        # if dropFlag:
        #     lu.dashline(1)
        #     gprint('NOTE: At least one adjacent link was dropped '
        #            'because there was no Euclidean ')
        #     gprint('distance value in the input distance file from '
        #            'Conefor extension.')
        #     lu.dashline(2)

        linkTable[:, cfg.LTB_CLUST1] = -1  # No clusters until later steps
        linkTable[:, cfg.LTB_CLUST2] = -1  # Not evaluated yet

        # May eventually have ability to get lcdistances for adjacent
        # cores from s1_getAdjacencies.py
        linkTable[:, cfg.LTB_CWDIST] = -1

        # Get list of core IDs, based on core area shapefile.
        coreList = lu.get_core_list(cfg.COREFC, cfg.COREFN)
        if len(npy.unique(coreList[:, 1])) < 2:
            lu.dashline(1)
            msg = ('\nERROR: There are fewer than two core '
                   'areas.\nThis means there is nothing to connect '
                   'with linkages. Bailing.')
            lu.raise_error(msg)

        # Set cfg.LTB_LINKTYPE to valid corridor code
        linkTable[:, cfg.LTB_LINKTYPE] = cfg.LT_CORR

        # Make sure linkTable is sorted
        ind = npy.lexsort((linkTable[:, cfg.LTB_CORE2],
                           linkTable[:, cfg.LTB_CORE1]))
        if len(linkTable) == 0:
            msg = ('\nERROR: There are no valid core area pairs. '
                   'This can happen when core area numbers in '
                   'your Conefor distances text file do not match '
                   'those in your core area feature class.')
            lu.raise_error(msg)

        linkTable = linkTable[ind]

        # Assign link IDs in order
        for x in range(len(linkTable)):
            linkTable[x, cfg.LTB_LINKID] = x + 1

        # ----------------------------------------------------------------------
        if cfg.CONNECTFRAGS:
            connect_clusters(linkTable)
        else:
            # Drop links that are too long
            gprint('\nChecking for corridors that are too long to map.')
            DISABLE_LEAST_COST_NO_VAL = False
            linkTable, numDroppedLinks = lu.drop_links(
                linkTable, cfg.MAXEUCDIST, 0, cfg.MINEUCDIST, 0,
                DISABLE_LEAST_COST_NO_VAL)
            if numDroppedLinks > 0:
                lu.dashline(1)
                gprint('Removed ' + str(numDroppedLinks) +
                       ' links that were too long in Euclidean '
                       'distance.')

            # Write linkTable to disk
            gprint('Writing ' + outlinkTableFile)
            lu.write_link_table(linkTable, outlinkTableFile)
            linkTableLogFile = path.join(cfg.LOGDIR, "linkTable_s2.csv")
            lu.write_link_table(linkTable, linkTableLogFile)
            lu.report_links(linkTable)

            gprint('Creating shapefiles with linework for links.\n')
            try:
                lu.write_link_maps(outlinkTableFile, step=2)
            except:
                lu.write_link_maps(outlinkTableFile, step=2)
            gprint('Linework shapefiles written.')

        # if dropFlag:
        #     print_conefor_warning()

    # Return GEOPROCESSING specific errors
    except arcgisscripting.ExecuteError:
        lu.dashline(1)
        gprint('****Failed in step 2. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)

    # Return any PYTHON or system specific errors
    except:
        lu.dashline(1)
        gprint('****Failed in step 2. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)

    return
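
# ----------------------------------------------------------------------
# Hedged numpy sketch of the duplicate-pair removal in step 2: after
# the lexsort, rows for the same (core1, core2) pair are adjacent with
# the smallest distance first, so keeping only each block's first row
# retains the MINIMUM distance per pair. Equivalent to the backward
# loop above, without the sentinel zeros; illustrative only:
def dedup_min_dist_sketch(eucDists):
    """Keep one row per sorted core pair, retaining minimum distance."""
    import numpy as npy
    samePair = npy.logical_and(eucDists[1:, 0] == eucDists[:-1, 0],
                               eucDists[1:, 1] == eucDists[:-1, 1])
    keep = npy.concatenate(([True], ~samePair))
    return eucDists[keep]
# Example: rows [[1, 2, 5.], [1, 2, 9.], [1, 3, 4.]] reduce to
# [[1, 2, 5.], [1, 3, 4.]], keeping the 5.0 (minimum) for pair (1, 2).
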
def calc_lccs(normalize):
    try:
        if normalize:
            mosaicBaseName = "_corridors"
            writeTruncRaster = cfg.WRITETRUNCRASTER
            outputGDB = cfg.OUTPUTGDB
            if cfg.CALCNONNORMLCCS:
                SAVENORMLCCS = False
            else:
                SAVENORMLCCS = cfg.SAVENORMLCCS
        else:
            mosaicBaseName = "_NON_NORMALIZED_corridors"
            SAVENORMLCCS = False
            outputGDB = cfg.EXTRAGDB
            writeTruncRaster = False

        lu.dashline(1)
        gprint('Running script ' + _SCRIPT_NAME)
        linkTableFile = lu.get_prev_step_link_table(step=5)
        if cfg.useArcpy:
            arcpy.env.workspace = cfg.SCRATCHDIR
            arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
            arcpy.env.overwriteOutput = True
            arcpy.env.compression = "NONE"
        else:
            gp.workspace = cfg.SCRATCHDIR
            gp.scratchWorkspace = cfg.ARCSCRATCHDIR
            gp.OverwriteOutput = True

        if cfg.MAXEUCDIST is not None:
            gprint('Max Euclidean distance between cores')
            gprint('for linkage mapping set to ' + str(cfg.MAXEUCDIST))

        if cfg.MAXCOSTDIST is not None:
            gprint('Max cost-weighted distance between cores')
            gprint('for linkage mapping set to ' + str(cfg.MAXCOSTDIST))

        # Set the analysis extent and cell size to that of the resistance
        # surface
        if cfg.useArcpy:
            arcpy.env.extent = cfg.RESRAST
            arcpy.env.cellSize = cfg.RESRAST
            arcpy.env.snapRaster = cfg.RESRAST
            arcpy.env.mask = cfg.RESRAST
        else:
            gp.Extent = (gp.Describe(cfg.RESRAST)).Extent
            gp.cellSize = gp.Describe(cfg.RESRAST).MeanCellHeight
            gp.mask = cfg.RESRAST
            gp.snapraster = cfg.RESRAST

        linkTable = lu.load_link_table(linkTableFile)
        numLinks = linkTable.shape[0]
        numCorridorLinks = lu.report_links(linkTable)
        if numCorridorLinks == 0:
            lu.dashline(1)
            msg = '\nThere are no corridors to map. Bailing.'
            lu.raise_error(msg)

        if not cfg.STEP3 and not cfg.STEP4:
            # Re-check for links that are too long, in case the script was
            # run out of sequence with more stringent settings
            gprint('Double-checking for corridors that are too long to map.')
            DISABLE_LEAST_COST_NO_VAL = True
            linkTable, numDroppedLinks = lu.drop_links(
                linkTable, cfg.MAXEUCDIST, cfg.MINEUCDIST, cfg.MAXCOSTDIST,
                cfg.MINCOSTDIST, DISABLE_LEAST_COST_NO_VAL)

        # Added to try to speed up:
        gp.pyramid = "NONE"
        gp.rasterstatistics = "NONE"

        # Set up directories for normalized lcc and mosaic grids
        dirCount = 0
        gprint("Creating output folder: " + cfg.LCCBASEDIR)
        lu.delete_dir(cfg.LCCBASEDIR)
        gp.CreateFolder_management(path.dirname(cfg.LCCBASEDIR),
                                   path.basename(cfg.LCCBASEDIR))
        gp.CreateFolder_management(cfg.LCCBASEDIR, cfg.LCCNLCDIR_NM)
        clccdir = path.join(cfg.LCCBASEDIR, cfg.LCCNLCDIR_NM)
        # mosaicGDB = path.join(cfg.LCCBASEDIR, "mosaic.gdb")
        # gp.createfilegdb(cfg.LCCBASEDIR, "mosaic.gdb")
        # mosaicRaster = mosaicGDB + '\\' + "nlcc_mos"  # Full path
        gprint("")
        if normalize:
            gprint('Normalized least-cost corridors will be written '
                   'to ' + clccdir + '\n')
        PREFIX = cfg.PREFIX

        # Add CWD layers for core area pairs to produce NORMALIZED LCC layers
        numGridsWritten = 0
        coreList = linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1]
        coreList = npy.sort(coreList)

        x = 0
        linkCount = 0
        endIndex = numLinks
        while x < endIndex:
            if linkTable[x, cfg.LTB_LINKTYPE] < 1:  # If not a valid link
                x = x + 1
                continue

            linkCount = linkCount + 1
            start_time = time.clock()

            linkId = str(int(linkTable[x, cfg.LTB_LINKID]))

            # Source and target cores
            corex = int(coreList[x, 0])
            corey = int(coreList[x, 1])

            # Get cwd rasters for source and target cores
            cwdRaster1 = lu.get_cwd_path(corex)
            cwdRaster2 = lu.get_cwd_path(corey)

            if not gp.Exists(cwdRaster1):
                msg = '\nError: cannot find cwd raster:\n' + cwdRaster1
                lu.raise_error(msg)
            if not gp.Exists(cwdRaster2):
                msg = '\nError: cannot find cwd raster:\n' + cwdRaster2
                lu.raise_error(msg)

            lccNormRaster = path.join(clccdir, str(corex) + "_"
                                      + str(corey))  # + ".tif")
            if cfg.useArcpy:
                arcpy.env.extent = "MINOF"
            else:
                gp.Extent = "MINOF"

            # FIXME: need to check for this?:
            # if exists already, don't re-create
            # if not gp.Exists(lccRaster):

            link = lu.get_links_from_core_pairs(linkTable, corex, corey)
            offset = 10000

            # Normalized lcc rasters are created by adding cwd rasters and
            # subtracting the least cost distance between them.
            if arcpyAvailable:
                cfg.useArcpy = True  # Fixes Canran Liu's bug with lcDist
            if cfg.useArcpy:
                lcDist = float(linkTable[link, cfg.LTB_CWDIST]) - offset
                if normalize:
                    statement = ('outras = Raster(cwdRaster1) '
                                 '+ Raster(cwdRaster2) - lcDist; '
                                 'outras.save(lccNormRaster)')
                else:
                    statement = ('outras = Raster(cwdRaster1) '
                                 '+ Raster(cwdRaster2); '
                                 'outras.save(lccNormRaster)')
            else:
                if normalize:
                    lcDist = str(linkTable[link, cfg.LTB_CWDIST] - offset)
                    expression = (cwdRaster1 + " + " + cwdRaster2 + " - "
                                  + lcDist)
                else:
                    expression = cwdRaster1 + " + " + cwdRaster2
                statement = ('gp.SingleOutputMapAlgebra_sa(expression, '
                             'lccNormRaster)')
            count = 0
            while True:
                try:
                    exec statement
                    randomerror()  # debugging hook; may raise to test retries
                except Exception:
                    count, tryAgain = lu.retry_arc_error(count, statement)
                    if not tryAgain:
                        exec statement
                else:
                    break
            cfg.useArcpy = False  # End fix for Canran Liu's bug with lcDist

            if normalize and cfg.useArcpy:
                try:
                    minObject = gp.GetRasterProperties(lccNormRaster,
                                                       "MINIMUM")
                    rasterMin = float(str(minObject.getoutput(0)))
                except Exception:
                    gp.AddWarning('\n------------------------------------')
                    gp.AddWarning('WARNING: Raster minimum check failed in '
                                  'step 5. \nThis may mean the output '
                                  'rasters are corrupted. Please \nbe sure '
                                  'to check for valid rasters in '
                                  + outputGDB)
                    rasterMin = 0
                tolerance = (float(gp.cellSize) * -10) + offset
                if rasterMin < tolerance:
                    lu.dashline(1)
                    msg = ('WARNING: Minimum value of corridor #'
                           + str(x + 1) + ' is much less than zero ('
                           + str(rasterMin) + ').'
                           '\nThis could mean that BOUNDING CIRCLE BUFFER '
                           'DISTANCES were too small and a corridor passed '
                           'outside of a bounding circle, or that a '
                           'corridor passed outside of the resistance '
                           'map.\n')
                    gp.AddWarning(msg)

            if cfg.useArcpy:
                arcpy.env.extent = cfg.RESRAST
            else:
                gp.Extent = (gp.Describe(cfg.RESRAST)).Extent

            mosaicDir = path.join(cfg.LCCBASEDIR, 'mos' + str(x + 1))
            lu.create_dir(mosaicDir)
            mosFN = 'mos'  # fixme: change to .tif and move
            mosaicRaster = path.join(mosaicDir, mosFN)

            if numGridsWritten == 0 and dirCount == 0:
                # If this is the first grid then copy rather than mosaic
                arcObj.CopyRaster_management(lccNormRaster, mosaicRaster)
            else:
                rasterString = ('"' + lccNormRaster + ";" + lastMosaicRaster
                                + '"')
                statement = ('arcObj.MosaicToNewRaster_management('
                             'rasterString, mosaicDir, mosFN, "", '
                             '"32_BIT_FLOAT", gp.cellSize, "1", "MINIMUM", '
                             '"MATCH")')
                # statement = ('arcpy.Mosaic_management(lccNormRaster, '
                #              'mosaicRaster, "MINIMUM", "MATCH")')
                count = 0
                while True:
                    try:
                        lu.write_log('Executing mosaic for link #'
                                     + str(linkId))
                        exec statement
                        lu.write_log('Done with mosaic.')
                        randomerror()
                    except Exception:
                        count, tryAgain = lu.retry_arc_error(count,
                                                             statement)
                        lu.delete_data(mosaicRaster)
                        lu.delete_dir(mosaicDir)
                        # Try a new directory
                        mosaicDir = path.join(cfg.LCCBASEDIR,
                                              'mos' + str(x + 1) + '_'
                                              + str(count))
                        lu.create_dir(mosaicDir)
                        mosaicRaster = path.join(mosaicDir, mosFN)
                        if not tryAgain:
                            exec statement
                    else:
                        break

            endTime = time.clock()
            processTime = round((endTime - start_time), 2)

            if normalize:
                printText = "Normalized and mosaicked "
            else:
                printText = "Mosaicked NON-normalized "
            gprint(printText + "corridor for link ID #" + str(linkId)
                   + " connecting core areas " + str(corex) + " and "
                   + str(corey) + " in " + str(processTime) + " seconds. "
                   + str(int(linkCount)) + " out of "
                   + str(int(numCorridorLinks)) + " links have been "
                   "processed.")

            # Temporarily disable links in linktable - don't want to mosaic
            # them twice
            for y in range(x + 1, numLinks):
                corex1 = int(coreList[y, 0])
                corey1 = int(coreList[y, 1])
                if corex1 == corex and corey1 == corey:
                    linkTable[y, cfg.LTB_LINKTYPE] = (
                        linkTable[y, cfg.LTB_LINKTYPE] + 1000)
                elif corex1 == corey and corey1 == corex:
                    linkTable[y, cfg.LTB_LINKTYPE] = (
                        linkTable[y, cfg.LTB_LINKTYPE] + 1000)

            numGridsWritten = numGridsWritten + 1
            if not SAVENORMLCCS:
                lu.delete_data(lccNormRaster)
                lu.delete_dir(clccdir)
                lu.create_dir(clccdir)
            else:
                if numGridsWritten == 100:
                    # We only write up to 100 grids to any one folder
                    # because otherwise Arc slows to a crawl
                    dirCount = dirCount + 1
                    numGridsWritten = 0
                    clccdir = path.join(cfg.LCCBASEDIR,
                                        cfg.LCCNLCDIR_NM + str(dirCount))
                    gprint("Creating output folder: " + clccdir)
                    gp.CreateFolder_management(cfg.LCCBASEDIR,
                                               path.basename(clccdir))

            if numGridsWritten > 1 or dirCount > 0:
                lu.delete_data(lastMosaicRaster)
                lu.delete_dir(path.dirname(lastMosaicRaster))

            lastMosaicRaster = mosaicRaster
            x = x + 1

        # Rows that were temporarily disabled
        rows = npy.where(linkTable[:, cfg.LTB_LINKTYPE] > 1000)
        linkTable[rows, cfg.LTB_LINKTYPE] = (
            linkTable[rows, cfg.LTB_LINKTYPE] - 1000)

        # ------------------------------------------------------------------
        # Create output geodatabase
        if not gp.exists(outputGDB):
            gp.createfilegdb(cfg.OUTPUTDIR, path.basename(outputGDB))
        if cfg.useArcpy:
            arcpy.env.workspace = outputGDB
        else:
            gp.workspace = outputGDB
        gp.pyramid = "NONE"
        gp.rasterstatistics = "NONE"

        # Copy mosaic raster to output geodatabase
        saveFloatRaster = False
        if saveFloatRaster:
            floatRaster = (outputGDB + '\\' + PREFIX + mosaicBaseName
                           + '_flt')  # Full path
            statement = ('arcObj.CopyRaster_management(mosaicRaster, '
                         'floatRaster)')
            try:
                exec statement
            except Exception:
                pass

        # ------------------------------------------------------------------
        # Convert mosaic raster to integer
        intRaster = path.join(outputGDB, PREFIX + mosaicBaseName)
        if cfg.useArcpy:
            statement = ('outras = Int(Raster(mosaicRaster) - offset + 0.5); '
                         'outras.save(intRaster)')
        else:
            expression = ("int(" + mosaicRaster + " - " + str(offset)
                          + " + 0.5)")
            statement = 'gp.SingleOutputMapAlgebra_sa(expression, intRaster)'
        count = 0
        while True:
            try:
                exec statement
                randomerror()
            except Exception:
                count, tryAgain = lu.retry_arc_error(count, statement)
                if not tryAgain:
                    exec statement
            else:
                break

        # ------------------------------------------------------------------
        if writeTruncRaster:
            # Set anything beyond cfg.CWDTHRESH to NODATA.
            if arcpyAvailable:
                cfg.useArcpy = True  # For Alissa Pump's error with 10.1
            cutoffText = str(cfg.CWDTHRESH)
            if cutoffText[-6:] == '000000':
                cutoffText = cutoffText[0:-6] + 'm'
            elif cutoffText[-3:] == '000':
                cutoffText = cutoffText[0:-3] + 'k'
            truncRaster = (outputGDB + '\\' + PREFIX + mosaicBaseName
                           + '_truncated_at_' + cutoffText)
            if cfg.useArcpy:
                statement = ('outRas = Raster(intRaster) '
                             '* (Con(Raster(intRaster) <= cfg.CWDTHRESH, 1)); '
                             'outRas.save(truncRaster)')
            else:
                expression = ("(" + intRaster + " * (con(" + intRaster
                              + "<= " + str(cfg.CWDTHRESH) + ",1)))")
                statement = ('gp.SingleOutputMapAlgebra_sa(expression, '
                             'truncRaster)')
            count = 0
            while True:
                try:
                    exec statement
                    randomerror()
                except Exception:
                    count, tryAgain = lu.retry_arc_error(count, statement)
                    if not tryAgain:
                        exec statement
                else:
                    break
            cfg.useArcpy = False  # End fix for Alissa Pump's error with 10.1

        # ------------------------------------------------------------------
        # Check for unreasonably low minimum NLCC values
        try:
            mosaicGrid = path.join(cfg.LCCBASEDIR, 'mos')
            # Copy to grid to test
            arcObj.CopyRaster_management(mosaicRaster, mosaicGrid)
            minObject = gp.GetRasterProperties(mosaicGrid, "MINIMUM")
            rasterMin = float(str(minObject.getoutput(0)))
        except Exception:
            gp.AddWarning('\n------------------------------------------------')
            gp.AddWarning('WARNING: Raster minimum check failed in step 5. \n'
                          'This may mean the output rasters are corrupted. '
                          'Please \nbe sure to check for valid rasters in '
                          + outputGDB)
            rasterMin = 0
        tolerance = float(gp.cellSize) * -10
        if rasterMin < tolerance:
            lu.dashline(1)
            msg = ('WARNING: Minimum value of mosaicked corridor map is '
                   'much less than zero (' + str(rasterMin) + ').'
                   '\nThis could mean that BOUNDING CIRCLE BUFFER DISTANCES '
                   'were too small and a corridor passed outside of a '
                   'bounding circle, or that a corridor passed outside of '
                   'the resistance map.\n')
            gp.AddWarning(msg)

        gprint('\nWriting final LCP maps...')
        if cfg.STEP4:
            finalLinkTable = lu.update_lcp_shapefile(linkTable, lastStep=4,
                                                     thisStep=5)
        elif cfg.STEP3:
            finalLinkTable = lu.update_lcp_shapefile(linkTable, lastStep=3,
                                                     thisStep=5)
        else:
            # Don't know if step 4 was run, since this is started at step 5.
            # Use presence of previous linktable files to figure this out.
            # Linktable name includes step number.
            prevLinkTableFile = lu.get_prev_step_link_table(step=5)
            prevStepInd = len(prevLinkTableFile) - 5
            lastStep = prevLinkTableFile[prevStepInd]
            finalLinkTable = lu.update_lcp_shapefile(linkTable, lastStep,
                                                     thisStep=5)

        outlinkTableFile = lu.get_this_step_link_table(step=5)
        gprint('Updating ' + outlinkTableFile)
        lu.write_link_table(linkTable, outlinkTableFile)

        linkTableLogFile = path.join(cfg.LOGDIR, "linkTable_s5.csv")
        lu.write_link_table(linkTable, linkTableLogFile)

        linkTableFinalFile = path.join(cfg.OUTPUTDIR,
                                       PREFIX + "_linkTable_s5.csv")
        lu.write_link_table(finalLinkTable, linkTableFinalFile)
        gprint('Copy of final linkTable written to ' + linkTableFinalFile)

        gprint('Creating shapefiles with linework for links.')
        try:
            lu.write_link_maps(outlinkTableFile, step=5)
        except Exception:
            lu.write_link_maps(outlinkTableFile, step=5)

        # Create final linkmap files in output directory, and remove files
        # from scratch.
        lu.copy_final_link_maps(step=5)

        if not SAVENORMLCCS:
            lu.delete_dir(cfg.LCCBASEDIR)

        # Build statistics for corridor rasters
        gp.addmessage('\nBuilding output statistics and pyramids '
                      'for corridor raster')
        lu.build_stats(intRaster)

        if writeTruncRaster:
            gp.addmessage('Building output statistics '
                          'for truncated corridor raster')
            lu.build_stats(truncRaster)

    # Return GEOPROCESSING specific errors
    except arcgisscripting.ExecuteError:
        lu.dashline(1)
        gprint('****Failed in step 5. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)

    # Return any PYTHON or system specific errors
    except Exception:
        lu.dashline(1)
        gprint('****Failed in step 5. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)

    return
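
# ---------------------------------------------------------------------------
# Minimal numpy sketch (illustration only, assuming cwd1 and cwd2 are aligned
# arrays) of the normalization used in calc_lccs above:
#     NLCC = cwd_to_core_x + cwd_to_core_y - least_cost_distance(x, y)
# Cells on the least-cost path score 0 and values rise away from it. The
# 10000-unit offset and integer conversion are Arc bookkeeping omitted here.
def _demo_normalized_lcc(cwd1, cwd2):
    """Return a normalized least-cost corridor surface for two cwd arrays."""
    lcDist = (cwd1 + cwd2).min()  # least-cost distance between the two cores
    return cwd1 + cwd2 - lcDist
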
def STEP8_calc_pinchpoints():
    """Maps pinch points in Linkage Mapper corridors using Circuitscape
    given CWD calculations from s3_calcCwds.py.

    """
    try:
        lu.dashline(0)
        gprint('Running script ' + _SCRIPT_NAME)

        restartFlag = False
        if cfg.CWDCUTOFF < 0:
            cfg.CWDCUTOFF = cfg.CWDCUTOFF * -1
            restartFlag = True  # Restart code in progress

        CSPATH = lu.get_cs_path()
        outputGDB = path.join(cfg.OUTPUTDIR, path.basename(cfg.PINCHGDB))

        arcpy.env.overwriteOutput = True
        arcpy.env.workspace = cfg.SCRATCHDIR
        arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
        arcpy.env.pyramid = "NONE"
        arcpy.env.rasterStatistics = "NONE"

        # Set the analysis extent and cell size to that of the resistance
        # surface
        arcpy.env.extent = cfg.RESRAST
        arcpy.env.cellSize = cfg.RESRAST
        arcpy.env.snapRaster = cfg.RESRAST
        resRaster = cfg.RESRAST
        arcpy.env.extent = "MINOF"

        minObject = arcpy.GetRasterProperties_management(resRaster,
                                                         "MINIMUM")
        rasterMin = float(str(minObject.getOutput(0)))
        if rasterMin <= 0:
            msg = ('Error: resistance raster cannot have 0 or negative '
                   'values.')
            lu.raise_error(msg)

        if cfg.DO_ADJACENTPAIRS:
            prevLcpShapefile = lu.get_lcp_shapefile(None, thisStep=8)
            if not arcpy.Exists(prevLcpShapefile):
                msg = ('Cannot find an LCP shapefile from step 5. Please '
                       'rerun that step and any previous ones if necessary.')
                lu.raise_error(msg)

            # Remove lcp shapefile
            lcpShapefile = path.join(cfg.DATAPASSDIR, "lcpLines_s8.shp")
            lu.delete_data(lcpShapefile)

        inLinkTableFile = lu.get_prev_step_link_table(step=8)
        linkTable = lu.load_link_table(inLinkTableFile)
        numLinks = linkTable.shape[0]
        numCorridorLinks = lu.report_links(linkTable)
        if numCorridorLinks == 0:
            lu.dashline(1)
            msg = '\nThere are no linkages. Bailing.'
            lu.raise_error(msg)

        if linkTable.shape[1] < 16:
            # If linktable has no entries from prior centrality or
            # pinch point analyses
            extraCols = npy.zeros((numLinks, 6), dtype="float64")
            linkTable = linkTable[:, 0:10]
            linkTable = npy.append(linkTable, extraCols, axis=1)
            linkTable[:, cfg.LTB_LCPLEN] = -1
            linkTable[:, cfg.LTB_CWDEUCR] = -1
            linkTable[:, cfg.LTB_CWDPATHR] = -1
            linkTable[:, cfg.LTB_EFFRESIST] = -1
            linkTable[:, cfg.LTB_CWDTORR] = -1
            linkTable[:, cfg.LTB_CURRENT] = -1
            del extraCols

        # Set up directories for circuit and circuit mosaic grids.
        # Create output geodatabase
        if not arcpy.Exists(cfg.PINCHGDB):
            arcpy.CreateFileGDB_management(cfg.OUTPUTDIR,
                                           path.basename(cfg.PINCHGDB))

        mosaicRaster = path.join(cfg.CIRCUITBASEDIR, "current_mos" + tif)
        coresToProcess = npy.unique(
            linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1])
        maxCoreNum = max(coresToProcess)
        del coresToProcess

        lu.dashline(0)
        coreList = linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1]
        coreList = npy.sort(coreList)

        INCIRCUITDIR = cfg.CIRCUITBASEDIR
        OUTCIRCUITDIR = path.join(cfg.CIRCUITBASEDIR,
                                  cfg.CIRCUITOUTPUTDIR_NM)
        CONFIGDIR = path.join(INCIRCUITDIR, cfg.CIRCUITCONFIGDIR_NM)

        # Cutoff value text to append to filenames
        cutoffText = str(cfg.CWDCUTOFF)
        if cutoffText[-6:] == '000000':
            cutoffText = cutoffText[0:-6] + 'm'
        elif cutoffText[-3:] == '000':
            cutoffText = cutoffText[0:-3] + 'k'

        if cfg.SQUARERESISTANCES:
            # Square resistance values
            squaredRaster = path.join(cfg.SCRATCHDIR, 'res_sqr')
            arcpy.env.workspace = cfg.SCRATCHDIR
            arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
            outRas = Raster(resRaster) * Raster(resRaster)
            outRas.save(squaredRaster)
            resRaster = squaredRaster

        if cfg.DO_ADJACENTPAIRS:
            linkLoop = 0
            lu.dashline(1)
            gprint('Mapping pinch points in individual corridors \n'
                   'using Circuitscape.')
            lu.dashline(1)
            gprint('If you try to cancel your run and the Arc dialog '
                   'hangs, ')
            gprint('you can kill Circuitscape by opening Windows Task '
                   'Manager')
            gprint('and ending the cs_run.exe process.')
            lu.dashline(2)
            for x in range(0, numLinks):
                linkId = str(int(linkTable[x, cfg.LTB_LINKID]))
                if not (linkTable[x, cfg.LTB_LINKTYPE] > 0):
                    continue
                linkLoop = linkLoop + 1
                linkDir = path.join(cfg.SCRATCHDIR, 'link' + linkId)
                if restartFlag and path.exists(linkDir):
                    gprint('continuing')
                    continue
                restartFlag = False
                lu.create_dir(linkDir)
                start_time1 = time.clock()

                # Source and target cores
                corex = int(coreList[x, 0])
                corey = int(coreList[x, 1])

                # Get cwd rasters for source and target cores
                cwdRaster1 = lu.get_cwd_path(corex)
                cwdRaster2 = lu.get_cwd_path(corey)

                lccNormRaster = path.join(linkDir, 'lcc_norm')
                arcpy.env.extent = "MINOF"

                link = lu.get_links_from_core_pairs(linkTable, corex, corey)
                lcDist = float(linkTable[link, cfg.LTB_CWDIST])

                # Normalized lcc rasters are created by adding cwd rasters
                # and subtracting the least cost distance between them.
                outRas = Raster(cwdRaster1) + Raster(cwdRaster2) - lcDist
                outRas.save(lccNormRaster)

                # Create raster mask
                resMaskRaster = path.join(linkDir, 'res_mask' + tif)
                outCon = arcpy.sa.Con(
                    Raster(lccNormRaster) <= cfg.CWDCUTOFF, 1)
                outCon.save(resMaskRaster)

                # Convert to poly. Use as mask to clip resistance raster.
                resMaskPoly = path.join(linkDir, 'res_mask_poly.shp')
                arcpy.RasterToPolygon_conversion(resMaskRaster, resMaskPoly,
                                                 "NO_SIMPLIFY")
                arcpy.env.extent = resMaskPoly

                # Includes 0 values in some cases with CP LI model if tif,
                # so using ESRI Grid format
                resClipRasterMasked = path.join(linkDir, 'res_clip_m')
                # Extract masked resistance raster.
                # Needs to be float to get export to npy to work.
                outRas = arcpy.sa.ExtractByMask(resRaster,
                                                resMaskPoly) + 0.0
                outRas.save(resClipRasterMasked)

                resNpyFN = 'resistances_link_' + linkId + '.npy'
                resNpyFile = path.join(INCIRCUITDIR, resNpyFN)
                numElements, numResistanceNodes = export_ras_to_npy(
                    resClipRasterMasked, resNpyFile)

                totMem, availMem = lu.get_mem()
                if numResistanceNodes / availMem > 2000000:
                    lu.dashline(1)
                    lu.warn('Warning:')
                    lu.warn('Circuitscape can only solve 2-3 million nodes')
                    lu.warn('per gigabyte of available RAM. \nTotal '
                            'physical RAM on your machine is ~'
                            + str(totMem) + ' GB. \nAvailable memory is ~'
                            + str(availMem) + ' GB. \nYour resistance '
                            'raster has ' + str(numResistanceNodes)
                            + ' nodes.')
                    lu.dashline(2)

                corePairRaster = path.join(linkDir, 'core_pairs' + tif)
                arcpy.env.extent = resClipRasterMasked

                # Next result needs to be floating pt for numpy export
                outCon = arcpy.sa.Con(
                    Raster(cwdRaster1) == 0, corex,
                    arcpy.sa.Con(Raster(cwdRaster2) == 0, corey + 0.0))
                outCon.save(corePairRaster)

                coreNpyFN = 'cores_link_' + linkId + '.npy'
                coreNpyFile = path.join(INCIRCUITDIR, coreNpyFN)
                numElements, numNodes = export_ras_to_npy(corePairRaster,
                                                          coreNpyFile)

                arcpy.env.extent = "MINOF"

                # Set circuitscape options and call
                options = lu.setCircuitscapeOptions()
                if cfg.WRITE_VOLT_MAPS:
                    options['write_volt_maps'] = True
                options['habitat_file'] = resNpyFile
                options['point_file'] = coreNpyFile
                options['set_focal_node_currents_to_zero'] = True
                outputFN = 'Circuitscape_link' + linkId + '.out'
                options['output_file'] = path.join(OUTCIRCUITDIR, outputFN)
                if numElements > 250000:
                    options['print_timings'] = True
                configFN = 'pinchpoint_config' + linkId + '.ini'
                outConfigFile = path.join(CONFIGDIR, configFN)
                lu.writeCircuitscapeConfigFile(outConfigFile, options)
                gprint('Processing link ID #' + str(linkId)
                       + '. Resistance map has '
                       + str(int(numResistanceNodes)) + ' nodes.')

                memFlag = call_circuitscape(CSPATH, outConfigFile)

                currentFN = ('Circuitscape_link' + linkId
                             + '_cum_curmap.npy')
                currentMap = path.join(OUTCIRCUITDIR, currentFN)

                if not arcpy.Exists(currentMap):
                    print_failure(numResistanceNodes, memFlag, 10)
                    numElements, numNodes = export_ras_to_npy(
                        resClipRasterMasked, resNpyFile)
                    memFlag = call_circuitscape(CSPATH, outConfigFile)
                    currentFN = ('Circuitscape_link' + linkId
                                 + '_cum_curmap.npy')
                    currentMap = path.join(OUTCIRCUITDIR, currentFN)

                if not arcpy.Exists(currentMap):
                    msg = ('\nCircuitscape failed. See error information '
                           'above.')
                    arcpy.AddError(msg)
                    lu.write_log(msg)
                    exit(1)

                # Either set core areas to nodata in current map or
                # divide each by its radius
                currentRaster = path.join(linkDir, "current" + tif)
                import_npy_to_ras(currentMap, corePairRaster, currentRaster)

                if cfg.WRITE_VOLT_MAPS:
                    voltFN = ('Circuitscape_link' + linkId + '_voltmap_'
                              + str(corex) + '_' + str(corey) + '.npy')
                    voltMap = path.join(OUTCIRCUITDIR, voltFN)
                    voltRaster = path.join(
                        outputGDB, cfg.PREFIX + "_voltMap_" + str(corex)
                        + '_' + str(corey))
                    import_npy_to_ras(voltMap, corePairRaster, voltRaster)
                    gprint('Building output statistics and pyramids '
                           'for voltage raster\n')
                    lu.build_stats(voltRaster)

                arcpy.env.extent = currentRaster

                if SETCORESTONULL:
                    # Set core areas to NoData in current map for color
                    # ramping
                    currentRaster2 = currentRaster + '2' + tif
                    outCon = arcpy.sa.Con(
                        arcpy.sa.IsNull(Raster(corePairRaster)),
                        Raster(currentRaster))
                    outCon.save(currentRaster2)
                    currentRaster = currentRaster2

                arcpy.env.extent = "MAXOF"
                if linkLoop == 1:
                    lu.delete_data(mosaicRaster)

                    @retry(10)
                    def copyRas2():
                        arcpy.CopyRaster_management(currentRaster,
                                                    mosaicRaster)
                    copyRas2()
                else:
                    @retry(10)
                    def mosaicRas():
                        arcpy.Mosaic_management(currentRaster, mosaicRaster,
                                                "MAXIMUM", "MATCH")
                    mosaicRas()

                resistancesFN = ('Circuitscape_link' + linkId
                                 + '_resistances_3columns.out')
                resistancesFile = path.join(OUTCIRCUITDIR, resistancesFN)
                resistances = npy.loadtxt(resistancesFile, dtype='Float64',
                                          comments='#')

                resistance = (float(str(arcpy.env.cellSize))
                              * resistances[2])
                linkTable[link, cfg.LTB_EFFRESIST] = resistance

                # Ratio
                if not cfg.SQUARERESISTANCES:
                    linkTable[link, cfg.LTB_CWDTORR] = (
                        linkTable[link, cfg.LTB_CWDIST]
                        / linkTable[link, cfg.LTB_EFFRESIST])

                # Clean up
                if not cfg.SAVE_TEMP_CIRCUIT_FILES:
                    lu.delete_file(coreNpyFile)
                    coreNpyBase, extension = path.splitext(coreNpyFile)
                    lu.delete_data(coreNpyBase + '.hdr')
                    lu.delete_file(resNpyFile)
                    resNpyBase, extension = path.splitext(resNpyFile)
                    lu.delete_data(resNpyBase + '.hdr')
                    lu.delete_file(currentMap)
                    curMapBase, extension = path.splitext(currentMap)
                    lu.delete_data(curMapBase + '.hdr')
                    lu.delete_data(currentRaster)
                    lu.clean_out_workspace(linkDir)
                    lu.delete_dir(linkDir)

                gprint('Finished with link ID #' + str(linkId) + '. '
                       + str(linkLoop) + ' out of ' + str(numCorridorLinks)
                       + ' links have been processed.')
                start_time1 = lu.elapsed_time(start_time1)

            outputRaster = path.join(
                outputGDB,
                cfg.PREFIX + "_current_adjacentPairs_" + cutoffText)
            lu.delete_data(outputRaster)

            @retry(10)
            def copyRas():
                arcpy.CopyRaster_management(mosaicRaster, outputRaster)
            copyRas()

            gprint('Building output statistics and pyramids '
                   'for corridor pinch point raster\n')
            lu.build_stats(outputRaster)

            finalLinkTable = lu.update_lcp_shapefile(linkTable, lastStep=5,
                                                     thisStep=8)

            linkTableFile = path.join(cfg.DATAPASSDIR,
                                      "linkTable_s5_plus.csv")
            lu.write_link_table(finalLinkTable, linkTableFile,
                                inLinkTableFile)
            linkTableFinalFile = path.join(
                cfg.OUTPUTDIR, cfg.PREFIX + "_linkTable_s5_plus.csv")
            lu.write_link_table(finalLinkTable, linkTableFinalFile,
                                inLinkTableFile)
            gprint('Copy of linkTable written to ' + linkTableFinalFile)
            # fixme: update sticks?

            gprint('Creating shapefiles with linework for links.')
            lu.write_link_maps(linkTableFinalFile, step=8)

            # Copy final link maps to gdb.
            lu.copy_final_link_maps(step=8)

            lu.delete_data(mosaicRaster)

        if not cfg.DO_ALLPAIRS:
            # Clean up temporary files
            if not cfg.SAVECURRENTMAPS:
                lu.delete_dir(OUTCIRCUITDIR)
            return

        lu.dashline(1)
        gprint('Mapping global pinch points among all\n'
               'core area pairs using Circuitscape.')
        if cfg.ALL_PAIR_SCENARIO == 'pairwise':
            gprint('Circuitscape will be run in PAIRWISE mode.')
        else:
            gprint('Circuitscape will be run in ALL-TO-ONE mode.')

        arcpy.env.workspace = cfg.SCRATCHDIR
        arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
        arcpy.env.extent = cfg.RESRAST
        arcpy.env.cellSize = cfg.RESRAST

        S8CORE_RAS = "s8core_ras"
        s8CoreRasPath = path.join(cfg.SCRATCHDIR, S8CORE_RAS)
        arcpy.FeatureToRaster_conversion(cfg.COREFC, cfg.COREFN,
                                         s8CoreRasPath, arcpy.env.cellSize)
        binaryCoreRaster = path.join(cfg.SCRATCHDIR, "core_ras_bin")

        # The following commands cause file lock problems on save, so
        # using the expression below instead:
        # outCon = arcpy.sa.Con(S8CORE_RAS, 1, "#", "VALUE > 0")
        # outCon.save(binaryCoreRaster)
        # gp.Con_sa(s8CoreRasPath, 1, binaryCoreRaster, "#", "VALUE > 0")
        outCon = arcpy.sa.Con(Raster(s8CoreRasPath) > 0, 1)
        outCon.save(binaryCoreRaster)
        s5corridorRas = path.join(cfg.OUTPUTGDB, cfg.PREFIX + "_corridors")

        if not arcpy.Exists(s5corridorRas):
            s5corridorRas = path.join(cfg.OUTPUTGDB,
                                      cfg.PREFIX + "_lcc_mosaic_int")

        outCon = arcpy.sa.Con(
            Raster(s5corridorRas) <= cfg.CWDCUTOFF, Raster(resRaster),
            arcpy.sa.Con(Raster(binaryCoreRaster) > 0, Raster(resRaster)))

        resRasClipPath = path.join(cfg.SCRATCHDIR, 'res_ras_clip')
        outCon.save(resRasClipPath)

        arcpy.env.cellSize = resRasClipPath
        arcpy.env.extent = resRasClipPath
        s8CoreRasClipped = s8CoreRasPath + '_c'

        # Produce core raster with same extent as clipped resistance
        # raster; added to ensure correct data type- nodata values were
        # positive for cores otherwise
        outCon = arcpy.sa.Con(arcpy.sa.IsNull(Raster(s8CoreRasPath)),
                              -9999, Raster(s8CoreRasPath))
        outCon.save(s8CoreRasClipped)

        resNpyFN = 'resistances.npy'
        resNpyFile = path.join(INCIRCUITDIR, resNpyFN)
        numElements, numResistanceNodes = export_ras_to_npy(resRasClipPath,
                                                            resNpyFile)

        totMem, availMem = lu.get_mem()
        if numResistanceNodes / availMem > 2000000:
            lu.dashline(1)
            lu.warn('Warning:')
            lu.warn('Circuitscape can only solve 2-3 million nodes')
            lu.warn('per gigabyte of available RAM. \nTotal physical RAM '
                    'on your machine is ~' + str(totMem)
                    + ' GB. \nAvailable memory is ~' + str(availMem)
                    + ' GB. \nYour resistance raster has '
                    + str(numResistanceNodes) + ' nodes.')
            lu.dashline(0)

        coreNpyFN = 'cores.npy'
        coreNpyFile = path.join(INCIRCUITDIR, coreNpyFN)
        numElements, numNodes = export_ras_to_npy(s8CoreRasClipped,
                                                  coreNpyFile)

        arcpy.env.extent = "MINOF"

        options = lu.setCircuitscapeOptions()
        options['scenario'] = cfg.ALL_PAIR_SCENARIO
        options['habitat_file'] = resNpyFile
        options['point_file'] = coreNpyFile
        options['set_focal_node_currents_to_zero'] = True
        outputFN = 'Circuitscape.out'
        options['output_file'] = path.join(OUTCIRCUITDIR, outputFN)
        options['print_timings'] = True
        configFN = 'pinchpoint_allpair_config.ini'
        outConfigFile = path.join(CONFIGDIR, configFN)
        lu.writeCircuitscapeConfigFile(outConfigFile, options)
        gprint('\nResistance map has ' + str(int(numResistanceNodes))
               + ' nodes.')

        lu.dashline(1)
        gprint('If you try to cancel your run and the Arc dialog hangs, ')
        gprint('you can kill Circuitscape by opening Windows Task Manager')
        gprint('and ending the cs_run.exe process.')
        lu.dashline(0)

        call_circuitscape(CSPATH, outConfigFile)

        if options['scenario'] == 'pairwise':
            rasterSuffix = "_current_allPairs_" + cutoffText
        else:
            rasterSuffix = "_current_allToOne_" + cutoffText

        currentFN = 'Circuitscape_cum_curmap.npy'
        currentMap = path.join(OUTCIRCUITDIR, currentFN)
        outputRaster = path.join(outputGDB, cfg.PREFIX + rasterSuffix)
        currentRaster = path.join(cfg.SCRATCHDIR, "current")

        try:
            import_npy_to_ras(currentMap, resRasClipPath, outputRaster)
        except Exception:
            lu.dashline(1)
            msg = ('ERROR: Circuitscape failed. \n'
                   'Note: Circuitscape can only solve 2-3 million nodes'
                   '\nper gigabyte of available RAM. The resistance '
                   '\nraster for the last corridor had '
                   + str(numResistanceNodes) + ' nodes.\n\nResistance '
                   'raster values that vary by >6 orders of \nmagnitude '
                   'can also cause failures, as can a mismatch in '
                   '\ncore area and resistance raster extents.')
            arcpy.AddError(msg)
            lu.write_log(msg)
            exit(1)

        # Set core areas to nodata
        if SETCORESTONULL:
            # Set core areas to NoData in current map for color ramping
            outputRasterND = outputRaster + '_noDataCores'
            outCon = arcpy.sa.SetNull(Raster(s8CoreRasClipped) > 0,
                                      Raster(outputRaster))
            outCon.save(outputRasterND)

        gprint('\nBuilding output statistics and pyramids '
               'for pinch point raster.')
        lu.build_stats(outputRaster)
        lu.build_stats(outputRasterND)

        # Clean up temporary files
        if not cfg.SAVECURRENTMAPS:
            lu.delete_dir(OUTCIRCUITDIR)

    # Return GEOPROCESSING specific errors
    except arcpy.ExecuteError:
        lu.dashline(1)
        gprint('****Failed in step 8. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)

    # Return any PYTHON or system specific errors
    except Exception:
        lu.dashline(1)
        gprint('****Failed in step 8. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)
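
# ---------------------------------------------------------------------------
# Hedged sketch of the memory rule of thumb warned about in
# STEP8_calc_pinchpoints: Circuitscape solves very roughly 2-3 million raster
# nodes per gigabyte of available RAM. The helper name and the conservative
# 2-million-node default below are illustrative, not part of the toolbox.
def _demo_cs_memory_ok(numResistanceNodes, availMemGB, nodesPerGB=2000000):
    """Return True if a Circuitscape solve should fit in available memory."""
    return numResistanceNodes <= nodesPerGB * availMemGB
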
def STEP3_calc_cwds():
    """Calculates cost-weighted distances from each core area.

    Uses bounding circles around source and target cores to limit extent
    of cwd calculations and speed computation.

    """
    try:
        lu.dashline(1)
        gprint('Running script ' + _SCRIPT_NAME)
        lu.dashline(0)

        # Super secret setting to re-start a failed run. Enter 'RESTART' as
        # the name of the pairwise distance table in step 2, and uncheck
        # step 2. We can eventually place this in a .ini file.
        rerun = False
        if cfg.S2EUCDISTFILE is not None:
            if cfg.S2EUCDISTFILE.lower() == "restart":
                rerun = True

        # if cfg.TMAXCWDIST is None:
        #     gprint('NOT using a maximum cost-weighted distance.')
        # else:
        #     gprint('Max cost-weighted distance for CWD calcs set '
        #            'to ' + str(cfg.TMAXCWDIST) + '\n')

        if cfg.BUFFERDIST is not None:
            gprint('Bounding circles plus a buffer of '
                   + str(float(cfg.BUFFERDIST)) + ' map units will '
                   'be used \nto limit extent of cost distance '
                   'calculations.')
        elif cfg.TOOL != cfg.TOOL_CC:
            gprint('NOT using bounding circles in cost distance '
                   'calculations.')

        # Set the analysis extent and cell size so we don't extract
        # rasters that go beyond the extent of the original raster
        if arcpy:
            arcpy.env.cellSize = cfg.RESRAST
            arcpy.env.extent = "MINOF"
        else:
            gp.cellSize = gp.Describe(cfg.RESRAST).MeanCellHeight
            gp.Extent = "MINOF"
            gp.mask = cfg.RESRAST
        if arcpy:
            arcpy.env.overwriteOutput = True
            arcpy.env.workspace = cfg.SCRATCHDIR
            arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
        else:
            gp.OverwriteOutput = True
            gp.workspace = cfg.SCRATCHDIR
            gp.scratchWorkspace = cfg.ARCSCRATCHDIR

        # Load linkTable (created in previous script)
        linkTableFile = lu.get_prev_step_link_table(step=3)
        linkTable = lu.load_link_table(linkTableFile)
        lu.report_links(linkTable)

        # Identify cores to map from LinkTable
        coresToMap = npy.unique(
            linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1])
        numCoresToMap = len(coresToMap)

        if numCoresToMap < 3:
            # No need to check for intermediate cores, because there
            # aren't any
            cfg.S3DROPLCCSic = False
        else:
            cfg.S3DROPLCCSic = cfg.S3DROPLCCS
        gprint('\nNumber of core areas to connect: ' + str(numCoresToMap))

        if rerun:
            # If picking up a failed run, make sure needed files are there
            lu.dashline(1)
            gprint('\n****** RESTART MODE ENABLED ******\n')
            gprint('**** NOTE: This mode picks up step 3 where a\n'
                   'previous run left off due to a crash or user\n'
                   'abort. It assumes you are using the same input\n'
                   'data used in the terminated run.****\n')
            lu.dashline(0)
            lu.snooze(10)
            savedLinkTableFile = path.join(
                cfg.DATAPASSDIR, "temp_linkTable_s3_partial.csv")
            coreListFile = path.join(cfg.DATAPASSDIR,
                                     "temp_cores_to_map.csv")

            if (not path.exists(savedLinkTableFile)
                    or not path.exists(coreListFile)):
                gprint('No partial results file found from previous '
                       'stopped run. Starting run from beginning.\n')
                lu.dashline(0)
                rerun = False

        # If picking up a failed run, use old folders
        if not rerun:
            startIndex = 0
            if cfg.TOOL != cfg.TOOL_CC:
                lu.make_cwd_paths(max(coresToMap))  # Set up cwd directories

        # Make a feature layer for input cores to select from
        gp.MakeFeatureLayer(cfg.COREFC, cfg.FCORES)

        # Drop links that are too long
        gprint('\nChecking for corridors that are too long to map.')
        DISABLE_LEAST_COST_NO_VAL = False
        linkTable, numDroppedLinks = lu.drop_links(
            linkTable, cfg.MAXEUCDIST, 0, cfg.MAXCOSTDIST, 0,
            DISABLE_LEAST_COST_NO_VAL)

        # ------------------------------------------------------------------
        # Bounding boxes
        if cfg.BUFFERDIST is not None:
            # Create bounding boxes around cores
            start_time = time.clock()
            gprint('Calculating bounding boxes for core areas.')
            extentBoxList = npy.zeros((0, 5), dtype='float32')
            for x in range(len(coresToMap)):
                core = coresToMap[x]
                boxCoords = lu.get_extent_box_coords(core)
                extentBoxList = npy.append(extentBoxList, boxCoords, axis=0)
            gprint('\nDone calculating bounding boxes.')
            start_time = lu.elapsed_time(start_time)

        # Bounding circle code
        if cfg.BUFFERDIST is not None:
            # Make a set of circles encompassing core areas we'll be
            # connecting
            start_time = time.clock()
            gprint('Calculating bounding circles around potential'
                   ' corridors.')

            # x y corex corey radius - stores data for bounding circle
            # centroids
            boundingCirclePointArray = npy.zeros((0, 5), dtype='float32')

            circleList = npy.zeros((0, 3), dtype='int32')

            numLinks = linkTable.shape[0]
            for x in range(0, numLinks):
                if (linkTable[x, cfg.LTB_LINKTYPE] == cfg.LT_CORR
                        or linkTable[x, cfg.LTB_LINKTYPE] == cfg.LT_KEEP):
                    # If it's a valid corridor link
                    linkId = int(linkTable[x, cfg.LTB_LINKID])
                    # fixme- this code is clumsy- can trim down
                    cores = npy.zeros((1, 3), dtype='int32')
                    cores[0, :] = npy.sort(
                        [0, linkTable[x, cfg.LTB_CORE1],
                         linkTable[x, cfg.LTB_CORE2]])
                    corex = cores[0, 1]
                    corey = cores[0, 2]
                    cores[0, 0] = linkId

                    foundFlag = False
                    for y in range(0, len(circleList)):  # clumsy
                        if (circleList[y, 1] == corex
                                and circleList[y, 2] == corey):
                            foundFlag = True
                    if not foundFlag:
                        circlePointData = lu.get_bounding_circle_data(
                            extentBoxList, corex, corey, cfg.BUFFERDIST)
                        boundingCirclePointArray = npy.append(
                            boundingCirclePointArray, circlePointData,
                            axis=0)
                        # Keep track of which cores we draw bounding
                        # circles around
                        circleList = npy.append(circleList, cores, axis=0)

            gprint('\nCreating bounding circles using buffer analysis.')
            dir, BNDCIRCENS = path.split(cfg.BNDCIRCENS)
            lu.make_points(cfg.SCRATCHDIR, boundingCirclePointArray,
                           BNDCIRCENS)
            lu.delete_data(cfg.BNDCIRS)
            gp.buffer_analysis(cfg.BNDCIRCENS, cfg.BNDCIRS, "radius")
            gp.deletefield(cfg.BNDCIRS, "BUFF_DIST")

            gprint('Successfully created bounding circles around '
                   'potential corridors using \na buffer of '
                   + str(float(cfg.BUFFERDIST)) + ' map units.')
            start_time = lu.elapsed_time(start_time)

            gprint('Reducing global processing area using bounding '
                   'circle plus buffer of '
                   + str(float(cfg.BUFFERDIST)) + ' map units.\n')

            extentBoxList = npy.zeros((0, 5), dtype='float32')
            boxCoords = lu.get_extent_box_coords()
            extentBoxList = npy.append(extentBoxList, boxCoords, axis=0)
            extentBoxList[0, 0] = 0

            boundingCirclePointArray = npy.zeros((0, 5), dtype='float32')
            circlePointData = lu.get_bounding_circle_data(
                extentBoxList, 0, 0, cfg.BUFFERDIST)

            dir, BNDCIRCEN = path.split(cfg.BNDCIRCEN)
            lu.make_points(cfg.SCRATCHDIR, circlePointData, BNDCIRCEN)
            lu.delete_data(cfg.BNDCIR)
            gp.buffer_analysis(cfg.BNDCIRCEN, cfg.BNDCIR, "radius")

            gprint('Extracting raster....')
            cfg.BOUNDRESIS = cfg.BOUNDRESIS + tif
            lu.delete_data(cfg.BOUNDRESIS)
            count = 0
            statement = ('gp.ExtractByMask_sa(cfg.RESRAST, cfg.BNDCIR, '
                         'cfg.BOUNDRESIS)')
            while True:
                try:
                    exec statement
                    randomerror()
                except Exception:
                    count, tryAgain = lu.retry_arc_error(count, statement)
                    if not tryAgain:
                        exec statement
                else:
                    break
            gprint('\nReduced resistance raster extracted using '
                   'bounding circle.')

        else:
            # If not using bounding circles, just go with the resistance
            # raster.
            cfg.BOUNDRESIS = cfg.RESRAST

        # ------------------------------------------------------------------
        # Rasterize core areas to speed cost distance calcs
        gprint("Creating core area raster.")
        gp.SelectLayerByAttribute(cfg.FCORES, "CLEAR_SELECTION")

        if arcpy:
            arcpy.env.cellSize = cfg.BOUNDRESIS
            arcpy.env.extent = cfg.BOUNDRESIS
        else:
            gp.cellSize = gp.Describe(cfg.BOUNDRESIS).MeanCellHeight
            gp.extent = gp.Describe(cfg.BOUNDRESIS).extent

        if rerun:
            # Saved linktable replaces the one now in memory
            linkTable = lu.load_link_table(savedLinkTableFile)
            coresToMapSaved = npy.loadtxt(coreListFile, dtype='Float64',
                                          comments='#', delimiter=',')
            startIndex = coresToMapSaved[0]  # Index where we left off
            del coresToMapSaved
            gprint('\n****** Re-starting run at core area number '
                   + str(int(coresToMap[startIndex])) + ' ******\n')
            lu.dashline(0)

        if arcpy:
            arcpy.env.extent = "MINOF"
        else:
            gp.extent = "MINOF"

        # ------------------------------------------------------------------
        # Loop through cores, do cwd calcs for each
        if cfg.TOOL == cfg.TOOL_CC:
            gprint("\nMapping least-cost paths.\n")
        else:
            gprint("\nStarting cost distance calculations.\n")
        lcpLoop = 0
        failures = 0
        x = startIndex
        endIndex = len(coresToMap)
        linkTableMod = linkTable.copy()
        while x < endIndex:
            startTime1 = time.clock()

            # Modification of linkTable in function was causing problems,
            # so make a copy:
            linkTablePassed = linkTableMod.copy()

            (linkTableReturned, failures, lcpLoop) = do_cwd_calcs(
                x, linkTablePassed, coresToMap, lcpLoop, failures)

            if failures == 0:
                # If iteration was successful, continue with next core
                linkTableMod = linkTableReturned
                sourceCore = int(coresToMap[x])
                gprint('Done with all calculations for core ID #'
                       + str(sourceCore) + '. ' + str(int(x + 1)) + ' of '
                       + str(endIndex) + ' cores have been processed.')
                start_time = lu.elapsed_time(startTime1)

                outlinkTableFile = path.join(
                    cfg.DATAPASSDIR, "temp_linkTable_s3_partial.csv")
                lu.write_link_table(linkTableMod, outlinkTableFile)
                # Increment loop counter
                x = x + 1
            else:
                # If iteration failed, try again after a wait period
                delay_restart(failures)

        # ------------------------------------------------------------------
        linkTable = linkTableMod

        # Reinstate temporarily disabled links
        rows = npy.where(linkTable[:, cfg.LTB_LINKTYPE] > 1000)
        linkTable[rows, cfg.LTB_LINKTYPE] = (
            linkTable[rows, cfg.LTB_LINKTYPE] - 1000)

        # Drop links that are too long
        DISABLE_LEAST_COST_NO_VAL = True
        linkTable, numDroppedLinks = lu.drop_links(
            linkTable, cfg.MAXEUCDIST, cfg.MINEUCDIST, cfg.MAXCOSTDIST,
            cfg.MINCOSTDIST, DISABLE_LEAST_COST_NO_VAL)

        # Write link table file
        outlinkTableFile = lu.get_this_step_link_table(step=3)
        gprint('Updating ' + outlinkTableFile)
        lu.write_link_table(linkTable, outlinkTableFile)
        linkTableLogFile = path.join(cfg.LOGDIR, "linkTable_s3.csv")
        lu.write_link_table(linkTable, linkTableLogFile)

        start_time = time.clock()
        gprint('Creating shapefiles with linework for links...')
        try:
            lu.write_link_maps(outlinkTableFile, step=3)
        except Exception:
            lu.write_link_maps(outlinkTableFile, step=3)
        start_time = lu.elapsed_time(start_time)

        gprint('\nIndividual cost-weighted distance layers written '
               'to "cwd" directory. \n')
        gprint(outlinkTableFile
               + '\n updated with cost-weighted distances between core '
               'areas.')

        # Clean up temporary files for restart code
        tempFile = path.join(cfg.DATAPASSDIR, "temp_cores_to_map.csv")
        lu.delete_file(tempFile)
        tempFile = path.join(cfg.DATAPASSDIR,
                             "temp_linkTable_s3_partial.csv")
        lu.delete_file(tempFile)

        # Check if climate tool is calling linkage mapper
        if cfg.TOOL == cfg.TOOL_CC:
            coreList = npy.unique(
                linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1])
            for core in coreList:
                cwdRaster = lu.get_cwd_path(int(core))
                back_rast = cwdRaster.replace("cwd_", "back_")
                lu.delete_data(back_rast)

    # Return GEOPROCESSING specific errors
    except arcgisscripting.ExecuteError:
        lu.dashline(1)
        gprint('****Failed in step 3. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)

    # Return any PYTHON or system specific errors
    except Exception:
        lu.dashline(1)
        gprint('****Failed in step 3. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)

    return
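
# ---------------------------------------------------------------------------
# Illustration of the bounding-circle idea used in STEP3_calc_cwds: limit
# cost-distance processing to a circle that covers both cores plus the user
# buffer. This planar sketch of the geometry is an assumption for
# illustration, not the toolbox's lu.get_bounding_circle_data implementation,
# which works from core extent boxes.
import math  # stdlib; used only by the sketch below

def _demo_bounding_circle(x1, y1, x2, y2, bufferDist):
    """Center and radius of a circle covering two points plus a buffer."""
    cx = (x1 + x2) / 2.0
    cy = (y1 + y2) / 2.0
    radius = math.hypot(x2 - x1, y2 - y1) / 2.0 + bufferDist
    return cx, cy, radius
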
def STEP4_refine_network():
    """Allows user to only connect each core area to its N nearest
    neighbors, then connect any disjunct clusters ('constellations') of
    core areas to their nearest neighboring cluster.

    """
    try:
        lu.dashline(1)
        gprint('Running script ' + _SCRIPT_NAME)

        cfg.gp.Workspace = cfg.OUTPUTDIR

        linkTableFile = lu.get_prev_step_link_table(step=4)
        linkTable = lu.load_link_table(linkTableFile)

        numLinks = linkTable.shape[0]
        lu.report_links(linkTable)

        if not cfg.STEP3:
            # Re-check for links that are too long, in case the script was
            # run out of sequence with more stringent settings
            gprint('Double-checking for corridors that are too long'
                   ' or too short to map.')
            DISABLE_LEAST_COST_NO_VAL = True
            linkTable, numDroppedLinks = lu.drop_links(
                linkTable, cfg.MAXEUCDIST, 0, cfg.MAXCOSTDIST, 0,
                DISABLE_LEAST_COST_NO_VAL)

        rows, cols = npy.where(
            linkTable[:, cfg.LTB_LINKTYPE:cfg.LTB_LINKTYPE + 1] > 0)
        # == cfg.LT_CORR
        # or
        # linkTable[:,cfg.LTB_LINKTYPE:cfg.LTB_LINKTYPE + 1] == cfg.LT_KEEP)
        corridorLinks = linkTable[rows, :]
        coresToProcess = npy.unique(
            corridorLinks[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1])

        if cfg.S4DISTTYPE_EU:
            distCol = cfg.LTB_EUCDIST
        else:
            distCol = cfg.LTB_CWDIST

        # Flag links that do not connect any core areas to their nearest
        # N neighbors. (N = cfg.S4MAXNN)
        lu.dashline(1)
        # Optionally ignore max nearest neighbor setting
        if cfg.IGNORES4MAXNN:
            gprint('Connecting each core area to all its neighbors.')
        else:
            gprint('Connecting each core area to its nearest '
                   + str(cfg.S4MAXNN) + ' neighbors.')

        # Code written assuming NO duplicate core pairs
        for core in coresToProcess:
            rows, cols = npy.where(
                corridorLinks[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1] == core)
            distsFromCore = corridorLinks[rows, :]

            # Sort by distance from target core
            ind = npy.argsort(distsFromCore[:, distCol])
            distsFromCore = distsFromCore[ind]

            # Set N nearest neighbor connections to Nearest Neighbor (NNCT)
            # Optionally ignore max nearest neighbor setting
            if cfg.IGNORES4MAXNN:
                maxRange = len(rows)
            else:
                maxRange = min(len(rows), cfg.S4MAXNN)
            for link in range(0, maxRange):
                linkId = int(distsFromCore[link, cfg.LTB_LINKID])
                # Assumes linktable is sequentially numbered with no gaps
                linkTable[linkId - 1, cfg.LTB_LINKTYPE] = cfg.LT_NNCT

        # Connect constellations (aka components or clusters)
        # Fixme: needs testing. Move to function.
        if cfg.S4CONNECT:
            lu.dashline(1)
            gprint('Connecting constellations')

            # linkTableComp has 4 extra cols to track COMPONENTS
            numLinks = linkTable.shape[0]
            # g1' g2' THEN c1 c2
            compCols = npy.zeros((numLinks, 4), dtype="int32")
            linkTableComp = npy.append(linkTable, compCols, axis=1)
            del compCols

            # Renumber cores to save memory for this next step. Place in
            # columns 10 and 11
            for coreInd in range(0, len(coresToProcess)):
                # Here, cols are 0 for cfg.LTB_CORE1 and 1 for
                # cfg.LTB_CORE2
                rows, cols = npy.where(
                    linkTableComp[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1]
                    == coresToProcess[coreInd])
                # Want results in cols 10 and 11 - these are NEW core
                # numbers (0 - numcores)
                linkTableComp[rows, cols + 10] = coreInd

            rows, cols = npy.where(
                linkTableComp[:, cfg.LTB_LINKTYPE:cfg.LTB_LINKTYPE + 1]
                == cfg.LT_NNCT)
            # The new, improved corridorLinks - only NN links
            corridorLinksComp = linkTableComp[rows, :]
            # These are NEW core numbers (range from 0 to numcores)
            coresToProcess = npy.unique(linkTableComp[:, 10:12])

            # Create graph describing connected cores.
            Graph = npy.zeros((len(coresToProcess), len(coresToProcess)),
                              dtype="int32")
            rows = corridorLinksComp[:, 10].astype('int32')
            cols = corridorLinksComp[:, 11].astype('int32')
            vals = npy.where(
                corridorLinksComp[:, cfg.LTB_LINKTYPE] == cfg.LT_NNCT,
                cfg.LT_CORR, 0)
            Graph[rows, cols] = vals
            Graph = Graph + Graph.T

            # Use graph to identify components (disconnected sub-groups)
            # in core area network
            components = lu.components_no_sparse(Graph)

            for coreInd in range(0, len(coresToProcess)):
                # In resulting cols, cols are 0 for LTB_CORE1 and 1 for
                # LTB_CORE2
                rows, cols = npy.where(
                    linkTableComp[:, 10:12] == coresToProcess[coreInd])
                # Want results in cols 12 and 13. Note: we've replaced new
                # core numbers with COMPONENT numbers.
                linkTableComp[rows, cols + 12] = components[coreInd]

            # Additional column indexes for linkTableComp
            component1Col = 12
            component2Col = 13
            linkTableComp[:, cfg.LTB_CLUST1] = linkTableComp[:,
                                                             component1Col]
            linkTableComp[:, cfg.LTB_CLUST2] = linkTableComp[:,
                                                             component2Col]

            # Sort by distance
            ind = npy.argsort(linkTableComp[:, distCol])
            linkTableComp = linkTableComp[ind]

            # Connect constellations via shortest inter-constellation
            # links, until all constellations connected.
            for row in range(0, numLinks):
                if ((linkTableComp[row, distCol] > 0)
                        and ((linkTableComp[row, cfg.LTB_LINKTYPE]
                              == cfg.LT_CORR)
                             or (linkTableComp[row, cfg.LTB_LINKTYPE]
                                 == cfg.LT_KEEP))
                        and (linkTableComp[row, component1Col]
                             != linkTableComp[row, component2Col])):
                    # Make this an inter-component link
                    linkTableComp[row, cfg.LTB_LINKTYPE] = cfg.LT_CLU
                    newComp = min(linkTableComp[
                        row, component1Col:component2Col + 1])
                    oldComp = max(linkTableComp[
                        row, component1Col:component2Col + 1])
                    # Cols are 0 and 1
                    rows, cols = npy.where(
                        linkTableComp[:, component1Col:component2Col + 1]
                        == oldComp)
                    # Want results in cols 12 and 13
                    linkTableComp[rows, cols + 12] = newComp

            # Remove extra columns from link table
            linkTable = lu.delete_col(linkTableComp, [10, 11, 12, 13])

            # Re-sort link table by link ID
            ind = npy.argsort(linkTable[:, cfg.LTB_LINKID])
            linkTable = linkTable[ind]

        # At end, any non-constellation links that are not NN's get dropped
        # (too long to be in cfg.S4MAXNN, not a component link)
        rows = npy.where(linkTable[:, cfg.LTB_LINKTYPE] == cfg.LT_CORR)
        linkTable[rows, cfg.LTB_LINKTYPE] = cfg.LT_CPLK

        # Set NNCT links to NN corridor links (NNC), get rid of extra
        # columns, re-sort linktable
        rows = npy.where(linkTable[:, cfg.LTB_LINKTYPE] == cfg.LT_NNCT)
        linkTable[rows, cfg.LTB_LINKTYPE] = cfg.LT_NNC

        # Write linkTable to disk
        outlinkTableFile = lu.get_this_step_link_table(step=4)
        gprint('\nWriting ' + outlinkTableFile)
        lu.write_link_table(linkTable, outlinkTableFile)
        linkTableLogFile = path.join(cfg.LOGDIR, "linkTable_s4.csv")
        lu.write_link_table(linkTable, linkTableLogFile)

        start_time = time.clock()
        lu.update_lcp_shapefile(linkTable, lastStep=3, thisStep=4)
        start_time = lu.elapsed_time(start_time)

        gprint('Creating shapefiles with linework for links.')
        try:
            lu.write_link_maps(outlinkTableFile, step=4)
        except Exception:
            lu.write_link_maps(outlinkTableFile, step=4)

    # Return GEOPROCESSING specific errors
    except arcgisscripting.ExecuteError:
        gprint('****Failed in step 4. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)

    # Return any PYTHON or system specific errors
    except Exception:
        gprint('****Failed in step 4. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)

    return
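
# ---------------------------------------------------------------------------
# Minimal sketch of the nearest-neighbor selection in STEP4_refine_network:
# for one core, order candidate links by distance and keep the closest N.
# The distCol and maxNN arguments stand in for the cfg.S4DISTTYPE_* and
# cfg.S4MAXNN settings; this helper is illustrative, not part of the toolbox.
def _demo_nearest_links(distsFromCore, distCol, maxNN):
    """Return rows for the (up to) maxNN shortest links from one core."""
    order = npy.argsort(distsFromCore[:, distCol])
    return distsFromCore[order[:maxNN], :]
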
def calc_lccs(normalize):
    try:
        if normalize:
            mosaicBaseName = "_corridors"
            writeTruncRaster = cfg.WRITETRUNCRASTER
            outputGDB = cfg.OUTPUTGDB
            if cfg.CALCNONNORMLCCS:
                SAVENORMLCCS = False
            else:
                SAVENORMLCCS = cfg.SAVENORMLCCS
        else:
            mosaicBaseName = "_NON_NORMALIZED_corridors"
            SAVENORMLCCS = False
            outputGDB = cfg.EXTRAGDB
            writeTruncRaster = False

        lu.dashline(1)
        gprint('Running script ' + _SCRIPT_NAME)
        linkTableFile = lu.get_prev_step_link_table(step=5)

        if cfg.useArcpy:
            arcpy.env.workspace = cfg.SCRATCHDIR
            arcpy.env.scratchWorkspace = cfg.ARCSCRATCHDIR
            arcpy.env.overwriteOutput = True
            arcpy.env.compression = "NONE"
        else:
            gp.workspace = cfg.SCRATCHDIR
            gp.scratchWorkspace = cfg.ARCSCRATCHDIR
            gp.OverwriteOutput = True

        if cfg.MAXEUCDIST is not None:
            gprint('Max Euclidean distance between cores')
            gprint('for linkage mapping set to ' + str(cfg.MAXEUCDIST))
        if cfg.MAXCOSTDIST is not None:
            gprint('Max cost-weighted distance between cores')
            gprint('for linkage mapping set to ' + str(cfg.MAXCOSTDIST))

        # Set the analysis extent and cell size to that of the resistance
        # surface
        if cfg.useArcpy:
            arcpy.env.Extent = cfg.RESRAST
            arcpy.env.cellSize = cfg.RESRAST
            arcpy.env.snapRaster = cfg.RESRAST
            arcpy.env.mask = cfg.RESRAST
        else:
            gp.Extent = (gp.Describe(cfg.RESRAST)).Extent
            gp.cellSize = gp.Describe(cfg.RESRAST).MeanCellHeight
            gp.mask = cfg.RESRAST
            gp.snapraster = cfg.RESRAST

        linkTable = lu.load_link_table(linkTableFile)
        numLinks = linkTable.shape[0]
        numCorridorLinks = lu.report_links(linkTable)
        if numCorridorLinks == 0:
            lu.dashline(1)
            msg = '\nThere are no corridors to map. Bailing.'
            lu.raise_error(msg)

        if not cfg.STEP3 and not cfg.STEP4:
            # Re-check for links that are too long, in case the script was
            # run out of sequence with more stringent settings
            gprint('Double-checking for corridors that are too long to map.')
            DISABLE_LEAST_COST_NO_VAL = True
            linkTable, numDroppedLinks = lu.drop_links(
                linkTable, cfg.MAXEUCDIST, cfg.MINEUCDIST, cfg.MAXCOSTDIST,
                cfg.MINCOSTDIST, DISABLE_LEAST_COST_NO_VAL)

        # Added to try to speed up:
        gp.pyramid = "NONE"
        gp.rasterstatistics = "NONE"

        # Set up directories for normalized lcc and mosaic grids
        dirCount = 0
        gprint("Creating output folder: " + cfg.LCCBASEDIR)
        lu.delete_dir(cfg.LCCBASEDIR)
        gp.CreateFolder_management(path.dirname(cfg.LCCBASEDIR),
                                   path.basename(cfg.LCCBASEDIR))
        gp.CreateFolder_management(cfg.LCCBASEDIR, cfg.LCCNLCDIR_NM)
        clccdir = path.join(cfg.LCCBASEDIR, cfg.LCCNLCDIR_NM)
        # mosaicGDB = path.join(cfg.LCCBASEDIR, "mosaic.gdb")
        # gp.createfilegdb(cfg.LCCBASEDIR, "mosaic.gdb")
        # mosaicRaster = mosaicGDB + '\\' + "nlcc_mos"  # Full path
        gprint("")
        if normalize:
            gprint('Normalized least-cost corridors will be written '
                   'to ' + clccdir + '\n')
        PREFIX = cfg.PREFIX

        # Add CWD layers for core area pairs to produce NORMALIZED LCC layers
        numGridsWritten = 0
        coreList = linkTable[:, cfg.LTB_CORE1:cfg.LTB_CORE2 + 1]
        coreList = npy.sort(coreList)
        # gprint('There are ' + str(len(npy.unique(coreList))) + ' core areas.')

        x = 0
        linkCount = 0
        endIndex = numLinks
        while x < endIndex:
            if linkTable[x, cfg.LTB_LINKTYPE] < 1:  # Skip if not a valid link
                x = x + 1
                continue
            linkCount = linkCount + 1
            start_time = time.clock()
            linkId = str(int(linkTable[x, cfg.LTB_LINKID]))

            # Source and target cores
            corex = int(coreList[x, 0])
            corey = int(coreList[x, 1])

            # Get cwd rasters for source and target cores
            cwdRaster1 = lu.get_cwd_path(corex)
            cwdRaster2 = lu.get_cwd_path(corey)
            if not gp.Exists(cwdRaster1):
                msg = '\nError: cannot find cwd raster:\n' + cwdRaster1
                lu.raise_error(msg)
            if not gp.Exists(cwdRaster2):
                msg = '\nError: cannot find cwd raster:\n' + cwdRaster2
                lu.raise_error(msg)
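            # Each pass through this loop builds one least-cost corridor
            # raster for the core pair (corex, corey) and folds it into a
            # running mosaic of all corridors processed so far.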
            lccNormRaster = path.join(clccdir, str(corex) + "_" +
                                      str(corey))  # + ".tif")
            if cfg.useArcpy:
                arcpy.env.Extent = "MINOF"
            else:
                gp.Extent = "MINOF"

            # FIXME: need to check for this?:
            # If the raster exists already, don't re-create it:
            # if not gp.Exists(lccNormRaster):
            link = lu.get_links_from_core_pairs(linkTable, corex, corey)
            offset = 10000

            # Normalized lcc rasters are created by adding cwd rasters and
            # subtracting the least cost distance between them.
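            # Illustrative arithmetic (values are hypothetical): a cell
            # with cwd 60000 to one core and 55000 to the other, on a link
            # whose least-cost distance is 110000, normalizes to
            # 60000 + 55000 - 110000 = 5000, i.e. routing through that cell
            # costs 5000 more than the best route; cells on the least-cost
            # path itself score 0. Because lcDist has the 10000 offset
            # subtracted, stored values are shifted up by the offset, which
            # is removed again when the mosaic is converted to integer below.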
            count = 0
            if arcpyAvailable:
                cfg.useArcpy = True  # Fixes Canran Liu's bug with lcDist
            if cfg.useArcpy:
                lcDist = (float(linkTable[link, cfg.LTB_CWDIST]) - offset)
                if normalize:
                    statement = ('outras = Raster(cwdRaster1) '
                                 '+ Raster(cwdRaster2) - lcDist; '
                                 'outras.save(lccNormRaster)')
                else:
                    statement = ('outras = Raster(cwdRaster1) '
                                 '+ Raster(cwdRaster2); '
                                 'outras.save(lccNormRaster)')
            else:
                if normalize:
                    lcDist = str(linkTable[link, cfg.LTB_CWDIST] - offset)
                    expression = (cwdRaster1 + " + " + cwdRaster2 + " - " +
                                  lcDist)
                else:
                    expression = (cwdRaster1 + " + " + cwdRaster2)
                statement = ('gp.SingleOutputMapAlgebra_sa(expression, '
                             'lccNormRaster)')
            while True:
                try:
                    exec statement
                    randomerror()
                except Exception:
                    count, tryAgain = lu.retry_arc_error(count, statement)
                    if not tryAgain:
                        exec statement
                else:
                    break
            cfg.useArcpy = False  # End fix for Canran Liu's bug with lcDist

            if normalize and cfg.useArcpy:
                try:
                    minObject = gp.GetRasterProperties(lccNormRaster,
                                                       "MINIMUM")
                    rasterMin = float(str(minObject.getoutput(0)))
                except Exception:
                    gp.AddWarning(
                        '\n------------------------------------------------')
                    gp.AddWarning(
                        'WARNING: Raster minimum check failed in step 5. \n'
                        'This may mean the output rasters are corrupted. '
                        'Please \nbe sure to check for valid rasters in ' +
                        outputGDB)
                    rasterMin = 0
                tolerance = (float(gp.cellSize) * -10) + offset
                if rasterMin < tolerance:
                    lu.dashline(1)
                    msg = ('WARNING: Minimum value of corridor #' +
                           str(x + 1) + ' is much less than zero (' +
                           str(rasterMin) + ').'
                           '\nThis could mean that BOUNDING CIRCLE BUFFER '
                           'DISTANCES were too small and a corridor passed '
                           'outside of a bounding circle, or that a corridor '
                           'passed outside of the resistance map. \n')
                    gp.AddWarning(msg)

            if cfg.useArcpy:
                arcpy.env.Extent = cfg.RESRAST
            else:
                gp.Extent = (gp.Describe(cfg.RESRAST)).Extent
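            # Corridors are accumulated pairwise: each new raster is
            # combined with the running mosaic using the MINIMUM operator,
            # so every cell in the final map keeps the value of the best
            # (lowest-cost) corridor crossing it. The first grid is copied
            # rather than mosaicked, and each mosaic gets its own 'mos<n>'
            # folder so a failed write can be retried in a clean location.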
            mosaicDir = path.join(cfg.LCCBASEDIR, 'mos' + str(x + 1))
            lu.create_dir(mosaicDir)
            mosFN = 'mos'  # TODO: change to '.tif' and move
            mosaicRaster = path.join(mosaicDir, mosFN)
            if numGridsWritten == 0 and dirCount == 0:
                # If this is the first grid then copy rather than mosaic
                arcObj.CopyRaster_management(lccNormRaster, mosaicRaster)
            else:
                rasterString = ('"' + lccNormRaster + ";" +
                                lastMosaicRaster + '"')
                statement = ('arcObj.MosaicToNewRaster_management('
                             'rasterString, mosaicDir, mosFN, "", '
                             '"32_BIT_FLOAT", gp.cellSize, "1", "MINIMUM", '
                             '"MATCH")')
                # statement = ('arcpy.Mosaic_management(lccNormRaster, '
                #              'mosaicRaster, "MINIMUM", "MATCH")')
                count = 0
                while True:
                    try:
                        lu.write_log('Executing mosaic for link #' +
                                     str(linkId))
                        exec statement
                        lu.write_log('Done with mosaic.')
                        randomerror()
                    except Exception:
                        count, tryAgain = lu.retry_arc_error(count, statement)
                        lu.delete_data(mosaicRaster)
                        lu.delete_dir(mosaicDir)
                        # Try a new directory
                        mosaicDir = path.join(
                            cfg.LCCBASEDIR,
                            'mos' + str(x + 1) + '_' + str(count))
                        lu.create_dir(mosaicDir)
                        mosaicRaster = path.join(mosaicDir, mosFN)
                        if not tryAgain:
                            exec statement
                    else:
                        break

            endTime = time.clock()
            processTime = round((endTime - start_time), 2)
            if normalize:
                printText = "Normalized and mosaicked "
            else:
                printText = "Mosaicked NON-normalized "
            gprint(printText + "corridor for link ID #" + str(linkId) +
                   " connecting core areas " + str(corex) + " and " +
                   str(corey) + " in " + str(processTime) + " seconds. " +
                   str(int(linkCount)) + " out of " +
                   str(int(numCorridorLinks)) + " links have been "
                   "processed.")

            # Temporarily disable links in linktable - don't want to mosaic
            # them twice
            for y in range(x + 1, numLinks):
                corex1 = int(coreList[y, 0])
                corey1 = int(coreList[y, 1])
                if corex1 == corex and corey1 == corey:
                    linkTable[y, cfg.LTB_LINKTYPE] = (
                        linkTable[y, cfg.LTB_LINKTYPE] + 1000)
                elif corex1 == corey and corey1 == corex:
                    linkTable[y, cfg.LTB_LINKTYPE] = (
                        linkTable[y, cfg.LTB_LINKTYPE] + 1000)

            numGridsWritten = numGridsWritten + 1
            if not SAVENORMLCCS:
                lu.delete_data(lccNormRaster)
                lu.delete_dir(clccdir)
                lu.create_dir(clccdir)
            else:
                if numGridsWritten == 100:
                    # We only write up to 100 grids to any one folder
                    # because otherwise Arc slows to a crawl
                    dirCount = dirCount + 1
                    numGridsWritten = 0
                    clccdir = path.join(cfg.LCCBASEDIR,
                                        cfg.LCCNLCDIR_NM + str(dirCount))
                    gprint("Creating output folder: " + clccdir)
                    gp.CreateFolder_management(cfg.LCCBASEDIR,
                                               path.basename(clccdir))

            if numGridsWritten > 1 or dirCount > 0:
                lu.delete_data(lastMosaicRaster)
                lu.delete_dir(path.dirname(lastMosaicRaster))
            lastMosaicRaster = mosaicRaster
            x = x + 1

        # Re-enable rows that were temporarily disabled
        rows = npy.where(linkTable[:, cfg.LTB_LINKTYPE] > 1000)
        linkTable[rows, cfg.LTB_LINKTYPE] = (
            linkTable[rows, cfg.LTB_LINKTYPE] - 1000)

        # ---------------------------------------------------------------
        # Create output geodatabase
        if not gp.exists(outputGDB):
            gp.createfilegdb(cfg.OUTPUTDIR, path.basename(outputGDB))
        if cfg.useArcpy:
            arcpy.env.workspace = outputGDB
        else:
            gp.workspace = outputGDB
        gp.pyramid = "NONE"
        gp.rasterstatistics = "NONE"

        # Copy mosaic raster to output geodatabase
        saveFloatRaster = False
        if saveFloatRaster:
            floatRaster = (outputGDB + '\\' + PREFIX + mosaicBaseName +
                           '_flt')  # Full path
            statement = ('arcObj.CopyRaster_management(mosaicRaster, '
                         'floatRaster)')
            try:
                exec statement
            except Exception:
                pass

        # ---------------------------------------------------------------
        # Convert mosaic raster to integer
        intRaster = path.join(outputGDB, PREFIX + mosaicBaseName)
        if cfg.useArcpy:
            statement = ('outras = Int(Raster(mosaicRaster) - offset '
                         '+ 0.5); outras.save(intRaster)')
        else:
            expression = ("int(" + mosaicRaster + " - " + str(offset) +
                          " + 0.5)")
            statement = 'gp.SingleOutputMapAlgebra_sa(expression, intRaster)'
        count = 0
        while True:
            try:
                exec statement
                randomerror()
            except Exception:
                count, tryAgain = lu.retry_arc_error(count, statement)
                if not tryAgain:
                    exec statement
            else:
                break
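        # The truncated raster below sets cells above cfg.CWDTHRESH to
        # NODATA and abbreviates the threshold into the raster name, e.g.
        # (illustrative values) 200000 becomes '200k' and 3000000 becomes
        # '3m'.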
        # ---------------------------------------------------------------
        if writeTruncRaster:
            # Set anything beyond cfg.CWDTHRESH to NODATA.
            if arcpyAvailable:
                cfg.useArcpy = True  # For Alissa Pump's error with 10.1
            cutoffText = str(cfg.CWDTHRESH)
            if cutoffText[-6:] == '000000':
                cutoffText = cutoffText[0:-6] + 'm'
            elif cutoffText[-3:] == '000':
                cutoffText = cutoffText[0:-3] + 'k'
            truncRaster = (outputGDB + '\\' + PREFIX + mosaicBaseName +
                           '_truncated_at_' + cutoffText)
            if cfg.useArcpy:
                statement = ('outRas = Raster(intRaster) * '
                             '(Con(Raster(intRaster) <= cfg.CWDTHRESH, 1)); '
                             'outRas.save(truncRaster)')
            else:
                expression = ("(" + intRaster + " * (con(" + intRaster +
                              " <= " + str(cfg.CWDTHRESH) + ", 1)))")
                statement = ('gp.SingleOutputMapAlgebra_sa(expression, '
                             'truncRaster)')
            count = 0
            while True:
                try:
                    exec statement
                    randomerror()
                except Exception:
                    count, tryAgain = lu.retry_arc_error(count, statement)
                    if not tryAgain:
                        exec statement
                else:
                    break
            cfg.useArcpy = False  # End fix for Alissa Pump's error with 10.1

        # ---------------------------------------------------------------
        # Check for unreasonably low minimum NLCC values
        try:
            mosaicGrid = path.join(cfg.LCCBASEDIR, 'mos')
            # Copy to grid to test
            arcObj.CopyRaster_management(mosaicRaster, mosaicGrid)
            minObject = gp.GetRasterProperties(mosaicGrid, "MINIMUM")
            rasterMin = float(str(minObject.getoutput(0)))
        except Exception:
            gp.AddWarning(
                '\n------------------------------------------------')
            gp.AddWarning(
                'WARNING: Raster minimum check failed in step 5. \n'
                'This may mean the output rasters are corrupted. Please \n'
                'be sure to check for valid rasters in ' + outputGDB)
            rasterMin = 0
        tolerance = (float(gp.cellSize) * -10)
        if rasterMin < tolerance:
            lu.dashline(1)
            msg = ('WARNING: Minimum value of mosaicked corridor map is '
                   'much less than zero (' + str(rasterMin) + ').'
                   '\nThis could mean that BOUNDING CIRCLE BUFFER DISTANCES '
                   'were too small and a corridor passed outside of a '
                   'bounding circle, or that a corridor passed outside of '
                   'the resistance map. \n')
            gp.AddWarning(msg)

        gprint('\nWriting final LCP maps...')
        if cfg.STEP4:
            finalLinkTable = lu.update_lcp_shapefile(linkTable, lastStep=4,
                                                     thisStep=5)
        elif cfg.STEP3:
            finalLinkTable = lu.update_lcp_shapefile(linkTable, lastStep=3,
                                                     thisStep=5)
        else:
            # Don't know if step 4 was run, since this is started at step 5.
            # Use presence of previous linktable files to figure this out.
            # Linktable name includes step number.
            prevLinkTableFile = lu.get_prev_step_link_table(step=5)
            prevStepInd = len(prevLinkTableFile) - 5
            lastStep = prevLinkTableFile[prevStepInd]
            finalLinkTable = lu.update_lcp_shapefile(linkTable, lastStep,
                                                     thisStep=5)

        outlinkTableFile = lu.get_this_step_link_table(step=5)
        gprint('Updating ' + outlinkTableFile)
        lu.write_link_table(linkTable, outlinkTableFile)

        linkTableLogFile = path.join(cfg.LOGDIR, "linkTable_s5.csv")
        lu.write_link_table(linkTable, linkTableLogFile)

        linkTableFinalFile = path.join(cfg.OUTPUTDIR,
                                       PREFIX + "_linkTable_s5.csv")
        lu.write_link_table(finalLinkTable, linkTableFinalFile)
        gprint('Copy of final linkTable written to ' + linkTableFinalFile)

        gprint('Creating shapefiles with linework for links.')
        try:
            lu.write_link_maps(outlinkTableFile, step=5)
        except Exception:
            # Retry once if the first attempt fails
            lu.write_link_maps(outlinkTableFile, step=5)

        # Create final linkmap files in output directory, and remove files
        # from scratch.
        lu.copy_final_link_maps(step=5)

        if not SAVENORMLCCS:
            lu.delete_dir(cfg.LCCBASEDIR)

        # Build statistics for corridor rasters
        gp.addmessage('\nBuilding output statistics and pyramids '
                      'for corridor raster')
        lu.build_stats(intRaster)
        if writeTruncRaster:
            gp.addmessage('Building output statistics '
                          'for truncated corridor raster')
            lu.build_stats(truncRaster)

    # Return GEOPROCESSING specific errors
    except arcgisscripting.ExecuteError:
        lu.dashline(1)
        gprint('****Failed in step 5. Details follow.****')
        lu.exit_with_geoproc_error(_SCRIPT_NAME)
    # Return any PYTHON or system specific errors
    except Exception:
        lu.dashline(1)
        gprint('****Failed in step 5. Details follow.****')
        lu.exit_with_python_error(_SCRIPT_NAME)

    return
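
# The sketch below is illustrative only and is not called anywhere by
# Linkage Mapper: it restates the two map-algebra operations used above
# (normalization and truncation) with plain numpy arrays standing in for
# rasters. The function name and its arguments are hypothetical.
def _sketch_normalized_corridor(cwd1, cwd2, lcd, cwd_thresh=None):
    """Return a normalized corridor surface from two cost-weighted
    distance arrays.

    NLCC(x) = CWD_A(x) + CWD_B(x) - LCD(A, B): cells on the least-cost
    path score 0, and larger values mean costlier detours. If cwd_thresh
    is given, cells above it are masked out (NaN here), mirroring the
    Con()-based truncation of the integer mosaic.
    """
    nlcc = cwd1 + cwd2 - lcd
    if cwd_thresh is not None:
        # Keep cells at or below the threshold; mask out the rest
        nlcc = npy.where(nlcc <= cwd_thresh, nlcc, npy.nan)
    return nlcc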