def processAlgorithm(self, progress):
    # Query the chosen metric and the environmental input layers
    metric = self.m[self.getParameterValue(self.METRIC)]
    envlayers = self.getParameterValue(self.ENV)
    names = []
    env = []
    shape = None
    # Load all arrays into a list
    for lay in envlayers.split(";"):
        r = Processing.getObject(lay)  # QgsRasterLayer object
        name = str(r.name())
        names.append(name)
        a = gdal.Open(lay)
        array = a.GetRasterBand(1).ReadAsArray()
        # Prepare by removing all no-data values from the array
        NA = a.GetRasterBand(1).GetNoDataValue()
        if NA is not None:
            array[array == NA] = 0
        env.append(array)
        if shape is None:  # Fast check if the arrays have unequal shapes
            shape = array.shape
        elif shape != array.shape:
            raise GeoAlgorithmExecutionException("Input layers need to have the same extent and cellsize.")
        a = r = None
    if len(env) < 2:
        raise GeoAlgorithmExecutionException("You need at least two layers to calculate overlap statistics.")
    progress.setConsoleInfo("Loaded %s arrays for calculation" % (str(len(names))))
    func.updateProcessing(progress, 1, 3)
    results = []
    func.updateProcessing(progress, 2, 3)
    if len(env) > 2:
        # Iterative calculation of the metric for all pairwise combinations
        for j in range(0, len(env)):
            for k in range(j + 1, len(env)):
                progress.setConsoleInfo("Calculating Overlap of layers %s with %s" % (names[j], names[k]))
                r = self.Overlap(metric, env[j], env[k])
                res = (names[j], names[k], metric, r)
                results.append(res)
    else:
        # Only two input layers
        r = self.Overlap(metric, env[0], env[1])
        res = (names[0], names[1], metric, r)
        results.append(res)
    progress.setConsoleInfo("Saving results")
    func.updateProcessing(progress, 3, 3)
    output = self.getOutputValue(self.RESULTS)
    titles = ['Layer1', 'Layer2', 'Metric', 'Overlap']
    # Save output
    func.saveToCSV(results, titles, output)
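# The Overlap() method called above is not part of this listing. Below is a
# minimal sketch of what such a method could look like for one common
# niche-overlap metric, Schoener's D, assuming both arrays share the same
# shape and are first normalised to probability surfaces:
def Overlap(self, metric, layer1, layer2):
    # Hypothetical sketch, not the plugin's actual implementation;
    # the real method presumably dispatches on the `metric` argument.
    p1 = layer1.astype(numpy.float64) / layer1.sum()
    p2 = layer2.astype(numpy.float64) / layer2.sum()
    if metric == "Schoener's D":
        # D = 1 - 0.5 * sum(|p1 - p2|); 1 = identical niches, 0 = no overlap
        return 1.0 - 0.5 * numpy.abs(p1 - p2).sum()
    raise NotImplementedError("Metric %s is not covered by this sketch" % metric)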
def processAlgorithm(self, progress):
    # Collect all MAXENT options chosen in the dialog
    param = dict()
    param["responsecurves"] = self.getParameterValue(self.RESP)
    param["responsecurvesexponent"] = self.getParameterValue(self.RESP_EXP)
    param["pictures"] = self.getParameterValue(self.PIC)
    param["randomseed"] = self.getParameterValue(self.RANDOM)
    param["logscale"] = self.getParameterValue(self.LOGSCALE)
    param["writeclampgrid"] = self.getParameterValue(self.CLAMPGRID)
    param["writemess"] = self.getParameterValue(self.MESS)
    param["randomtestpoints"] = self.getParameterValue(self.RANDOM_POINTS)
    param["betamultiplier"] = self.getParameterValue(self.BETA_MULT)
    param["replicates"] = self.getParameterValue(self.REPLICATES)
    param["replicatetype"] = self.R_TYPE[self.getParameterValue(self.REPLICATE_TYPE)]
    param["linear"] = self.getParameterValue(self.LINEAR)
    param["quadratic"] = self.getParameterValue(self.QUADRATIC)
    param["product"] = self.getParameterValue(self.PRODUCT)
    param["threshold"] = self.getParameterValue(self.THRESHOLD)
    param["hinge"] = self.getParameterValue(self.HINGE)
    param["fadebyclamping"] = self.getParameterValue(self.FADEBYCLAMPING)
    param["extrapolate"] = self.getParameterValue(self.EXTRAPOLATE)
    param["plots"] = self.getParameterValue(self.PLOTS)
    param["maximumiterations"] = self.getParameterValue(self.MAXITERATIONS)
    param["convergencethreshold"] = self.getParameterValue(self.CONVG_THRESH)
    param["threads"] = self.getParameterValue(self.PROC)
    param["defaultprevalence"] = self.getParameterValue(self.DEF_PREV)
    param["perspeciesresults"] = self.getParameterValue(self.PERSPECRES)
    # param["applythresholdrule"] = self.getParameterValue(self.APPLY_THRESH)
    # Serialise the options as (command, value) pairs, lower-cased for MAXENT
    res = []
    for option in param.iteritems():
        res.append((option[0], str(option[1]).lower()))
    out = self.getOutputValue(self.OUT_PARAM)
    func.saveToCSV(res, ("command", "value"), out)
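# For reference: func.saveToCSV presumably writes one row per (command, value)
# tuple under the given header, producing a file of roughly this shape
# (values illustrative only):
#
#   command,value
#   responsecurves,true
#   betamultiplier,1.0
#   replicates,1
#
# The companion MAXENT algorithm below reads this file back through its
# "command" and "value" columns to rebuild the parameter dictionary.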
def processAlgorithm(self, progress):
    # Get the location of the maxent jar file
    maxent = qsdm_settings.maxent()
    if os.path.basename(maxent) != 'maxent.jar':
        # The directory and not the file was chosen
        maxent = os.path.join(qsdm_settings.maxent(), 'maxent.jar')
    # Get the location of java, available memory and output path
    if sys.platform in ("win32", "win64"):
        ex = "java.exe"
    else:
        ex = "java"
    java = os.path.join(qsdm_settings.javaPath(), ex)  # path to java if basic execution fails
    temp = qsdm_settings.getTEMP() + os.sep + "MAXENT"  # folder where reprojected files and the like are saved
    mem = str(qsdm_settings.getMEM())  # available memory for MAXENT
    work = qsdm_settings.workPath()  # folder to save the Maxent model results to
    env_dir = self.getParameterValue(self.ENV_DIR)
    progress.setConsoleInfo("Starting Species Layer Preparation")
    # Check if the temp folder exists, otherwise create it
    if not os.path.exists(temp):
        os.mkdir(temp)

    ## Species layer preparation
    # Get the species file to model; take the selected species column and coordinates
    point = self.getParameterValue(self.SPECIES)
    v = Processing.getObject(point)
    scl = self.getParameterValue(self.SPEC_COL)  # name of the species column in the input file
    if v.source().find("type=csv") != -1:
        raise GeoAlgorithmExecutionException("Species point layer should be saved as ESRI Shapefile")
    crs = v.crs()
    if crs.authid() != "EPSG:4326":
        progress.setConsoleInfo("Species localities not in WGS84, reprojecting...")
        # Reproject using ogr
        func.reprojectLatLong(v, temp)
        # Then open again as QgsVectorLayer
        out = temp + os.sep + "localities.shp"
        if not (os.path.exists(out) and os.path.isfile(out)):
            raise GeoAlgorithmExecutionException("Species point layer data could not be reprojected to WGS84")
        fileInfo = QFileInfo(out)
        baseName = fileInfo.baseName()
        v = QgsVectorLayer(out, baseName, "ogr")
        if not v.isValid():
            # If this didn't work, try to load it the Processing way
            v = Processing.getObject(out)
            if not v.isValid():
                # Otherwise return an error
                raise GeoAlgorithmExecutionException("No valid layer could be loaded from the reprojection.")
    # Get coordinates from the point layer and add the species name
    coord = func.point2table(v, scl)
    if coord is None:
        raise GeoAlgorithmExecutionException("Species point layer data could not be extracted")
    # Convert coordinates and species names to csv, saved in the temporary folder
    speciesPath = temp + os.sep + "species.csv"
    species = func.saveToCSV(coord, ("Species", "Long", "Lat"), speciesPath)
    specieslist = func.getUniqueAttributeList(v, scl, True)
    progress.setConsoleInfo("Species data successfully prepared for MAXENT")
    progress.setConsoleInfo("---")

    ## Maxent execution
    # Test if JAVA can be executed directly, otherwise take the binary from the given path
    try:
        from subprocess import DEVNULL  # python 3k
    except ImportError:
        DEVNULL = open(os.devnull, 'wb')
    try:
        proc = subprocess.call(['java', '-version'], stdin=subprocess.PIPE,
                               stdout=DEVNULL, stderr=subprocess.STDOUT)
    except OSError:
        proc = 1  # java is not on the PATH
    if proc == 0:
        start = "java -mx" + str(int(mem)) + "m -jar "
    else:
        progress.setConsoleInfo("JAVA could not be run by default. "
                                "Using link to binary from the set JAVA folder.")
        start = java + " -mx" + str(int(mem)) + "m -jar "
    # On Windows, wrap the jar path in quotes
    if platform.system() == "Windows":
        start += "\"" + maxent + "\""
    else:
        start += maxent
    myCommand = start + " samplesfile=" + speciesPath
    myCommand += " environmentallayers=" + env_dir
    myCommand += " outputdirectory=" + work
    # Finish the command
    myCommand += " redoifexists"
    # Add a message
    progress.setConsoleInfo("#### Attempting to start MAXENT ####")
    # Execute the command
    loglines = []
    loglines.append('MAXENT execution console output')
    # result = os.system(myCommand)
    proc = subprocess.Popen(
        myCommand,
        shell=True,
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    ).stdout
    for line in iter(proc.readline, ''):
        loglines.append(line)
    ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
    # Print all loglines delivered by MAXENT
    err = False
    for line in loglines:
        progress.setConsoleInfo(line)
        if line.find("Error") != -1:
            err = True
    if err:
        ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
                               "MAXENT calculations did not succeed! "
                               "Check the Processing Info output for possible error sources.")
        print "Used command: " + myCommand
    else:
        ProcessingLog.addToLog(ProcessingLog.LOG_INFO, "MAXENT modelling finished.")
    # Finished. Load all resulting layers into QGIS if the run was successful.
    # To be compatible with Processing, copy or link them to the Processing output folder
    #out_r = self.getOutputValue(self.OUT_PRED)
    #out_t = self.getOutputValue(self.OUT_PRED_RES)
    p = work + os.sep + "maxentResults.csv"
    func.tableInQgis(p, ",")
    # Load only the generated predictions
    for species in specieslist:
        t = species.replace(" ", "_")
        p = work + os.sep + t + ".asc"
        func.rasterInQgis(p)
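# For reference, the assembled command takes roughly this shape
# (paths illustrative only):
#
#   java -mx1024m -jar "C:\maxent\maxent.jar" samplesfile=...\species.csv
#        environmentallayers=<env_dir> outputdirectory=<work> redoifexists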
def processAlgorithm(self, progress):
    ## Parameter preparation ##
    # Get the location of the maxent jar file
    maxent = qsdm_settings.maxent()
    if os.path.basename(maxent) != 'maxent.jar':
        # The directory and not the file was chosen
        maxent = os.path.join(qsdm_settings.maxent(), 'maxent.jar')
    # Get the location of java, available memory and output path
    if sys.platform in ("win32", "win64"):
        ex = "java.exe"
    else:
        ex = "java"
    java = os.path.join(qsdm_settings.javaPath(), ex)  # path to java if basic execution fails
    mem = str(qsdm_settings.getMEM())  # available memory for MAXENT
    work = qsdm_settings.workPath()  # folder to save the Maxent model results to
    temp = qsdm_settings.getTEMP() + os.sep + "MAXENT"  # folder where reprojected files and the like are saved
    progress.setConsoleInfo("Starting Parameter and File Preparation")
    # Check if the temp folder exists, otherwise create it
    if not os.path.exists(temp):
        os.mkdir(temp)
    # Get optional parameters
    param = self.getParameterValue(self.PARAM)
    o = Processing.getObject(param)
    if type(o) == QgsVectorLayer and o.isValid() and os.path.splitext(o.source())[1] == ".csv":
        progress.setConsoleInfo("Using optional parameter file for MAXENT")
        param = dict()
        # Read the (command, value) pairs back into a dictionary
        dp = o.dataProvider()
        for feat in dp.getFeatures():
            com = feat["command"]
            val = feat["value"]
            param[com] = val
        # and make maxent invisible to the modeller
        param["visible"] = False
    else:
        progress.setConsoleInfo("No valid optional parameter file detected")
        # Use default parameters
        param = dict()
        # Per default, write a separate maxent results file for each species
        param["perspeciesresults"] = False
        # and make maxent invisible to the modeller
        param["visible"] = False
    # Progress updater
    n = len(self.getParameterValue(self.ENV).split(";")) + 5
    func.updateProcessing(progress, 1, n, "Loaded Parameters.")

    ## Species layer
    # Get the species file to model; take the selected species column and coordinates
    point = self.getParameterValue(self.SPECIES)
    v = Processing.getObject(point)
    crs = v.crs()
    if crs.authid() != "EPSG:4326":
        progress.setConsoleInfo("Species localities not in WGS84, reprojecting...")
        # Reproject using ogr
        func.reprojectLatLong(v, temp)
        # Then open again as QgsVectorLayer
        out = temp + os.sep + "localities.shp"
        if not (os.path.exists(out) and os.path.isfile(out)):
            raise GeoAlgorithmExecutionException("Species point layer data could not be reprojected to WGS84")
        fileInfo = QFileInfo(out)
        baseName = fileInfo.baseName()
        v = QgsVectorLayer(out, baseName, "ogr")
        if not v.isValid():
            # If this didn't work, try to load it the Processing way
            v = Processing.getObject(out)
            if not v.isValid():
                # Otherwise return an error
                raise GeoAlgorithmExecutionException("No valid layer could be loaded from the reprojection.")
    # Get the name of the species column in the input file
    scl = self.getParameterValue(self.SPEC_COL)
    # Get coordinates from the point layer and add the species name
    coord = func.point2table(v, scl)
    if coord is None:
        raise GeoAlgorithmExecutionException("Species point layer data could not be extracted")
    # Convert coordinates and species names to csv, saved in the temporary folder
    speciesPath = temp + os.sep + "species.csv"
    species = func.saveToCSV(coord, ("Species", "Long", "Lat"), speciesPath)
    specieslist = func.getUniqueAttributeList(v, scl, True)
    progress.setConsoleInfo("Species data successfully prepared for MAXENT")
    progress.setConsoleInfo("---")
    func.updateProcessing(progress, 2, n, "Loaded Species data.")

    ## Environmental layers
    # Get the selected environmental layers and prepare them for MAXENT
    progress.setConsoleInfo("Starting preparation of the environmental layers")
    envlayers = self.getParameterValue(self.ENV)
    env = dict()
    layers = []
    # Project to WGS84 if necessary
    for lay in envlayers.split(";"):
        r = Processing.getObject(lay)  # QgsRasterLayer object
        name = str(r.name())
        crs = r.crs()
        if crs.authid() != "EPSG:4326":
            # Reproject layer
            progress.setConsoleInfo("Original layer %s not in WGS84, reprojecting..." % (name))
            r = func.reprojectRasterLatLong(r, temp, True)
            if r is False or not r.isValid():
                ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, "Projecting " + name + " to WGS84 failed!")
        layers.append(r.source())
    if len(layers) == 0:
        raise GeoAlgorithmExecutionException("Environmental layers could not be reprojected!")
    else:
        func.updateProcessing(progress, 3, n, "Reprojection finished.")
    # Check the extent of all those layers and unify them if necessary,
    # i.e. if the raster layers have differing extents
    func.updateProcessing(progress, 4, n)
    uni = []
    app = False  # Which approach should be used?
    if len(layers) > 1 and func.unificationNecessary(layers):
        progress.setConsoleInfo("Input layers have different extents, intersecting...")
        # Credit for the following approach goes to Yury Ryabov - http://ssrebelious.blogspot.com
        if app == False:
            # Get coordinates of the corners for the final raster
            fin_coordinates = func.finCoordinates(layers)
            r = gdal.Open(str(layers[0]))
            main_geo_transform = r.GetGeoTransform()
            proj = r.GetProjection()
            no_data = r.GetRasterBand(1).GetNoDataValue()
            if not no_data:
                no_data = -9999
            for lay in layers:
                raster = gdal.Open(str(lay))
                name = os.path.splitext(os.path.basename(lay))[0]
                out = temp + os.sep + name + 'warp.tif'
                result = func.ExtendRaster(raster, fin_coordinates, out,
                                           main_geo_transform, proj, no_data)
                if result:
                    raster = None
                    if os.path.exists(out):
                        # Add output to uni
                        uni.append(out)
                    else:
                        raise GeoAlgorithmExecutionException("Unified layer could not be saved.")
                else:
                    raise GeoAlgorithmExecutionException("Layers could not be unified. Please do this manually.")
        else:
            # FIXME: Faster approach below. Currently not yet working.
            # 1. Build the largest extent and geotransform.
            # big_coord holds left, top, right, bottom of the dataset's bounds in geospatial coordinates.
            fin_coordinates, main_geo_transform, interp = func.CreateMainGeotransform(layers)
            # Set the number of columns and rows for the final raster
            main_cols = (fin_coordinates[2] - fin_coordinates[0]) / abs(main_geo_transform[1])
            main_rows = (fin_coordinates[3] - fin_coordinates[1]) / abs(main_geo_transform[5])
            progress.setConsoleInfo("Creating new raster based on greatest extent with %s columns and %s rows" % (str(main_cols), str(main_rows)))
            # FIXME: Check coordinates
            big_coord = [main_geo_transform[0], main_geo_transform[3],
                         main_geo_transform[0] + (main_geo_transform[1] * main_rows),
                         main_geo_transform[3] + (main_geo_transform[5] * main_cols)]
            # 2. Loop through the rasters and intersect them with the largest extent
            for lay in layers:
                name = os.path.splitext(os.path.basename(lay))[0]
                r = gdal.Open(str(lay))
                src_p = r.GetProjection()
                if interp:
                    # Interpolate to the coarsest cellsize
                    progress.setConsoleInfo("Resolution of environmental layers differs. Bilinear interpolation to the coarsest cellsize = xy(%s,%s)" % (abs(main_geo_transform[1]), abs(main_geo_transform[5])))
                    # FIXME: Maybe interpolate with nearest neighbour if categorical
                    r = func.gridInterpolation(r, temp, main_geo_transform, main_cols, main_rows, src_p, 'Bilinear', False)
                wide = abs(r.RasterXSize)
                high = abs(r.RasterYSize)
                geotransform = r.GetGeoTransform()
                nodata = r.GetRasterBand(1).GetNoDataValue()  # should be -9999 if projected correctly
                if nodata is None:
                    nodata = -9999
                # target holds left, top, right, bottom of the dataset's bounds in geospatial coordinates
                target = [geotransform[0], geotransform[3],
                          geotransform[0] + (geotransform[1] * wide),
                          geotransform[3] + (geotransform[5] * high)]
                # Intersection
                intersection = [max(big_coord[0], target[0]), min(big_coord[1], target[1]),
                                min(big_coord[2], target[2]), max(big_coord[3], target[3])]
                # Convert to pixels
                p1 = func.world2Pixel(geotransform, intersection[0], intersection[1])
                p2 = func.world2Pixel(geotransform, intersection[2], intersection[3])
                band = r.GetRasterBand(1)
                result = band.ReadAsArray(p1[0], p1[1], p2[0] - p1[0], p2[1] - p1[1],
                                          p2[0] - p1[0], p2[1] - p1[1])
                # Write to a new raster
                output = temp + os.sep + name + 'warp.tif'
                func.createRaster(output, abs(main_geo_transform[1]), abs(main_geo_transform[5]),
                                  result, nodata, main_geo_transform, src_p, 'GTiff')
                if os.path.exists(output):
                    # Add output to uni
                    uni.append(output)
                else:
                    raise GeoAlgorithmExecutionException("Environmental layers could not be prepared for MAXENT")
    else:
        uni = layers
    if len(uni) == 0 or len(uni) != len(layers):
        raise GeoAlgorithmExecutionException("Environmental layers with a unified extent could not be generated!")
    else:
        progress.setConsoleInfo("Environmental layers successfully unified.")
        func.updateProcessing(progress, 5, n, "Unified environmental layers.")
    # Convert to asc format if necessary
    for lay in uni:
        r = Processing.getObject(lay)  # QgsRasterLayer object
        name = os.path.basename(str(r.name()))
        out = temp + os.sep + name + '.asc'
        progress.setConsoleInfo("Converting environmental layer to ESRI ASC format...")
        # Format to asc
        proc = func.raster2ASC(r, out)
        if proc and os.path.isfile(out):
            env[name] = out
        else:
            ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, "Converting/Projecting " + name + " to ESRI asc format failed!")
    func.updateProcessing(progress, 6, n, "Formatted to ASC.")
    # Check that at least one prepared layer ended up in env
    if len(env) == 0:
        raise GeoAlgorithmExecutionException("Environmental layers could not be prepared for MAXENT")
    # Check that the number of prepared layers equals the number of originally selected layers
    if len(envlayers.split(";")) != len(env):
        ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, "Successfully prepared environmental layers: " + str(env.keys()))
        raise GeoAlgorithmExecutionException("Not all environmental layers could be prepared for MAXENT. Check the Processing Log.")
    # Test if the species csv exists
    if not os.path.exists(speciesPath):
        raise GeoAlgorithmExecutionException("Species point layer could not be prepared for MAXENT")

    ## Create the maxent command
    progress.setConsoleInfo("---")
    progress.setConsoleInfo("All fine so far. Attempting to build the MAXENT execution command...")
    # Test if JAVA can be executed directly, otherwise take the binary from the given path
    try:
        from subprocess import DEVNULL  # python 3k
    except ImportError:
        DEVNULL = open(os.devnull, 'wb')
    try:
        proc = subprocess.call(['java', '-version'], stdin=subprocess.PIPE,
                               stdout=DEVNULL, stderr=subprocess.STDOUT)
    except OSError:
        proc = 1  # java is not on the PATH
    if proc == 0:
        start = "java -mx" + str(int(mem)) + "m -jar "
    else:
        progress.setConsoleInfo("JAVA could not be run by default. "
                                "Using link to binary from the set JAVA folder.")
        start = java + " -mx" + str(int(mem)) + "m -jar "
    # On Windows, wrap the jar path in quotes
    if platform.system() == "Windows":
        start += "\"" + maxent + "\""
    else:
        start += maxent
    myCommand = start + " samplesfile=" + speciesPath
    myCommand += " environmentallayers=" + temp
    # Toggle all selected layers
    myCommand += " togglelayertype="
    for i in range(0, len(env.keys())):
        myCommand += os.path.splitext(env.keys()[i])[0]
        if i != len(env.keys()) - 1:
            myCommand += ","
    myCommand += " outputdirectory=" + work
    # Parse the parameters into the command
    for option in param.iteritems():
        myCommand += " " + option[0] + "=" + str(option[1]).lower()
    # Finish the command
    myCommand += " redoifexists autorun"
    # Add a message
    progress.setConsoleInfo("#### Attempting to start MAXENT ####")
    func.updateProcessing(progress, 7, n)
    # Execute the command
    loglines = []
    loglines.append('MAXENT execution console output')
    # result = os.system(myCommand)
    proc = subprocess.Popen(
        myCommand,
        shell=True,
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    ).stdout
    for line in iter(proc.readline, ''):
        loglines.append(line)
    ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
    # Print all loglines delivered by MAXENT
    err = False
    for line in loglines:
        progress.setConsoleInfo(line)
        if line.find("Error") != -1:
            err = True
    if err:
        ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
                               "MAXENT calculations did not succeed! "
                               "Check the Processing Info output for possible error sources.")
        print "Used command: " + myCommand
    else:
        ProcessingLog.addToLog(ProcessingLog.LOG_INFO, "MAXENT modelling finished.")
    func.updateProcessing(progress, n, n)
    # Finished. Load all resulting layers into QGIS if the run was successful.
    # To be compatible with Processing, copy or link them to the Processing output folder
    #out_r = self.getOutputValue(self.OUT_PRED)
    #out_t = self.getOutputValue(self.OUT_PRED_RES)
    p = work + os.sep + "maxentResults.csv"
    func.tableInQgis(p, ",")
    # Load only the generated predictions
    for species in specieslist:
        t = species.replace(" ", "_")
        p = work + os.sep + t + ".asc"
        func.rasterInQgis(p)

    ## Styling and grouping
    # Freeze the canvas
    canvas = QgsMapCanvas()
    canvas.freeze(True)
    # Add a new group and move all new layers to it
    groups = iface.legendInterface().groups()
    if 'MAXENT' not in groups:
        idx = iface.legendInterface().addGroup("MAXENT")
        groups = iface.legendInterface().groups()
    layerMap = QgsMapLayerRegistry.instance().mapLayers()
    for lyr in layerMap.itervalues():
        if lyr.name() in specieslist:
            # Move them to the maxent group
            iface.legendInterface().moveLayer(lyr, groups.index("MAXENT"))
            # Style the output
            lyr.setDrawingStyle("SingleBandPseudoColor")
            # The band of classLayer
            classLyrBnd = 1
            # Color list for the ramp
            clrLst = [QgsColorRampShader.ColorRampItem(0, QColor(224, 224, 224), "0"),        # grey
                      QgsColorRampShader.ColorRampItem(0.01, QColor(0, 0, 153), "> 0.01"),    # dark blue
                      QgsColorRampShader.ColorRampItem(0.2, QColor(153, 204, 255), "0.2"),    # light blue
                      QgsColorRampShader.ColorRampItem(0.35, QColor(153, 255, 153), "0.35"),  # light green
                      QgsColorRampShader.ColorRampItem(0.5, QColor(0, 153, 0), "0.5"),        # green
                      QgsColorRampShader.ColorRampItem(0.65, QColor(255, 255, 0), "0.65"),    # yellow
                      QgsColorRampShader.ColorRampItem(0.75, QColor(255, 128, 0), "0.75"),    # orange
                      QgsColorRampShader.ColorRampItem(0.85, QColor(255, 0, 0), "> 0.85")]    # red
            # Create the shader
            lyrShdr = QgsRasterShader()
            # Create the color ramp function
            clrFnctn = QgsColorRampShader()
            clrFnctn.setColorRampType(QgsColorRampShader.INTERPOLATED)
            clrFnctn.setColorRampItemList(clrLst)
            # Set the raster shader function
            lyrShdr.setRasterShaderFunction(clrFnctn)
            # Create the renderer
            lyrRndr = QgsSingleBandPseudoColorRenderer(lyr.dataProvider(), classLyrBnd, lyrShdr)
            # Apply the renderer to the layer
            lyr.setRenderer(lyrRndr)
            # Refresh the legend
            if hasattr(lyr, "setCacheImage"):
                lyr.setCacheImage(None)
            lyr.triggerRepaint()
            iface.legendInterface().refreshLayerSymbology(lyr)
    # Finally move the Maxent results table to the group as well
    lyr = func.getLayerByName("MaxentResults")
    iface.legendInterface().moveLayer(lyr, groups.index("MAXENT"))
    canvas.freeze(False)
    canvas.refresh()
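# func.world2Pixel(), used in the intersection step above, is not shown in
# this listing. It is presumably the usual affine inversion from geospatial
# coordinates to raster pixel offsets; a minimal sketch under that assumption:
def world2Pixel(geo_transform, x, y):
    # geo_transform is the GDAL 6-tuple (originX, pixelWidth, rotation,
    # originY, rotation, pixelHeight); pixelHeight is negative for north-up rasters
    px = int((x - geo_transform[0]) / geo_transform[1])
    py = int((y - geo_transform[3]) / geo_transform[5])
    return (px, py)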
def processAlgorithm(self, progress):
    # Set up the data as a sklearn Bunch (basically just a dictionary with specific attributes)
    data = Bunch()
    # Vector layer
    vector = self.getParameterValue(self.SPECIES)
    v = Processing.getObject(vector)
    v_crs = v.crs()
    # Environmental layers
    envlayers = self.getParameterValue(self.ENV)
    if func.unificationNecessary(envlayers.split(";")):
        raise GeoAlgorithmExecutionException("All input environmental layers need to have the same resolution and extent. Use the Unify tool beforehand")
        # TODO: Enable an option to do this automatically
    progress.setConsoleInfo("Loading Coverage Data")
    # Check projection and cellsize
    for lay in envlayers.split(";"):
        r = Processing.getObject(lay)  # QgsRasterLayer object
        if r.crs() != v_crs:
            raise GeoAlgorithmExecutionException("All input layers need to have the same projection")
        if round(r.rasterUnitsPerPixelX()) != round(r.rasterUnitsPerPixelY()):
            raise GeoAlgorithmExecutionException("Grid cell size values are not equal. Please make sure that grid cells are squares.")
    # Set coverage parameters
    r = Processing.getObject(envlayers.split(";")[0])  # QgsRasterLayer object
    ex = r.extent()
    data["grid_size"] = r.rasterUnitsPerPixelX()
    data["Nx"] = r.width()
    data["Ny"] = r.height()
    data["x_left_lower_corner"] = ex.xMinimum()
    data["y_left_lower_corner"] = ex.yMinimum()
    # Load in coverage values
    coverage = []
    for lay in envlayers.split(";"):
        raster = gdal.Open(str(lay))
        if raster.RasterCount > 1:
            progress.setConsoleInfo("Warning: Multiple bands for layer detected. Using only the first band.")
        array = raster.GetRasterBand(1).ReadAsArray()
        NA = raster.GetRasterBand(1).GetNoDataValue()
        if NA is None:
            raise GeoAlgorithmExecutionException("Raster layer has no no-data value. Please specify a no-data value for this dataset.")
        else:
            array[array == NA] = -9999  # Replace no-data values of the array with -9999
        coverage.append(array)
    data["coverages"] = numpy.array(coverage)  # Load all coverage values into the bunch
    # Set up parameters for the output prediction
    a = gdal.Open(envlayers.split(";")[0])
    columns = a.RasterXSize
    rows = a.RasterYSize
    driver = a.GetDriver()
    NA = -9999
    gt = a.GetGeoTransform()
    proj = a.GetProjection()
    output = self.getOutputValue(self.OUT_PRED)
    # Set up the data grid
    xgrid, ygrid = construct_grids(data)
    # The grid in x,y coordinates
    X, Y = numpy.meshgrid(xgrid, ygrid[::-1])
    # Background points (grid coordinates) for evaluation
    numpy.random.seed(100)
    background_points = numpy.c_[numpy.random.randint(low=0, high=data.Ny, size=10000),
                                 numpy.random.randint(low=0, high=data.Nx, size=10000)].T
    # We'll make use of the fact that the first coverage layer has measurements
    # at all land points. This will help us decide between land and water.
    # FIXME: This assumes that all predictors have a similar distribution, which might be violated.
    land_reference = data.coverages[0]
    progress.setConsoleInfo("Loading Occurrence Data and coverage")
    # Creating the response
    train = []
    for feature in v.getFeatures():
        geom = feature.geometry().asPoint()
        mx = geom.x()
        my = geom.y()
        train.append((mx, my))
    data["train"] = numpy.array(train)  # Add to bunch as training dataset
    # Create the species bunch
    sp_Bunch = Bunch(name="Species")
    points = dict(train=data.train)
    for label, pts in points.iteritems():
        # Determine coverage values for each of the training points
        ix = numpy.searchsorted(xgrid, pts[:, 0])
        iy = numpy.searchsorted(ygrid, pts[:, 1])
        sp_Bunch['cov_%s' % label] = data.coverages[:, -iy, ix].T
    progress.setConsoleInfo("Finished loading coverage data of environmental layers")
    # Starting modelling
    progress.setConsoleInfo("Finished preparing the data for the analysis")
    progress.setConsoleInfo("----")
    progress.setConsoleInfo("Starting Modelling with support of sklearn")
    # Standardize features
    # TODO: Enable different or no standardization methods
    mean = sp_Bunch.cov_train.mean(axis=0)
    std = sp_Bunch.cov_train.std(axis=0)
    train_cover_std = (sp_Bunch.cov_train - mean) / std
    # Fit OneClassSVM
    progress.setConsoleInfo("Fitting Support Vector Machine")
    # TODO: Allow the user to vary the input parameters
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
    clf.fit(train_cover_std)
    progress.setConsoleInfo("Fitting done")
    # Predict the species distribution using the training data
    Z = numpy.ones((data.Ny, data.Nx), dtype=numpy.float64)
    # We'll predict only for the land points
    idx = numpy.where(land_reference > -9999)
    coverages_land = data.coverages[:, idx[0], idx[1]].T
    pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
    Z *= pred.min()
    Z[idx[0], idx[1]] = pred
    levels = numpy.linspace(Z.min(), Z.max(), 25)
    Z[land_reference == -9999] = -9999
    result = Z  # Save the final result scores
    # Compute AUC with regard to the background points
    pred_background = Z[background_points[0], background_points[1]]
    # NOTE: no held-out test partition is built in this listing, so the
    # training points themselves are scored against the background here
    pred_test = clf.decision_function((sp_Bunch.cov_train - mean) / std)[:, 0]
    scores = numpy.r_[pred_test, pred_background]
    y = numpy.r_[numpy.ones(pred_test.shape), numpy.zeros(pred_background.shape)]
    fpr, tpr, thresholds = metrics.roc_curve(y, scores)
    roc_auc = metrics.auc(fpr, tpr)  # Area under the ROC curve
    # TODO: Evaluate the availability of other metrics to compute (average mean error, etc.)
    # Create the output prediction file
    output = self.getOutputValue(self.OUT_PRED_RES)
    titles = ['AUC']
    res_pred = [roc_auc]
    # Save output
    func.saveToCSV(res_pred, titles, output)
    # Create output for the resulting prediction
    metadata = driver.GetMetadata()
    if metadata.has_key(gdal.DCAP_CREATE) and metadata[gdal.DCAP_CREATE] == "YES":
        pass
    else:
        progress.setConsoleInfo("Output creation in the input file format is not supported by gdal. Creating GTiff by default.")
        driver = gdal.GetDriverByName("GTiff")
    # result is a float64 array, so create the output band with a matching GDAL
    # type code (driver.Create expects a GDAL type, not a numpy dtype)
    data_type = gdal.GDT_Float64
    try:
        outData = driver.Create(output, columns, rows, 1, data_type)
    except Exception, e:
        ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, "Output file could not be created!")
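# construct_grids() is called above but not defined in this listing. It most
# likely mirrors the helper of the same name from the scikit-learn species
# distribution example, building coordinate vectors from the Bunch fields set
# earlier (grid_size, Nx, Ny, x/y_left_lower_corner); sketch under that assumption:
def construct_grids(batch):
    # Corner coordinates of the grid
    xmin = batch.x_left_lower_corner + batch.grid_size
    xmax = xmin + (batch.Nx * batch.grid_size)
    ymin = batch.y_left_lower_corner + batch.grid_size
    ymax = ymin + (batch.Ny * batch.grid_size)
    # x and y coordinates of the grid cells
    xgrid = numpy.arange(xmin, xmax, batch.grid_size)
    ygrid = numpy.arange(ymin, ymax, batch.grid_size)
    return (xgrid, ygrid)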