def threadReadingByFile(rasterfn):
    ''' each thread reads a separate raster using multiprocess pool
    '''
    import gdal, gdalconst, conf
    import numpy as np
    rst = None
    if 'geo' in rasterfn:
        rst = raster.Raster(msrlevel=conf.MSR_LEVEL_NOMINAL)
    else:
        rst = raster.Raster(msrlevel=conf.MSR_LEVEL_RATIO)
    rst.readRasterGDAL(rasterfn)
    return rst
def dempar(dem, logpath=None):
    rast = raster.Raster(dem)

    # determine data type
    dtypes = {"Int16": "INTEGER*2", "UInt16": "INTEGER*2", "Float32": "REAL*4"}
    if rast.dtype not in dtypes:
        raise IOError("data type not supported")
    else:
        dtype = dtypes[rast.dtype]

    # format pixel posting and top left coordinate
    posting = str(rast.geotransform["yres"]) + " " + str(rast.geotransform["xres"])
    latlon = str(rast.geotransform["ymax"]) + " " + str(rast.geotransform["xmin"])

    # evaluate projection
    projections = {"longlat": "EQA", "utm": "UTM"}
    if rast.proj4args["proj"] not in projections:
        raise IOError("projection not supported (yet)")
    else:
        projection = projections[rast.proj4args["proj"]]

    # get ellipsoid
    ellipsoid = rast.proj4args["ellps"] if "ellps" in rast.proj4args else rast.proj4args["datum"]
    if ellipsoid != "WGS84":
        raise IOError("ellipsoid not supported (yet)")

    # create list for GAMMA command input
    parlist = [projection, ellipsoid, 1, os.path.basename(dem), dtype, 0, 1,
               rast.cols, rast.rows, posting, latlon]

    # execute GAMMA command
    run(["create_dem_par", os.path.splitext(dem)[0] + ".par"],
        os.path.dirname(dem), logpath, inlist=parlist)
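A minimal usage sketch for dempar, assuming the function is importable from the surrounding module; the DEM path and log directory below are placeholders. The input must be georeferenced in a longlat or UTM projection on the WGS84 ellipsoid, otherwise the checks above raise IOError.

# hypothetical call: the DEM path and log directory are placeholders
dem = "/data/dem/srtm_utm.tif"
dempar(dem, logpath="/data/dem/log")  # writes /data/dem/srtm_utm.par via the GAMMA create_dem_par command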
def readEnvDataLayers(rasterdir, rasterfns, multithread=conf.MULTITHREAD_READ):
    ''' read in environmental data layers
        rasterdir: directory containing the raster files
        rasterfns: list of raster file names
        return: list of rasters, standardized to [0, 1] or [-0.5, 0.5] if needed
    '''
    rst_list = None
    if multithread:
        def threadReadingByFile(rasterfn):
            ''' each thread reads a separate raster using multiprocess pool
            '''
            import gdal, gdalconst, conf
            import numpy as np
            rst = None
            if 'geo' in rasterfn:
                rst = raster.Raster(msrlevel=conf.MSR_LEVEL_NOMINAL)
            else:
                rst = raster.Raster(msrlevel=conf.MSR_LEVEL_RATIO)
            rst.readRasterGDAL(rasterfn)
            return rst

        ## multi-thread reading
        n_threads = len(rasterfns)
        MP_pool = Pool(n_threads)
        fns = []
        for i in range(len(rasterfns)):
            fns.append(rasterdir + os.sep + rasterfns[i])
        rst_list = MP_pool.map(threadReadingByFile, fns)
        MP_pool.clear()
    else:
        rst_list = []  # hold environmental variables
        for rasterfn in rasterfns:
            ## presume all covariates are continuous
            rst = None
            if 'geo' in rasterfn:
                rst = raster.Raster(msrlevel=conf.MSR_LEVEL_NOMINAL)
            else:
                rst = raster.Raster(msrlevel=conf.MSR_LEVEL_RATIO)
            rst.readRasterGDAL(rasterdir + os.sep + rasterfn)
            #print rst.getData().size
            rst_list.append(rst)
    return rst_list
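A hedged usage sketch for readEnvDataLayers; the directory and file names are placeholders. Note the convention above: file names containing 'geo' are read as nominal (categorical) layers, all others as ratio-scaled. The serial path (multithread=False) is shown to avoid the pool dependency.

# hypothetical example: covariate directory and file names are placeholders
covdir = "/data/covariates"
covfns = ["geo_parentmaterial.tif", "slope.tif", "twi.tif"]
layers = readEnvDataLayers(covdir, covfns, multithread=False)
for lyr in layers:
    print lyr.getData().size  # same accessor as the commented-out debug line above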
def mosaic(demlist, outname, byteorder=1, gammapar=True):
    nodata = str(raster.Raster(demlist[0]).nodata)
    run(dissolve(["gdalwarp", "-q", "-of", "ENVI",
                  "-srcnodata", nodata, "-dstnodata", nodata,
                  demlist, outname]))
    if byteorder == 1:
        swap(outname, outname + "_swap")
        for item in [outname, outname + ".hdr", outname + ".aux.xml"]:
            os.remove(item)
        os.rename(outname + "_swap", outname)
        os.rename(outname + "_swap.hdr", outname + ".hdr")
    if gammapar:
        dempar(outname)
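A hypothetical call of mosaic; the DEM tile paths and output name are placeholders. With byteorder=1 the ENVI mosaic is byte-swapped via swap() below, and gammapar=True writes the matching GAMMA parameter file via dempar() above.

# hypothetical example: tile list and output name are placeholders
tiles = ["/data/dem/tile_1.tif", "/data/dem/tile_2.tif"]
mosaic(tiles, "/data/dem/dem_mosaic", byteorder=1, gammapar=True)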
def swap(data, outname):
    rast = raster.Raster(data)
    dtype = rast.dtype
    if rast.format != "ENVI":
        raise IOError("only ENVI format supported")
    dtype_lookup = {"Int16": 2, "CInt16": 2, "Int32": 4,
                    "Float32": 4, "CFloat32": 4, "Float64": 8}
    if dtype not in dtype_lookup:
        raise IOError("data type " + dtype + " not supported")
    sp.check_call(["swap_bytes", data, outname, str(dtype_lookup[dtype])], stdout=sp.PIPE)
    header = HDRobject(data + ".hdr")
    header.byte_order = 1
    hdr(header, outname + ".hdr")
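A short, hypothetical call of swap in isolation; it assumes an existing ENVI raster with a .hdr sidecar and that the GAMMA swap_bytes program is on the PATH. The file names are placeholders.

# hypothetical example: input/output names are placeholders
swap("/data/dem/dem_mosaic", "/data/dem/dem_mosaic_swap")  # writes a big-endian copy plus an updated header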
def readEnvDataLayers(ascdir, asciifns):
    ''' read in environmental data layers
        ascdir: directory containing .asc files
        asciifns: list of ascii file names
        return: list of rasters, standardized to [0, 1] or [-0.5, 0.5] if needed
    '''
    envmaps = []  # hold environmental variables
    matrix = []   # hold data for PCA
    for asciifn in asciifns:
        ## presume all covariates are continuous
        envmap = raster.Raster()
        envmap.readFromAscii(ascdir + os.sep + asciifn)
        envmaps.append(envmap)
    return envmaps
def generateImage(width, height, camera, group, dist_min, dist_max, lights, renderMode, bgc=r.Vec3(1, 1, 1)):
    # create Raster
    ra = raster.Raster(width, height)
    # loop over all pixels
    for x in range(0, width):
        for y in range(0, height):
            # create Ray
            rayX = float(x) / width
            rayY = float(height - y) / height
            #ray = camera.screenToPlaneLocation(r.Point2D(rayX, rayY))
            ray = camera.generateRay(r.Point2D(rayX, rayY))
            # find closest hit
            h = group.intersect(ray, dist_min, dist_max)
            # in case no hit is found
            if h.t != float('inf'):
                # set pixel color based on render mode
                if renderMode == RenderMode.COLOR:
                    color = h.color
                elif renderMode == RenderMode.DISTANCE:
                    gray = (dist_max - h.t) / (dist_max - dist_min)
                    color = r.Vec3(gray, gray, gray)
                elif renderMode == RenderMode.DIFFUSE:
                    lsum = r.Vec3(0, 0, 0)
                    intersectionPoint = ray.pointAtParameter(h.t)
                    surfaceNormal = h.getNormal().normalize()
                    for l in lights:
                        lco = l.getIntensity(intersectionPoint, surfaceNormal)
                        lsum.x += h.color.x * lco.x
                        lsum.y += h.color.y * lco.y
                        lsum.z += h.color.z * lco.z
                    # Check lsum values (cap at 1)
                    if lsum.x > 1:
                        lsum.x = 1
                    if lsum.y > 1:
                        lsum.y = 1
                    if lsum.z > 1:
                        lsum.z = 1
                    color = lsum
                elif renderMode == RenderMode.SHADOWS:
                    lsum = r.Vec3(0, 0, 0)
                    intersectionPoint = ray.pointAtParameter(h.t)
                    surfaceNormal = h.getNormal().normalize()
                    for l in lights:
                        lco = l.getIntensity(intersectionPoint, surfaceNormal)
                        h2 = r.Hit()
                        # Use 'try' to assume the light is directional,
                        # so shadows can be calculated
                        try:
                            #mode = 'directional'
                            ray2 = r.Ray(l.direction.normalize(), intersectionPoint)
                            h2 = group.intersect(ray2, 0.001, dist_max)
                            if h2.t == float('inf'):
                                lsum.x += h.color.x * lco.x
                                lsum.y += h.color.y * lco.y
                                lsum.z += h.color.z * lco.z
                        # if code throws an attribute error, light is ambient
                        # (ambient lights have no attribute "direction")
                        except AttributeError:
                            #mode = 'ambient'
                            lsum.x += h.color.x * lco.x
                            lsum.y += h.color.y * lco.y
                            lsum.z += h.color.z * lco.z
                    # Check lsum values (cap at 1)
                    if lsum.x > 1:
                        lsum.x = 1
                    if lsum.y > 1:
                        lsum.y = 1
                    if lsum.z > 1:
                        lsum.z = 1
                    color = lsum
                else:
                    print("invalid render mode")
                    color = bgc  # fall back to the background color so the pixel write below still works
            else:
                color = bgc
            # color up until this point must be a Vec3 between 0-1
            color = color.scalarMult(255)
            color = (color.x, color.y, color.z)
            ra.setPixel(x, y, color)
    ra.display()
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)

X, y = dataset

# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)

# ============
# Create cluster objects
# ============
threshold = 5
min_size = 5
prec = 0.9

algorithm = raster.Raster(threshold, min_size, prec)
algorithm.fit(X)

if hasattr(algorithm, 'labels_'):
    y_pred = algorithm.labels_.astype(int)
else:
    y_pred = algorithm.predict(X)

plt.subplot(len(datasets), 1, plot_num)

colors = np.array(
    list(
        islice(
            cycle([
                '#377eb8', '#ff7f00', '#4daf4a',
                '#f781bf', '#a65628',
def predict_opencl(self, predict_class=False, single_cpu=conf.SINGLE_CPU, opencl_config=conf.OPENCL_CONFIG):
    ''' PyOpenCL implementation of the iPSM approach
        return: a vector of predictions, each for a row in X
    '''
    print 'predict_opencl() was called'
    try:
        print 'HOST MEM SIZE:', conf.HOST_MEM_SIZE / 1024.0**2, 'MB'
        host_mem_avail = dict(psutil.virtual_memory()._asdict())['available']
        print 'HOST MEM AVAIL:', host_mem_avail / 1024.0**2, 'MB'
        host_mem_quota = conf.HOST_MEM_PCT * host_mem_avail / 1024.0**2
        ## read in covariates tile by tile to avoid blowing up host memory
        print 'HOST MEM QUOTA:', host_mem_quota, 'MB'
        print 'COVARIATE DATA SIZE (EST.):', self.__tileRasterReader.estimate_TotalSize_MB, 'MB'
        #print 'SINGLE_CPU:', single_cpu
        print ''

        if not conf.TILE_READ and self.__tileRasterReader.estimate_TotalSize_MB <= host_mem_quota:
            ## covariates fit in host memory: read and predict in one pass
            template_raster = raster.Raster()
            template_data = None
            #print 'started reading in ENTIRE covariates into host memory...'
            t0 = time.time()
            data = self.__tileRasterReader.readWholeRaster()
            t1 = time.time() - t0
            conf.TIME_KEEPING_DICT['parts']['read'].append(t1)
            print 'done reading in ENTIRE covariates took', t1, 's'

            X = []
            if len(data.shape) == 2:  # single covariate
                template_data = data
                tmp_data = data.flatten()
                X.append(tmp_data[tmp_data != self.__tileRasterReader.nodata])
            else:                     # 2+ covariates
                template_data = data[0]
                for i in range(data.shape[0]):
                    tmp_data = data[i].flatten()
                    X.append(tmp_data[tmp_data != self.__tileRasterReader.nodata])
            X = np.array(X).T

            y = self.predict_opencl_tile(X, predict_class, single_cpu, opencl_config)

            template_raster.createRaster(template_data, xoff=0, yoff=0,
                                         geotransform=self.__tileRasterReader.geotransfrom,
                                         projection=self.__tileRasterReader.projection,
                                         nodata=self.__tileRasterReader.nodata)
            ## write predicted soil map
            template_raster.updateRasterData(y[:, 0])
            t0 = time.time()
            template_raster.writeRasterGDAL(self.__outfns[0])
            conf.TIME_KEEPING_DICT['parts']['write'].append(time.time() - t0)

            ## write prediction uncertainty map
            template_raster.updateRasterData(y[:, 1])
            t1 = time.time()
            template_raster.writeRasterGDAL(self.__outfns[1])
            conf.TIME_KEEPING_DICT['parts']['write'].append(time.time() - t1)
            print 'done writing result took', time.time() - t0, 's\n'

        else:
            ## covariates cannot fit in host memory
            ## read in covariates tile by tile
            if conf.TILE_XSIZE is None or conf.TILE_YSIZE is None:
                #if conf.MULTITHREAD_READ:# and conf.DEVICE_TYPE == 'CPU':
                host_mem_quota /= 4
                factor = int(host_mem_quota / self.__tileRasterReader.estimateTileSize_MB() *
                             (self.__tileRasterReader.ysize / self.__tileRasterReader.block_ysize_base))
                print factor, self.__tileRasterReader.block_ysize_base, \
                      self.__tileRasterReader.block_ysize_base * factor
                self.__tileRasterReader.ysize = min(self.__tileRasterReader.block_ysize_base * factor,
                                                    self.__tileRasterReader.nrows)
            print 'tile size:', self.__tileRasterReader.xsize, 'x', self.__tileRasterReader.ysize, \
                  self.__tileRasterReader.estimateTileSize_MB(), 'MB'
            print 'raster size:', self.__tileRasterReader.ncols, 'x', self.__tileRasterReader.nrows

            ## writer for writing out tiles of predicted soil map
            soilmapwriter = gdalwrapper.tiledRasterWriter(self.__outfns[0],
                                                          self.__tileRasterReader.nrows,
                                                          self.__tileRasterReader.ncols,
                                                          self.__tileRasterReader.geotransfrom,
                                                          self.__tileRasterReader.projection,
                                                          self.__tileRasterReader.nodata)
            ## writer for writing out tiles of uncertainty map
            uncertmapwriter = gdalwrapper.tiledRasterWriter(self.__outfns[1],
                                                            self.__tileRasterReader.nrows,
                                                            self.__tileRasterReader.ncols,
                                                            self.__tileRasterReader.geotransfrom,
                                                            self.__tileRasterReader.projection,
                                                            self.__tileRasterReader.nodata)

            ## prediction tile by tile
            template_raster = raster.Raster()
            template_data = None
            X = []
            t0 = time.time()
            data, xoff, yoff, xsize, ysize = self.__tileRasterReader.readNextTile()
            t1 = time.time()
            conf.TIME_KEEPING_DICT['parts']['read'].append(t1 - t0)
            print 'done reading in tile', self.__tileRasterReader.xoff, '/', self.__tileRasterReader.ncols, \
                  self.__tileRasterReader.yoff, '/', self.__tileRasterReader.nrows, \
                  'took', t1 - t0, 's\n'

            while data is not None:
                if len(data.shape) == 1:
                    template_data = np.ones((ysize, xsize)) * self.__tileRasterReader.nodata
                elif len(data.shape) == 2:  # single covariate
                    template_data = data
                    tmp_data = data.flatten()
                    X.append(tmp_data[tmp_data != self.__tileRasterReader.nodata])
                else:                       # 2+ covariates
                    template_data = data[0]
                    for i in range(data.shape[0]):
                        tmp_data = data[i].flatten()
                        X.append(tmp_data[tmp_data != self.__tileRasterReader.nodata])
                X = np.array(X).T

                y = None
                ## prediction
                if X.size > 0:
                    y = self.predict_opencl_tile(X, predict_class, single_cpu, opencl_config)

                ##############################
                t0 = time.time()
                template_raster.createRaster(template_data, xoff=xoff, yoff=yoff,
                                             geotransform=self.__tileRasterReader.geotransfrom,
                                             projection=self.__tileRasterReader.projection,
                                             nodata=self.__tileRasterReader.nodata)
                ## write tile of predicted soil map
                if X.size > 0:
                    template_raster.updateRasterData(y[:, 0])
                t1 = time.time()
                soilmapwriter.writeTile(template_raster.getData2D(), xoff, yoff)
                conf.TIME_KEEPING_DICT['parts']['write'].append(time.time() - t1)

                ## write tile of prediction uncertainty map
                if X.size > 0:
                    template_raster.updateRasterData(y[:, 1])
                t2 = time.time()
                uncertmapwriter.writeTile(template_raster.getData2D(), xoff, yoff)
                conf.TIME_KEEPING_DICT['parts']['write'].append(time.time() - t2)
                print 'done writing out tile took', time.time() - t0, 's\n'
                #################################

                # reset X
                X = []
                t0 = time.time()
                # read in the next tile
                data, xoff, yoff, xsize, ysize = self.__tileRasterReader.readNextTile()
                t1 = time.time()
                conf.TIME_KEEPING_DICT['parts']['read'].append(t1 - t0)
                print 'done reading in tile', self.__tileRasterReader.xoff, '/', self.__tileRasterReader.ncols, \
                      self.__tileRasterReader.yoff, '/', self.__tileRasterReader.nrows, \
                      'took', t1 - t0, 's\n'

            ## have to call this to FlushCache() to disc
            t0 = time.time()
            soilmapwriter.close()
            uncertmapwriter.close()
            conf.TIME_KEEPING_DICT['parts']['write'].append(time.time() - t0)
    except Exception as e:
        raise
params.update(algo_params)

X, y = dataset

# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)

# ============
# Create cluster objects
# ============
threshold = 5
min_size = 5
prec_1 = 0.9
prec_2 = 0.73

rs_1 = raster.Raster(threshold, min_size, prec_1)
rs_2 = raster.Raster(threshold, min_size, prec_2)

clustering_algorithms = (('RASTER (prec. = 0.90)', rs_1),
                         ('RASTER (prec. = 0.73)', rs_2))

for name, algorithm in clustering_algorithms:
    algorithm.fit(X)

    if hasattr(algorithm, 'labels_'):
        y_pred = algorithm.labels_.astype(int)
    else:
        y_pred = algorithm.predict(X)

    plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
    if i_dataset == 0:
        plt.title(name, size=11)
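A self-contained sketch of the RASTER clustering interface used in the two snippets above, run on synthetic blob data from scikit-learn; the threshold, min_size, and precision values simply mirror the snippet and are not tuned.

# hypothetical standalone example on synthetic data
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
import raster

X, _ = make_blobs(n_samples=1500, centers=3, random_state=42)
X = StandardScaler().fit_transform(X)          # same normalization as above

algorithm = raster.Raster(5, 5, 0.9)           # threshold, min_size, precision as in the snippet
algorithm.fit(X)
labels = algorithm.labels_ if hasattr(algorithm, 'labels_') else algorithm.predict(X)
print(len(np.unique(labels)), 'clusters found')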
# testdraw.py
#
# tests the drawing.
#
# Ashton

import raster
import GISgraphics
import asciiraster

rast = raster.Raster(raster.biggerRast, cellsize=3)
mapwin = GISgraphics.MapWindow('Raster', winXY=(300, 300), mapextent=rast.envelope)
rast.draw(mapwin)
mapwin.display()
mapwin.clear()
rast.draw(mapwin)
mapwin.display()

# Now some real data.
ascfile = '../geocode/data/medora_subu.asc'
medoraIn = asciiraster.openAsciiRaster(ascfile)
medoraIn[0]
medora = raster.Raster(medoraIn[1], \
                       cellsize=medoraIn[0]['cellsize'], \