def __iter__(self):
    gdal.TermProgress_nocb(0)
    total = self.layer.GetFeatureCount()
    for count, feature in enumerate(self.layer, 1):
        # geometry
        geometry = feature.geometry()
        envelope = geometry.GetEnvelope()
        order = 'x1', 'x2', 'y1', 'y2'
        width = int((envelope[1] - envelope[0]) / CELLSIZE)
        height = int((envelope[3] - envelope[2]) / CELLSIZE)
        origin = envelope[0], envelope[3]
        polygon = self.POLYGON.format(**dict(zip(order, envelope)))

        # path
        name = feature[NAME]
        path = join(self.targetdir, name[0:3], name + '.tif')

        # tile
        tile = Tile(
            path=path,
            width=width,
            height=height,
            origin=origin,
            polygon=polygon,
        )
        yield tile
        gdal.TermProgress_nocb(count / total)
def __iter__(self): """ Return generator of (source, target, void) tuples. Source and target are views into a larger array. Void is a newly created array containing the footprint of the void. """ if progress: # pragma: no cover gdal.TermProgress_nocb(0) # analyze mask = (self.source == self.no_data_value) labels, total = ndimage.label(mask) items = ndimage.find_objects(labels) # iterate the objects for label, item in enumerate(items, 1): index = self._grow(item) # to include the edge source = self.source[index] # view into source array target = self.target[index] # view into target array void = labels[index] == label # the footprint of this void yield source, target, void if progress: # pragma: no cover gdal.TermProgress_nocb(label / total)
def extract(preparation): """ Extract for a single feature. """ source = preparation.get_source() target = preparation.get_target(source) total = len(target) gdal.TermProgress_nocb(0) batch = (c for b in target for c in b) queue = queues.Queue(maxsize=8) kwargs = {'queue': queue, 'batch': batch} thread1 = threading.Thread(target=filler, kwargs=kwargs) thread1.daemon = True thread1.start() while True: # fetch loaded chunks try: chunk, thread2 = queue.get() thread2.join() # this makes sure the chunk is laoded except TypeError: break # save complete blocks if len(chunk.block.chunks) == len(chunk.block.inputs): chunk.block.save() gdal.TermProgress_nocb((chunk.block.tile.serial + 1) / total) thread1.join()
def __iter__(self):
    total = len(self)
    gdal.TermProgress_nocb(0)
    # this implementation works around an issue in pygdal
    # https://github.com/nextgis/pygdal/issues/31
    for i in range(total):
        yield self.layer[i]
        gdal.TermProgress_nocb((i + 1) / total)
def select(self, text):
    """ Return generator of features for text, e.g. '2/5' """
    selected, parts = map(int, text.split('/'))
    size = len(self) / parts
    start = int((selected - 1) * size)
    stop = len(self) if selected == parts else int(selected * size)
    total = stop - start
    gdal.TermProgress_nocb(0)
    for count, fid in enumerate(range(start, stop), 1):
        yield self.layer[fid]
        gdal.TermProgress_nocb(count / total)
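A quick check of the partition arithmetic, assuming a hypothetical layer of 103 features:

# text == '2/5', len(self) == 103 (hypothetical):
#   size  = 103 / 5        -> 20.6
#   start = int(1 * 20.6)  -> 20
#   stop  = int(2 * 20.6)  -> 41
# so '2/5' yields fids 20..40; '5/5' sets stop = len(self),
# which guarantees the final fid 102 is never dropped to rounding.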
def __init__(self, operations=0):
    self.steps = 1  # number of steps per operation
    self.operations = float(operations)
    self.progress = 0
    self.enabled = operations > 0
    if self.enabled:
        gdal.TermProgress_nocb(0)
def gdal_ogr_mask_union(src_layer, src_field, dst_defn=None):
    '''`union` a `src_layer`'s features based on `src_field` where
    `src_field` holds a value of 0 or 1. optionally, specify an output
    layer defn for the unioned feature.

    returns the output feature class'''
    if dst_defn is None:
        dst_defn = src_layer.GetLayerDefn()
    multi = ogr.Geometry(ogr.wkbMultiPolygon)
    feats = len(src_layer)
    utils.echo_msg('unioning {} features'.format(feats))
    for n, f in enumerate(src_layer):
        # TermProgress_nocb expects a fraction in [0, 1]; the original
        # `(n + 1 / feats) * 100` also grouped the division wrongly
        gdal.TermProgress_nocb((n + 1) / feats)
        if f.GetField(src_field) == 0:
            src_layer.DeleteFeature(f.GetFID())
        elif f.GetField(src_field) == 1:
            f.geometry().CloseRings()
            wkt = f.geometry().ExportToWkt()
            multi.AddGeometryDirectly(ogr.CreateGeometryFromWkt(wkt))
            src_layer.DeleteFeature(f.GetFID())
    #union = multi.UnionCascaded() ## slow on large multi...
    out_feat = ogr.Feature(dst_defn)
    out_feat.SetGeometryDirectly(multi)
    #union = multi = None
    return out_feat
def boxcar_x(image, bsize):
    (y, x) = image.shape
    # float output array, as the original commented-out allocation intended;
    # also avoids writing convolved values back into the input in place
    outimage = image.astype('f4')
    w = np.ones(bsize)
    # edge = int((bsize - 1) / 2)
    for j in range(0, x):
        gdal.TermProgress_nocb(float(j) / float(x))
        outimage[:, j] = np.convolve(w / w.sum(), image[:, j], mode='same')
    gdal.TermProgress_nocb(1.0)  # finish the progress line at 100
    return outimage
def read(archive, name):
    """ Read from zip into points. """
    logger.debug('Count lines in "{}".'.format(name))
    with archive.open(name) as fobj:
        # zip members are read as bytes, so count a bytes newline
        total = fobj.read().count(b'\n')

    points = np.empty((total, 3), dtype='f4')
    logger.debug('Reading points from "{}".'.format(name))
    with archive.open(name) as fobj:
        for count, line in enumerate(fobj):
            if count == total:
                break
            points[count] = line.split(b',')
            gdal.TermProgress_nocb((count + 1) / total)
    return points
def extract_model(preparation, fill_zeros):
    """ Extract for a single feature. """
    source = preparation.get_source()
    target = preparation.get_target(source)
    total = len(target)
    gdal.TermProgress_nocb(0)

    batch = (c for b in target for c in b)
    queue = queues.Queue(maxsize=8)
    kwargs = {'queue': queue, 'batch': batch}
    thread1 = threading.Thread(target=filler, kwargs=kwargs)
    thread1.daemon = True
    thread1.start()

    while True:
        # fetch loaded chunks
        try:
            chunk, thread2 = queue.get()
            thread2.join()  # this makes sure the load method finished
        except TypeError:
            break

        # check if loading was a success
        if not chunk.loaded:
            print('Oops, a chunk failed to fetch. Resuming is worth a try!')
            return

        # save complete blocks
        if len(chunk.block.chunks) == len(chunk.block.inputs):
            chunk.block.save(fill_zeros)
            gdal.TermProgress_nocb((chunk.block.tile.serial + 1) / total)

    thread1.join()
    ring.AddPoint(xur, yur)
    ring.AddPoint(xur, yll)
    ring.AddPoint(xll, yll)
    poly = ogr.Geometry(ogr.wkbPolygon)
    poly.AddGeometry(ring)

    # First, find the tiles close to the overall bounding box
    inds1 = []
    for k in range(tiles.FeatureCount()):
        feat = tiles.features[k]
        if poly.Distance(feat.GetGeometryRef()) <= args.dist:
            inds1.append(k)

    # Second, find exactly the tiles within the given distance of the
    # glacier outlines
    inds2 = []
    for k in range(len(inds1)):
        gdal.TermProgress_nocb(float(k) / len(inds1))
        feat = tiles.features[inds1[k]]
        # union.Intersect(feat.GetGeometryRef())
        if union.Distance(feat.GetGeometryRef()) <= args.dist:
            inds2.append(inds1[k])

    list_tiles = np.sort(np.unique(tiles.fields.values['tile'][inds2]))
else:
    list_tiles = np.sort(np.unique(tiles.fields.values['tile']))
# list_tiles = ['32_34', '32_35', '32_36', '31_34', '31_35', '31_36']

## Create output directory ##
if args.outdir is not None:
def main(argv):
    argv = gdal.GeneralCmdLineProcessor(argv)
    if argv is None:
        return 0

    driver_name = 'GTiff'
    src_color_filename = None
    src_greyscale_filename = None
    dst_color_filename = None
    quiet = False

    # Parse command line arguments.
    i = 1
    while i < len(argv):
        arg = argv[i]
        if arg == '-of':
            i = i + 1
            driver_name = argv[i]
        elif arg == '-q' or arg == '-quiet':
            quiet = True
        elif src_color_filename is None:
            src_color_filename = argv[i]
        elif src_greyscale_filename is None:
            src_greyscale_filename = argv[i]
        elif dst_color_filename is None:
            dst_color_filename = argv[i]
        else:
            Usage()
        i = i + 1

    if dst_color_filename is None:
        Usage()

    datatype = gdal.GDT_Byte

    hilldataset = gdal.Open(src_greyscale_filename, gdal.GA_ReadOnly)
    colordataset = gdal.Open(src_color_filename, gdal.GA_ReadOnly)

    # check for 3 or 4 bands in the color file
    if colordataset.RasterCount != 3 and colordataset.RasterCount != 4:
        print('Source image does not appear to have three or four bands '
              'as required.')
        return 1

    # define output format, name, size, type and set projection
    out_driver = gdal.GetDriverByName(driver_name)
    outdataset = out_driver.Create(dst_color_filename,
                                   colordataset.RasterXSize,
                                   colordataset.RasterYSize,
                                   colordataset.RasterCount, datatype)
    outdataset.SetProjection(hilldataset.GetProjection())
    outdataset.SetGeoTransform(hilldataset.GetGeoTransform())

    # assign RGB and hillshade bands
    rBand = colordataset.GetRasterBand(1)
    gBand = colordataset.GetRasterBand(2)
    bBand = colordataset.GetRasterBand(3)
    if colordataset.RasterCount == 4:
        aBand = colordataset.GetRasterBand(4)
    else:
        aBand = None
    hillband = hilldataset.GetRasterBand(1)
    hillbandnodatavalue = hillband.GetNoDataValue()

    # check for same file size
    if (rBand.YSize != hillband.YSize) or (rBand.XSize != hillband.XSize):
        print('Color and hillshade must be the same size in pixels.')
        return 1

    # loop over lines to apply hillshade
    for i in range(hillband.YSize):
        # load RGB and Hillshade arrays
        rScanline = rBand.ReadAsArray(0, i, hillband.XSize, 1,
                                      hillband.XSize, 1)
        gScanline = gBand.ReadAsArray(0, i, hillband.XSize, 1,
                                      hillband.XSize, 1)
        bScanline = bBand.ReadAsArray(0, i, hillband.XSize, 1,
                                      hillband.XSize, 1)
        hillScanline = hillband.ReadAsArray(0, i, hillband.XSize, 1,
                                            hillband.XSize, 1)

        # convert to HSV
        hsv = rgb_to_hsv(rScanline, gScanline, bScanline)

        # if there's nodata on the hillband, use the v value from the color
        # dataset instead of the hillshade value.
        if hillbandnodatavalue is not None:
            equal_to_nodata = numpy.equal(hillScanline, hillbandnodatavalue)
            v = numpy.choose(equal_to_nodata, (hillScanline, hsv[2]))
        else:
            v = hillScanline

        # replace v with hillshade
        hsv_adjusted = numpy.asarray([hsv[0], hsv[1], v])

        # convert back to RGB
        dst_color = hsv_to_rgb(hsv_adjusted)

        # write out new RGB bands to output one band at a time
        outband = outdataset.GetRasterBand(1)
        outband.WriteArray(dst_color[0], 0, i)
        outband = outdataset.GetRasterBand(2)
        outband.WriteArray(dst_color[1], 0, i)
        outband = outdataset.GetRasterBand(3)
        outband.WriteArray(dst_color[2], 0, i)
        if aBand is not None:
            aScanline = aBand.ReadAsArray(0, i, hillband.XSize, 1,
                                          hillband.XSize, 1)
            outband = outdataset.GetRasterBand(4)
            outband.WriteArray(aScanline, 0, i)

        # update progress line
        if not quiet:
            gdal.TermProgress_nocb(float(i + 1) / hillband.YSize)

    return 0
def process_file(spath, tpath):
    old_ds = gdal.Open(spath)
    vrt_ds = gdal.AutoCreateWarpedVRT(old_ds, None, srs.ExportToWkt(),
                                      gdal.GRA_Bilinear)
    gdal.GetDriverByName('gtiff').CreateCopy(tpath, vrt_ds)


if __name__ == "__main__":
    if not os.path.exists(srcName):
        print("path error: %s" % srcName)
        sys.exit()

    list_of_files = []
    # directory
    if os.path.isdir(srcName):
        for dirpath, dirnames, filenames in os.walk(srcName):
            for filepath in filenames:
                if filepath.lower().endswith(".tif"):
                    spath = os.path.join(dirpath, filepath)
                    tpath = tgtName + os.path.join(dirpath[len(srcName):],
                                                   filepath)
                    # print(spath)
                    tdir = os.path.dirname(tpath)
                    if not os.path.exists(tdir):
                        os.makedirs(tdir)
                    list_of_files.append((spath, tpath))

    gdal.TermProgress_nocb(0)
    for i in range(len(list_of_files)):
        process_file(list_of_files[i][0], list_of_files[i][1])
        gdal.TermProgress_nocb((i + 1) / float(len(list_of_files)))
def command(shape_path, store_path, target_path, cellsize, time):
    """ Prepare and extract the first feature of the first layer. """
    # process store
    store = GeoInterface(load(store_path))
    dtype = np.dtype(store.dtype).type
    fillvalue = store.fillvalue

    # process shape
    datasource = ogr.Open(shape_path)
    layer = datasource[0]
    feature = layer[0]
    geometry = feature.geometry()
    sr = get_projection(geometry.GetSpatialReference())
    if sr is None:
        print('Error: EPSG projection code missing from shape.')
        exit()

    # process target
    target = create_dataset(dtype=dtype,
                            path=target_path,
                            geometry=geometry,
                            cellsize=cellsize,
                            fillvalue=fillvalue)

    # prepare
    gdal.TermProgress_nocb(0)
    index = Index(target, geometry)
    no_data_value = target.GetRasterBand(1).GetNoDataValue()

    # work
    total = len(index)
    for count, tile in enumerate(index, 1):
        # get data
        kwargs = {'sr': sr,
                  'start': time,
                  'width': tile.width,
                  'height': tile.height,
                  'geom': tile.polygon.ExportToWkt()}
        data = store.get_data(**kwargs)

        # make source
        array = data['values']
        kwargs = {'projection': osr.GetUserInputAsWKT(sr),
                  'geo_transform': tile.geo_transform,
                  'no_data_value': no_data_value}

        with datasets.Dataset(array, **kwargs) as source:
            # set pixels outside geometry to 'no data'
            outside = tile.polygon.Difference(geometry)
            burn(dataset=source, geometry=outside, value=no_data_value)

            # write to target
            p1, q1 = tile.origin
            DRIVER_GDAL_MEM.CreateCopy('', source)
            target.WriteRaster(
                p1, q1, tile.width, tile.height,
                source.ReadRaster(0, 0, tile.width, tile.height),
            )
        gdal.TermProgress_nocb(count / total)
def process(self, session, srs, subdomain, time, uuid):
    """
    Extract for a single feature.

    :param session: requests.Session object, logged in.
    :param srs: str defining spatial reference system
    :param subdomain: Lizard subdomain
    :param time: ISO-8601 timestamp
    :param uuid: Lizard raster UUID
    """
    completed = self.indicator.get()
    total = len(self.target)
    if completed == total:
        print('Already complete.')
        return
    if completed > 0:
        print('Resuming from chunk %s.' % completed)
    gdal.TermProgress_nocb(completed / total)

    # run a thread that starts putting chunks with threads in a queue
    queue = queues.Queue(maxsize=MAX_THREADS - 1)
    filler_kwargs = {
        'chunks': self.target.get_chunks(start=completed + 1),
        'subdomain': subdomain,
        'session': session,
        'queue': queue,
        'uuid': uuid,
        'time': time,
        'srs': srs,
    }
    filler_thread = threading.Thread(target=filler, kwargs=filler_kwargs)
    filler_thread.daemon = True
    filler_thread.start()

    while True:
        # fetch loaded chunks
        try:
            fetch_thread, chunk = queue.get()
            fetch_thread.join()  # this makes sure the chunk is loaded
        except TypeError:
            self.indicator.set(completed)
            break

        # abort on errors
        if chunk.response.status_code != 200:
            # remember last completed chunk
            self.indicator.set(completed)

            # abort
            print('\nFailed to fetch a chunk! The url used was:')
            print(chunk.response.url)
            msg = 'The server responded with status code %s (%s).'
            status_code = chunk.response.status_code
            print(msg % (status_code, responses[status_code]))
            exit()

        # save the chunk to the target
        self.target.save(chunk)
        completed = chunk.serial
        gdal.TermProgress_nocb(completed / total)

    filler_thread.join()
def progressbar(self, complete=0.0):
    gdal.TermProgress_nocb(complete)
def update_progress(self):
    if self.enabled:
        self.progress += 1.0
        gdal.TermProgress_nocb(self.progress / (self.operations * self.steps))
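Assuming this method and the `__init__` above belong to the same tracker class, driving the terminal progress line might look like the following minimal sketch; the class name `Progress` and the `jobs` iterable are made up for illustration.

from osgeo import gdal

class Progress:
    def __init__(self, operations=0):
        self.steps = 1  # number of steps per operation
        self.operations = float(operations)
        self.progress = 0
        self.enabled = operations > 0
        if self.enabled:
            gdal.TermProgress_nocb(0)

    def update_progress(self):
        if self.enabled:
            self.progress += 1.0
            gdal.TermProgress_nocb(self.progress / (self.operations * self.steps))


progress = Progress(operations=len(jobs))  # 'jobs' is a hypothetical iterable
for job in jobs:
    job.run()
    progress.update_progress()  # advances the 0..100 progress line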
        0, 0, 0, 0, 0, 0, 0]

# Make the values list into a numpy array
FOMarray = numpy.array(FOMmap)

for iBand in range(1, indataset.RasterCount + 1):
    inband = indataset.GetRasterBand(iBand)
    outband = outdataset.GetRasterBand(iBand)
    for i in range(inband.YSize - 1, -1, -1):
        scanline = inband.ReadAsArray(0, i, inband.XSize, 1,
                                      inband.XSize, 1)
        # numpy.choose bug (some versions) only allows 32 elements, so
        # changed to iteration:
        # scanline = numpy.choose(scanline, FOMmap, mode='clip')
        # let numpy iterate and replace...
        scanline[:] = FOMarray[scanline]
        outband.WriteArray(scanline, 0, i)

        # update progress line; (YSize - i) / YSize runs from 1/YSize up
        # to 1.0, whereas the original (YSize - i + 1) exceeded 1.0
        if not quiet:
            gdal.TermProgress_nocb(float(inband.YSize - i) / inband.YSize)
def main(mintpyDict, outshp):
    '''Main driver.'''
    shpDriver = ogr.GetDriverByName("ESRI Shapefile")

    ##Check if shape file already exists
    if os.path.exists(outshp):
        print('Output shape file {} exists. Will be overwritten ....'.format(outshp))
        shpDriver.DeleteDataSource(outshp)

    ##Start creating shapefile dataset and layer definition
    ds = shpDriver.CreateDataSource(outshp)
    srs = ogr.osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    layer = ds.CreateLayer('mintpy', srs, geom_type=ogr.wkbPoint)

    #Add code for each point
    fd = ogr.FieldDefn('CODE', ogr.OFTString)
    fd.SetWidth(8)
    layer.CreateField(fd)

    #Add DEM height for each point - this could be before / after DEM error correction
    fd = ogr.FieldDefn('HEIGHT', ogr.OFTReal)
    fd.SetWidth(7)
    fd.SetPrecision(2)
    layer.CreateField(fd)

    #Supposed to represent DEM error estimation uncertainty
    fd = ogr.FieldDefn('H_STDEV', ogr.OFTReal)
    fd.SetWidth(5)
    fd.SetPrecision(2)
    layer.CreateField(fd)

    #Estimated LOS velocity
    fd = ogr.FieldDefn('VEL', ogr.OFTReal)
    fd.SetWidth(8)
    fd.SetPrecision(2)
    layer.CreateField(fd)

    #Estimated uncertainty in velocity
    fd = ogr.FieldDefn('V_STDEV', ogr.OFTReal)
    fd.SetWidth(6)
    fd.SetPrecision(2)
    layer.CreateField(fd)

    #Temporal coherence
    fd = ogr.FieldDefn('COHERENCE', ogr.OFTReal)
    fd.SetWidth(5)
    fd.SetPrecision(3)
    layer.CreateField(fd)

    #Effective area - SqueeSAR DS / PS
    layer.CreateField(ogr.FieldDefn('EFF_AREA', ogr.OFTInteger))

    ##Load the dates from the time-series HDF5 file and create one attribute for each date
    ts_obj = timeseries(mintpyDict['TimeSeries'])
    ts_obj.open()
    for date in ts_obj.dateList:
        fd = ogr.FieldDefn('D{0}'.format(date), ogr.OFTReal)
        fd.SetWidth(8)
        fd.SetPrecision(2)
        layer.CreateField(fd)
    layerDefn = layer.GetLayerDefn()

    ####Total number of points
    mask = readfile.read(mintpyDict['Mask'])[0]
    nValid = np.sum(mask != 0)
    print('Number of points with time-series: ', nValid)

    gdal.TermProgress_nocb(0.0)

    ###Loop over all datasets in context managers to skip close statements
    with h5py.File(mintpyDict['TimeSeries'], 'r') as tsid:
        nLines = tsid['timeseries'].shape[1]
        nPixels = tsid['timeseries'].shape[2]

        with h5py.File(mintpyDict['Velocity'], 'r') as velid:
            with h5py.File(mintpyDict['Coherence'], 'r') as cohid:
                with h5py.File(mintpyDict['Geometry'], 'r') as geomid:
                    #Start counter
                    counter = 1

                    #For each line
                    for line in range(nLines):
                        coh = cohid['temporalCoherence'][line, :].astype(np.float64)
                        vel = velid['velocity'][line, :].astype(np.float64)
                        velstd = velid['velocityStd'][line, :].astype(np.float64)
                        ts = tsid['timeseries'][:, line, :].astype(np.float64)
                        lat = geomid['latitude'][line, :].astype(np.float64)
                        lon = geomid['longitude'][line, :].astype(np.float64)
                        hgt = geomid['height'][line, :].astype(np.float64)

                        for ii in range(nPixels):
                            #If velocity is zero, don't include. What about the ref pixel?
                            #Reference point is included in maskTempCoh.h5
                            if mask[line, ii] == 0:
                                continue

                            #Create metadata dict
                            rdict = {'CODE': hex(counter)[2:].zfill(8),
                                     'HEIGHT': hgt[ii],
                                     'H_STDEV': 0.,
                                     'VEL': vel[ii] * 1000,
                                     'V_STDEV': velstd[ii] * 1000,
                                     'COHERENCE': coh[ii],
                                     'EFF_AREA': 1}

                            for ind, date in enumerate(ts_obj.dateList):
                                rdict['D{0}'.format(date)] = ts[ind, ii] * 1000

                            #Create feature with definition
                            feature = ogr.Feature(layerDefn)
                            addMetadata(feature, [lon[ii], lat[ii]], rdict)
                            layer.CreateFeature(feature)

                            feature = None
                            counter = counter + 1
                            gdal.TermProgress_nocb(counter / nValid)
def progressbar(self, complete=0.0): """Print progressbar for float value 0..1""" gdal.TermProgress_nocb(complete)
ds.GetRasterBand(i + 1).ComputeStatistics(False, gdal.TermProgress_nocb)

# How about using the gdal callback function with my own stuff? Let's just
# list all of the files in the current directory and pretend to do something
# with them.
def process_file(fn):
    # Slow things down a bit by counting to 1,000,000 for each file.
    for i in range(1000000):
        pass  # do nothing

list_of_files = os.listdir('.')
for i in range(len(list_of_files)):
    process_file(list_of_files[i])
    gdal.TermProgress_nocb(i / float(len(list_of_files)))
gdal.TermProgress_nocb(1.0)  # expects a fraction in [0, 1], so 1.0 marks done

###################### 9.8 Exceptions and error handlers ####################

os.chdir(os.path.join(data_dir, 'Switzerland'))

# This will fail because the second filename has an extra f at the end. The
# first one is the only one that will get statistics calculated.
file_list = ['dem_class.tif', 'dem_class2.tiff', 'dem_class3.tif']
for fn in file_list:
    ds = gdal.Open(fn)
    ds.GetRasterBand(1).ComputeStatistics(False)

# You could check to see if the file could be opened and skip it if not.
for fn in file_list:
    ds = gdal.Open(fn)
    if ds is None:  # gdal.Open returns None on failure by default
        print('Could not open {}'.format(fn))
        continue
    ds.GetRasterBand(1).ComputeStatistics(False)
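An alternative to the check-and-skip pattern above, matching the section heading, is to make GDAL raise Python exceptions instead of returning None; `gdal.UseExceptions()` is real GDAL API, while the loop around it is a minimal sketch.

gdal.UseExceptions()
for fn in file_list:
    try:
        ds = gdal.Open(fn)  # now raises RuntimeError instead of returning None
        ds.GetRasterBand(1).ComputeStatistics(False)
    except RuntimeError as e:
        print('Skipping {}: {}'.format(fn, e))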