def close(self): """ Close and flush the dataset, plus calculate stats """ if self.calcStats: calcstats.calcStats(self.ds, ignore=self.nullVal) self.ds.FlushCache() self.ds = None
def close(self): """ Calculate stats etc """ from rios import calcstats if self.mode != basedriver.READ: progress = self.controls.progress ignore = self.userClass.rasterIgnore calcstats.calcStats(self.ds, progress, ignore) self.ds.FlushCache() self.ds = None
def writeOutImg(inputArray, outfile, n, m, c, TLX, TLY, nulVal, proj, dType):
    # Write the output DEM into an image file with GDAL
    nBands = 1
    drvr = gdal.GetDriverByName('HFA')
    ds = drvr.Create(outfile, n, m, nBands, dType, ['COMPRESS=YES'])
    band = ds.GetRasterBand(1)
    band.WriteArray(inputArray)
    ds.SetGeoTransform((TLX, c, 0, TLY, 0, -c))
    ds.SetProjection(proj)
    progress = cuiprogress.CUIProgressBar()
    calcstats.calcStats(ds, progress, ignore=nulVal)
    del ds
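# Hypothetical usage sketch for writeOutImg() above. The array shape, pixel size,
# top-left coordinates, EPSG code and null value are illustrative assumptions,
# not values from the original script.
import numpy
from osgeo import gdal, osr

dem = numpy.zeros((200, 300), dtype=numpy.float32)    # m=200 rows, n=300 columns
sr = osr.SpatialReference()
sr.ImportFromEPSG(32755)                              # example UTM zone
writeOutImg(dem, 'dem.img', n=300, m=200, c=30.0, TLX=500000.0, TLY=6000000.0,
    nulVal=-999, proj=sr.ExportToWkt(), dType=gdal.GDT_Float32)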
def mainRoutine(): """ Main routine """ cmdargs = getCmdargs() info = sen2meta.Sen2TileMeta(filename=cmdargs.infile) ds = createOutfile(cmdargs.outfile, info) nullValDN = 1000 # Get a sorted list of the Sentinel-2 band names. Note that sometimes this # is an incomplete list of band names, which appears to be due to a bug in # earlier versions of ESA's processing software. I suspect it relates to # Anomaly number 11 in the following page. # https://sentinel.esa.int/web/sentinel/news/-/article/new-processing-baseline-for-sentinel-2-products bandNames = sorted(info.viewAzimuthDict.keys()) # Mean over all bands satAzDeg = numpy.array([info.viewAzimuthDict[i] for i in bandNames]) satAzDegMeanOverBands = satAzDeg.mean(axis=0) satZenDeg = numpy.array([info.viewZenithDict[i] for i in bandNames]) satZenDegMeanOverBands = satZenDeg.mean(axis=0) sunAzDeg = info.sunAzimuthGrid sunZenDeg = info.sunZenithGrid stackDeg = numpy.array( [satAzDegMeanOverBands, satZenDegMeanOverBands, sunAzDeg, sunZenDeg]) stackRadians = numpy.radians(stackDeg) stackDN = numpy.round(stackRadians / SCALE_TO_RADIANS).astype(numpy.int16) nullmask = numpy.isnan(stackDeg) stackDN[nullmask] = nullValDN lnames = ['SatelliteAzimuth', 'SatelliteZenith', 'SunAzimuth', 'SunZenith'] for i in range(ds.RasterCount): b = ds.GetRasterBand(i + 1) b.WriteArray(stackDN[i]) b.SetNoDataValue(nullValDN) b.SetDescription(lnames[i]) calcstats.calcStats(ds, ignore=nullValDN, progress=cuiprogress.SilentProgress()) del ds
def main(): """ Main routine for calling from command line. """ cmdargs = getCmdargs() for filename in cmdargs.imgfile: ds = gdal.Open(filename, gdal.GA_Update) # If no ignore value given, check if one is already defined in the file if cmdargs.ignore is not None: ignore = cmdargs.ignore else: b1 = ds.GetRasterBand(1) ignore = b1.GetNoDataValue() calcstats.calcStats(ds, ignore=ignore, approx_ok=cmdargs.approx) ds.FlushCache() # so entry points return success at command line return 0
def makeTestFile(imgfile, withRat=True):
    # Make a test image with a simple RAT
    nRows = 100
    nCols = 1
    ds = riostestutils.createTestFile(imgfile, numRows=nRows, numCols=nCols)

    imgArray = numpy.ones((nRows, nCols), dtype=numpy.uint8)
    imgArray[1:10, 0] = numpy.arange(1, 10)
    imgArray[50:, 0] = 0

    band = ds.GetRasterBand(1)
    band.WriteArray(imgArray)
    nullDN = 0
    calcstats.calcStats(ds, ignore=nullDN, progress=cuiprogress.SilentProgress())

    columnName = 'Value'
    # Note that the RAT has a row for lots of values which have no corresponding pixel
    ratValues = (numpy.mgrid[0:nRows] + 10).astype(numpy.int32)
    ratValues[0] = 500
    if withRat:
        rat.writeColumnToBand(band, columnName, ratValues)
        band.SetMetadataItem('LAYER_TYPE', 'thematic')
    del ds
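# A brief verification sketch for makeTestFile() above: re-open the file and read
# the RAT column back. This assumes rios.rat provides readColumnFromBand() as the
# counterpart of writeColumnToBand(); treat the call as an assumption. The
# filename is illustrative only.
from osgeo import gdal

makeTestFile('testrat.img')
ds = gdal.Open('testrat.img')
band = ds.GetRasterBand(1)
valuesReadBack = rat.readColumnFromBand(band, 'Value')
del ds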
def run(): """ Run a test of statistics calculation """ riostestutils.reportStart(TESTNAME) nullVal = 0 # We repeat the basic test for a number of different GDAL datatypes, with different # ranges of data. Each element of the following list is a tuple of # (gdalDataType, numpyDataType, scalefactor) # for which the test is run. The original data being scaled is in # the range 25-100 (after clobbering half the array as nulls, to ensure that # the nulls are enough to make a difference). dataTypesList = [(gdal.GDT_Byte, numpy.uint8, 1), (gdal.GDT_UInt16, numpy.uint16, 1), (gdal.GDT_Int16, numpy.int16, 300), (gdal.GDT_UInt16, numpy.uint16, 300), (gdal.GDT_Int32, numpy.int32, 30000), (gdal.GDT_UInt32, numpy.uint32, 30000), (gdal.GDT_Float32, numpy.float32, 1), (gdal.GDT_Float32, numpy.float32, 100), (gdal.GDT_Float32, numpy.float32, 0.01)] # We repeat these tests on a number of different drivers, if they are available, # as some stats-related things may work fine on some drivers but not on others. driverTestList = [ ('HFA', ['COMPRESS=YES'], 'img'), ('GTiff', ['COMPRESS=LZW', 'TILED=YES', 'INTERLEAVE=BAND'], 'tif'), ('KEA', [], 'kea') ] # Remove any which current GDAL not suporting driverTestList = [(drvrName, options, ext) for (drvrName, options, ext) in driverTestList if gdal.GetDriverByName(drvrName) is not None] # Loop over all drivers for (driverName, creationOptions, ext) in driverTestList: # Loop over all datatype tuples in the list for (fileDtype, arrDtype, scalefactor) in dataTypesList: imgfile = 'test.' + ext ds = riostestutils.createTestFile(imgfile, dtype=fileDtype, driverName=driverName, creationOptions=creationOptions) rampArr = riostestutils.genRampArray().astype( arrDtype) * scalefactor (nRows, nCols) = rampArr.shape # Set half of it to null rampArr[:, :nCols // 2] = nullVal band = ds.GetRasterBand(1) band.WriteArray(rampArr) del ds # Calculate the stats on the file ds = gdal.Open(imgfile, gdal.GA_Update) calcstats.calcStats(ds, progress=cuiprogress.SilentProgress(), ignore=nullVal) del ds # Read back the data as a numpy array ds = gdal.Open(imgfile) band = ds.GetRasterBand(1) rampArr = band.ReadAsArray() # Get stats from file, and from array, and compare stats1 = getStatsFromBand(band) stats2 = getStatsFromArray(rampArr, nullVal) iterationName = "%s %s scale=%s" % ( driverName, gdal.GetDataTypeName(fileDtype), scalefactor) # This relative tolerance is used for comparing the median and mode, # because those are approximate only, and the likely error depends on the # size of the numbers in question (thus it depends on the scalefactor). # Please do not make it any larger unless you have a really solid reason. relativeTolerance = 0.1 * scalefactor ok = compareStats(stats1, stats2, iterationName, relativeTolerance) del ds if os.path.exists(imgfile): os.remove(imgfile) if ok: riostestutils.report(TESTNAME, "Passed") else: riostestutils.report( TESTNAME, ("Note that the mode and median tests will fail in GDAL < 2.0, " + "unless the GDAL fixes suggested in tickets " + "http://trac.osgeo.org/gdal/ticket/4750 and " + "http://trac.osgeo.org/gdal/ticket/5289 are applied")) return ok
def run(): """ Run a test of RAT statistics calculation """ riostestutils.reportStart(TESTNAME) allOK = True imgfile = 'test.img' nRows = 100 nCols = 1 ds = riostestutils.createTestFile(imgfile, numRows=nRows, numCols=nCols) imgArray = numpy.ones((nRows, nCols), dtype=numpy.uint8) imgArray[1:10, 0] = numpy.arange(1, 10) imgArray[50:, 0] = 0 band = ds.GetRasterBand(1) band.WriteArray(imgArray) nullDN = 0 calcstats.calcStats(ds, ignore=nullDN, progress=cuiprogress.SilentProgress()) columnName = 'Value' # Note that the RAT has a row for lots of values which have no corresponding pixel ratValues = (numpy.mgrid[0:nRows]**2).astype(numpy.float64) ratValues[0] = 500 rat.writeColumnToBand(band, columnName, ratValues) band.SetMetadataItem('LAYER_TYPE', 'thematic') del ds ratStats = fileinfo.RatStats(imgfile, columnlist=[columnName]) # Construct an image of the values, by translating pixel values into RAT values ratValImg = numpy.zeros(imgArray.shape, dtype=numpy.float64) for dn in numpy.unique(imgArray): if dn != 0: mask = (imgArray == dn) ratValImg[mask] = ratValues[dn] ratValImgNonNull = ratValImg[imgArray != 0] # Now find the "true" values of the various stats for this image (i.e. this is the # histogramweighted=True case, which I think will be the most common one) trueMean = ratValImgNonNull.mean() trueStddev = ratValImgNonNull.std() trueMin = ratValImgNonNull.min() trueMax = ratValImgNonNull.max() tolerance = 0.000001 if not equalTol(ratStats.Value.mean, trueMean, tolerance): riostestutils.report( TESTNAME, "Mismatched means: %s, %s" % (repr(ratStats.Value.mean), repr(trueMean))) allOK = False if not equalTol(ratStats.Value.stddev, trueStddev, tolerance): riostestutils.report( TESTNAME, "Mismatched stddevs: %s, %s" % (repr(ratStats.Value.stddev), repr(trueStddev))) allOK = False if not equalTol(ratStats.Value.min, trueMin, tolerance): riostestutils.report( TESTNAME, "Mismatched mins: %s, %s" % (repr(ratStats.Value.min), repr(trueMin))) allOK = False if not equalTol(ratStats.Value.max, trueMax, tolerance): riostestutils.report( TESTNAME, "Mismatched maxes: %s, %s" % (repr(ratStats.Value.max), repr(trueMax))) allOK = False if allOK: riostestutils.report(TESTNAME, "Passed") return allOK