def Ampcoroff2Velo(self, ref_raster=None, datedelta=None, velo_or_pixel='velo'):
    """
    ref_raster: a SingleRaster object used as the georeference for this pixel tracking run.
    datedelta: a timedelta object giving the time span between the two input images.
    These arguments override the settings from self.ini, if self.ini exists.

    The final output is:
    1. self.velo_x  -> the x component of velocity (m/day) wherever Ampcor produced a match
    2. self.velo_y  -> the y component of velocity (m/day) ...
    3. self.snr     -> the signal-to-noise ratio ...
    4. self.err_x   -> the x component of the velocity error (m/day) ...
    5. self.err_y   -> the y component of the velocity error (m/day) ...
    Each of these is an N-by-3 array whose columns are
    1) projected x coordinate, 2) projected y coordinate, and 3) the quantity itself.
    """
    if ref_raster is None:
        ref_raster = SingleRaster(self.ini.imagepair['image1'], date=self.ini.imagepair['image1_date'])
    if datedelta is None:
        a = SingleRaster(self.ini.imagepair['image1'], date=self.ini.imagepair['image1_date'])
        b = SingleRaster(self.ini.imagepair['image2'], date=self.ini.imagepair['image2_date'])
        datedelta = b.date - a.date
    geot = ref_raster.GetGeoTransform()
    ulx = geot[0]
    uly = geot[3]
    xres = geot[1]
    yres = geot[5]
    if velo_or_pixel == 'velo':
        # Columns of self.data, as implied by the conversions below:
        # 0) ref x pixel, 1) x offset, 2) ref y pixel, 3) y offset,
        # 4) SNR, 5) x covariance, 6) y covariance.
        self.data[:, 0] = ulx + (self.data[:, 0] - 1) * xres               # pixel -> projected x
        self.data[:, 1] = self.data[:, 1] * abs(xres) / datedelta.days     # offset -> m/day
        self.data[:, 2] = uly + (self.data[:, 2] - 1) * yres               # pixel -> projected y
        self.data[:, 3] = self.data[:, 3] * abs(yres) / datedelta.days
        self.data[:, 5] = np.sqrt(self.data[:, 5]) / datedelta.days
        self.data[:, 6] = np.sqrt(self.data[:, 6]) / datedelta.days
        self.velo_x = self.data[:, [0, 2, 1]]
        self.velo_y = self.data[:, [0, 2, 3]]
        self.velo_y[:, -1] = -self.velo_y[:, -1]    # UL-LR system to Cartesian
        self.snr = self.data[:, [0, 2, 4]]
        self.err_x = self.data[:, [0, 2, 5]]
        self.err_y = self.data[:, [0, 2, 6]]
    elif velo_or_pixel == 'pixel':
        self.data[:, 0] = ulx + (self.data[:, 0] - 1) * xres
        self.data[:, 2] = uly + (self.data[:, 2] - 1) * yres
        self.velo_x = self.data[:, [0, 2, 1]]
        self.velo_y = self.data[:, [0, 2, 3]]
        self.velo_y[:, -1] = -self.velo_y[:, -1]    # UL-LR system to Cartesian
        self.snr = self.data[:, [0, 2, 4]]
        self.err_x = self.data[:, [0, 2, 5]]
        self.err_y = self.data[:, [0, 2, 6]]
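# Usage sketch (hedged): assuming `task` is the object that owns Ampcoroff2Velo and
# already carries the Ampcor match results in `task.data` (an N-by-7 ndarray). The
# paths, date strings, and 16-day span below are hypothetical, not from this repo:
#
#     ref = SingleRaster('image1.tif', date='2016-07-18')
#     task.Ampcoroff2Velo(ref_raster=ref, datedelta=timedelta(days=16))
#     vx = task.velo_x    # N-by-3: projected x, projected y, x velocity (m/day)
#     vy = task.velo_y    # same layout for the y component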
def GetImgPair(self, delimiter=','):
    """ Get image pairs from the contents of this csv file. """
    imgpairs = []
    with open(self.fpath, self.read_pythonver_dict[self.python_version]) as csvfile:
        csvcontent = csv.reader(csvfile, skipinitialspace=True, delimiter=delimiter)
        for row in csvcontent:
            row_obj = [SingleRaster(i) for i in row[:2]]
            imgpairs.append(row_obj)
    return imgpairs
def GetDEM(self, delimiter=','):
    """ Get DEMs from the contents of this csv file. Return a list of SingleRaster objects. """
    dems = []
    with open(self.fpath, self.read_pythonver_dict[self.python_version]) as csvfile:
        csvcontent = csv.reader(csvfile, skipinitialspace=True, delimiter=delimiter)
        next(csvcontent, None)    # skip the header row
        for row in csvcontent:
            dems.append(SingleRaster(*row[:3]))
    return dems
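# Usage sketch (hedged): GetDEM unpacks the first three csv columns into
# SingleRaster(*row[:3]); the column meanings below are an assumption based on
# how SingleRaster is constructed elsewhere (path, date, uncertainty):
#
#     fpath, date, uncertainty
#     dems/dem_20150412.tif, 2015-04-12, 1.2
#     dems/dem_20160503.tif, 2016-05-03, 0.8
#
#     demlist = csvfileobj.GetDEM()    # csvfileobj: the object owning GetDEM
#     print(demlist[0].fpath)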
def VeloCorrectionInfo(self):
    a = SingleRaster(self.ini.imagepair['image1'], date=self.ini.imagepair['image1_date'])
    b = SingleRaster(self.ini.imagepair['image2'], date=self.ini.imagepair['image2_date'])
    datedelta = b.date - a.date
    geot = a.GetGeoTransform()
    xres = geot[1]
    yres = geot[5]
    x_culled = self.z1[self.signal_idx]
    y_culled = self.z2[self.signal_idx]
    self.z1.MAD_median = np.median(x_culled)
    self.z1.MAD_std = np.std(x_culled, ddof=1)
    self.z1.MAD_mean = np.mean(x_culled)
    self.z2.MAD_median = np.median(y_culled)
    self.z2.MAD_std = np.std(y_culled, ddof=1)
    self.z2.MAD_mean = np.mean(y_culled)
    vx_zarray_velo = self.z1[:] * abs(xres) / datedelta.days
    vx_zarray_velo.MAD_median = self.z1.MAD_median * abs(xres) / datedelta.days
    vx_zarray_velo.MAD_std = self.z1.MAD_std * abs(xres) / datedelta.days
    vx_zarray_velo.MAD_mean = self.z1.MAD_mean * abs(xres) / datedelta.days
    vy_zarray_velo = self.z2[:] * abs(yres) / datedelta.days
    vy_zarray_velo.MAD_median = self.z2.MAD_median * abs(yres) / datedelta.days
    vy_zarray_velo.MAD_std = self.z2.MAD_std * abs(yres) / datedelta.days
    vy_zarray_velo.MAD_mean = self.z2.MAD_mean * abs(yres) / datedelta.days
    with open(self.ini.velocorrection['label_logfile'], 'w') as f:
        f.write('Total points over bedrock = {:6n}\n'.format(self.z1.size))
        f.write('-------- Unit: Pixels --------\n')
        f.write('median_x_px = {:6.3f}\n'.format(float(self.z1.MAD_median)))
        f.write('median_y_px = {:6.3f}\n'.format(float(self.z2.MAD_median)))
        f.write('std_x_px = {:6.3f}\n'.format(float(self.z1.MAD_std)))
        f.write('std_y_px = {:6.3f}\n'.format(float(self.z2.MAD_std)))
        f.write('mean_x_px = {:6.3f}\n'.format(float(self.z1.MAD_mean)))
        f.write('mean_y_px = {:6.3f}\n'.format(float(self.z2.MAD_mean)))
        f.write('-------- Unit: Velocity (L/T; most likely m/day) --------\n')
        f.write('median_x = {:6.3f}\n'.format(float(vx_zarray_velo.MAD_median)))
        f.write('median_y = {:6.3f}\n'.format(float(vy_zarray_velo.MAD_median)))
        f.write('std_x = {:6.3f}\n'.format(float(vx_zarray_velo.MAD_std)))
        f.write('std_y = {:6.3f}\n'.format(float(vy_zarray_velo.MAD_std)))
        f.write('mean_x = {:6.3f}\n'.format(float(vx_zarray_velo.MAD_mean)))
        f.write('mean_y = {:6.3f}\n'.format(float(vy_zarray_velo.MAD_mean)))
    return vx_zarray_velo, vy_zarray_velo
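# A minimal numeric check of the pixel-to-velocity conversion used above, with
# hypothetical numbers: a median x offset of 0.5 px over bedrock, on a 15 m grid,
# for a 16-day pair:
#
#     0.5 * abs(15.0) / 16    # = 0.469 m/day, i.e. MAD_median in velocity units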
def XYV2Raster(self, xyvfileprefix=None, ref_raster=None):
    """ xyvfileprefix: the prefix for the output xyv files. """
    if xyvfileprefix is None:
        xyvfileprefix = self.ini.rawoutput['label_geotiff']
    if ref_raster is None:
        ref_raster = SingleRaster(self.ini.imagepair['image1'], date=self.ini.imagepair['image1_date'])
    nodata_val = -9999.0
    self.xyv_velo_x[np.isnan(self.xyv_velo_x)] = nodata_val
    self.xyv_velo_y[np.isnan(self.xyv_velo_y)] = nodata_val
    self.xyv_mag[np.isnan(self.xyv_mag)] = nodata_val
    self.xyv_snr[np.isnan(self.xyv_snr)] = nodata_val
    self.xyv_err_x[np.isnan(self.xyv_err_x)] = nodata_val
    self.xyv_err_y[np.isnan(self.xyv_err_y)] = nodata_val
    vx_gtiff = xyvfileprefix + '_vx.tif'
    vy_gtiff = xyvfileprefix + '_vy.tif'
    mag_gtiff = xyvfileprefix + '_mag.tif'
    snr_gtiff = xyvfileprefix + '_snr.tif'
    errx_gtiff = xyvfileprefix + '_errx.tif'
    erry_gtiff = xyvfileprefix + '_erry.tif'
    xraster = SingleRaster(vx_gtiff)
    yraster = SingleRaster(vy_gtiff)
    magraster = SingleRaster(mag_gtiff)
    snrraster = SingleRaster(snr_gtiff)
    errxraster = SingleRaster(errx_gtiff)
    erryraster = SingleRaster(erry_gtiff)
    proj = ref_raster.GetProjection()
    xraster.XYZArray2Raster(self.xyv_velo_x, projection=proj)
    yraster.XYZArray2Raster(self.xyv_velo_y, projection=proj)
    magraster.XYZArray2Raster(self.xyv_mag, projection=proj)
    snrraster.XYZArray2Raster(self.xyv_snr, projection=proj)
    errxraster.XYZArray2Raster(self.xyv_err_x, projection=proj)
    erryraster.XYZArray2Raster(self.xyv_err_y, projection=proj)
    xraster.SetNoDataValue(nodata_val)
    yraster.SetNoDataValue(nodata_val)
    magraster.SetNoDataValue(nodata_val)
    snrraster.SetNoDataValue(nodata_val)
    errxraster.SetNoDataValue(nodata_val)
    erryraster.SetNoDataValue(nodata_val)    # was errxraster twice; erry was never flagged
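# Usage sketch (hedged; `task` owns the xyv_* arrays populated earlier in the run,
# and the prefix is a hypothetical example):
#
#     task.XYV2Raster(xyvfileprefix='run1/velo')
#     # -> run1/velo_vx.tif, _vy.tif, _mag.tif, _snr.tif, _errx.tif, _erry.tif,
#     #    all in image1's projection with -9999.0 as the nodata value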
""" early_datetime = datetime(2016, 7, 18, 20, 37, 6) later_datetime = datetime(2016, 8, 3, 20, 37, 9) day_interval = '16.00003472222222' """ # cmd = "\nxyz2grd " + east_xyz_path + " " + R + " -G" + east_grd_path + " -I" + str(ini.splitampcor['step'] * int(RESOLUTION)) + "=\n"; # cmd += "\nxyz2grd " + north_xyz_path + " " + R + " -G" + north_grd_path + " -I" + str(ini.splitampcor['step'] * int(RESOLUTION)) + "=\n"; """ cmd += "\ngawk '{print $1\" \"$2\" \"$4}' " + north_xyz_path + " | xyz2grd " + R + " \ -G" + snr_grd_path + " -I" + str(ini.splitampcor['step'] * int(RESOLUTION)) + "=\n"; cmd += "\ngrdmath " + east_grd_path + " " + day_interval + " DIV --IO_NC4_CHUNK_SIZE=c = " + east_grd_path + "\n"; cmd += "\ngrdmath " + north_grd_path + " " + day_interval + " DIV --IO_NC4_CHUNK_SIZE=c = " + north_grd_path + "\n"; cmd += "\ngrdmath " + north_grd_path + " " + east_grd_path + " HYPOT --IO_NC4_CHUNK_SIZE=c = " + mag_grd_path + "\n"; subprocess.call(cmd, shell=True) """ north_tif = SingleRaster(north_tif_path) north_tif.XYZ2Raster(north_xyz_path, projection=imgpair[0].GetProjection()) east_tif = SingleRaster(east_tif_path) east_tif.XYZ2Raster(east_xyz_path, projection=imgpair[0].GetProjection()) # please note that xxxxx_snrxyz.grd only counts the error from fitting a cross-correlation peak. # it does not include the offset between ref image and search image, which is also can be a significant source of errors. # # The unit in mag, north, east is pixel/day. # else: # print("\n***** \"" + east_grd_path + "\" already exists, assuming m/day velocity grids already made for this run...\n"); # sys.exit(0)
inipath = sys.argv[1]
ini = ConfParams(inipath)
ini.ReadParam()
ini.VerifyParam()
demlist = ini.GetDEM()

# ==== Warp all DEMs using gdalwarp ====
for dem in demlist:
    dem.Unify(ini.gdalwarp)

# ==== Compile the DEM time series (including data, time, and variance) ====
dem_timeseries = TimeSeriesDEM(demlist[0])
for i in range(1, len(demlist)):
    print('Add DEM: ' + demlist[i].fpath)
    dem_timeseries = dem_timeseries.AddDEM(demlist[i])

# ==== Weighted regression ====
dem_timeseries.Date2DayDelta()
dem_timeseries.SetWeight()
print("Start Polyfit; pixel number = " + str(dem_timeseries.shape[0] * dem_timeseries.shape[1]))
slope, intercept, slope_err, intercept_err = dem_timeseries.Polyfit(**ini.regression)

# ==== Write to file ====
dhdt_dem = SingleRaster('/'.join([ini.result['output_dir'], ini.result['gtiff_slope']]))
dhdt_dem.Array2Raster(slope, demlist[0])
dhdt_err_dem = SingleRaster('/'.join([ini.result['output_dir'], ini.result['gtiff_slope_err']]))
dhdt_err_dem.Array2Raster(slope_err, demlist[0])
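# Invocation sketch (hedged; the script name is hypothetical): this script takes a
# single ini file argument,
#
#     python dem_dhdt.py settings.ini
#
# where settings.ini is assumed to carry at least the [result] keys
# output_dir/gtiff_slope/gtiff_slope_err and the [regression] keywords that are
# forwarded to Polyfit.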
def getUncertaintyDEM(demfpath, pointfilepath):
    dem = SingleRaster(demfpath)
    xyzfile_output = dem.GetPointsFromXYZ(pointfilepath)
    xyz = XYZFile(xyzfile_output, pointfilepath, demfpath)
    xyz.Read()
    return xyz.StatisticOutput(demfpath.replace('.tif', '_offset.png'))
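# Usage sketch (hypothetical paths): compare a DEM against a file of control
# points and write the offset-statistics figure next to the DEM:
#
#     stats = getUncertaintyDEM('dems/dem_20150412.tif', 'gcp/control_points.xyz')
#     # -> dems/dem_20150412_offset.png, plus whatever StatisticOutput returns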
parser.add_argument('config_file', help='Configuration file')
parser.add_argument('-s', '--step', help='Do a single step', dest='step')
args = parser.parse_args()

# ==== Read ini file ====
inipath = args.config_file
ini = ConfParams(inipath)
ini.ReadParam()
ini.VerifyParam()

# ==== Create two SingleRaster objects and make them ready for pixel tracking ====
if args.step == 'ampcor' or args.step is None:
    a = SingleRaster(ini.imagepair['image1'], date=ini.imagepair['image1_date'])
    b = SingleRaster(ini.imagepair['image2'], date=ini.imagepair['image2_date'])
    if ini.pxsettings['gaussian_hp']:
        a.GaussianHighPass(sigma=3)
        b.GaussianHighPass(sigma=3)
    a.AmpcorPrep()
    b.AmpcorPrep()
    # ==== Run main processes ====
    task = ampcor_task([a, b], ini)
    writeout_ampcor_task(task, ini)

if args.step == 'rawvelo' or args.step is None:
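# CLI sketch (hedged; the script name is hypothetical): run every step, or pick one
# with -s as parsed above:
#
#     python pixeltrack.py params.ini              # all steps
#     python pixeltrack.py params.ini -s ampcor    # only the Ampcor matching step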
class DemPile(object):
    """
    Replaces TimeSeriesDEM. It avoids a huge numpy array (and the associated
    memory consumption) by stacking all DEMs into a dict array, which is what is
    stored in the intermediate pickle file.
    """

    def __init__(self, picklepath=None, refgeo=None, refdate=None, dhdtprefix=None):
        self.picklepath = picklepath
        self.dhdtprefix = dhdtprefix
        self.ts = None
        self.dems = []
        self.refdate = None
        if refdate is not None:
            self.refdate = datetime.strptime(refdate, '%Y-%m-%d')
        self.refgeo = refgeo
        self.refgeomask = None
        if refgeo is not None:
            self.refgeomask = refgeo.ReadAsArray().astype(bool)
        self.fitdata = {'slope': [], 'slope_err': [], 'residual': [], 'count': []}
        self.maskparam = {'max_uncertainty': 9999, 'min_time_span': 0}

    def AddDEM(self, dems):
        # ==== Add DEM object list ====
        if type(dems) is list:
            self.dems = self.dems + dems
        elif type(dems) is SingleRaster:
            self.dems.append(dems)
        else:
            raise ValueError("dems must be a SingleRaster object or a list of SingleRaster objects.")

    def SortByDate(self):
        # ==== Sort DEMs by date (ascending order) ====
        dem_dates = [i.date for i in self.dems]
        dateidx = np.argsort(dem_dates)
        self.dems = [self.dems[i] for i in dateidx]

    def SetRefGeo(self, refgeo):
        # ==== Prepare the reference geometry ====
        if type(refgeo) is str:
            self.refgeo = SingleRaster(refgeo)
        elif type(refgeo) is SingleRaster:
            self.refgeo = refgeo
        else:
            raise ValueError("refgeo must be either a SingleRaster object or a path to a GeoTIFF file.")
        self.refgeomask = self.refgeo.ReadAsArray().astype(bool)

    def SetRefDate(self, datestr):
        self.refdate = datetime.strptime(datestr, '%Y-%m-%d')

    def SetMaskParam(self, ini):
        if 'max_uncertainty' in ini.settings:
            self.maskparam['max_uncertainty'] = float(ini.settings['max_uncertainty'])
        if 'min_time_span' in ini.settings:
            self.maskparam['min_time_span'] = float(ini.settings['min_time_span'])

    def InitTS(self):
        # ==== Prepare an empty time series for each pixel of the reference geometry ====
        refgeo_Ysize = self.refgeo.GetRasterYSize()
        refgeo_Xsize = self.refgeo.GetRasterXSize()
        self.ts = [[{'date': [], 'uncertainty': [], 'value': []} for i in range(refgeo_Xsize)] for j in range(refgeo_Ysize)]
        print('total number of pixels to be processed: {}'.format(np.sum(self.refgeomask)))

    def ReadConfig(self, ini):
        self.picklepath = ini.result['picklefile']
        self.dhdtprefix = ini.result['dhdt_prefix']
        self.AddDEM(ini.GetDEM())
        self.SortByDate()
        self.SetRefGeo(ini.refgeometry['gtiff'])
        self.SetRefDate(ini.settings['refdate'])
        self.SetMaskParam(ini)

    @timeit
    def PileUp(self):
        # ==== Read every DEM and add it to the stack ====
        for i in range(len(self.dems)):
            print('{}) {}'.format(i + 1, os.path.basename(self.dems[i].fpath)))
            if self.dems[i].uncertainty <= self.maskparam['max_uncertainty']:
                datedelta = self.dems[i].date - self.refdate
                znew = Resample_Array(self.dems[i], self.refgeo, resamp_method='linear')
                znew_mask = np.logical_and(znew > 0, self.refgeomask)
                fill_idx = np.where(znew_mask)
                for m, n in zip(fill_idx[0], fill_idx[1]):
                    self.ts[m][n]['date'] += [datedelta.days]
                    self.ts[m][n]['uncertainty'] += [self.dems[i].uncertainty]
                    self.ts[m][n]['value'] += [znew[m, n]]
            else:
                print("This one won't be piled up because its uncertainty ({}) exceeds the maximum uncertainty allowed ({}).".format(self.dems[i].uncertainty, self.maskparam['max_uncertainty']))

    def DumpPickle(self):
        pickle.dump(self.ts, open(self.picklepath, "wb"))

    def LoadPickle(self):
        self.ts = pickle.load(open(self.picklepath, "rb"))

    @timeit
    def Polyfit(self):
        # ==== Create the final arrays ====
        self.fitdata['slope'] = np.ones_like(self.ts, dtype=float) * -9999
        self.fitdata['slope_err'] = np.ones_like(self.ts, dtype=float) * -9999
        self.fitdata['residual'] = np.ones_like(self.ts, dtype=float) * -9999
        self.fitdata['count'] = np.ones_like(self.ts, dtype=float) * -9999
        # ==== Weighted regression ====
        print('m total: ', len(self.ts))
        for m in range(len(self.ts)):
            if m % 100 == 0:
                print(m)
            for n in range(len(self.ts[0])):
                date = np.array(self.ts[m][n]['date'])
                uncertainty = np.array(self.ts[m][n]['uncertainty'])
                value = np.array(self.ts[m][n]['value'])
                # Whyjay, May 10, 2018: cancelled the fixed minimum date span
                # (date[-1] - date[0] > 0, previously > 200) in favor of the
                # configurable min_time_span below.
                if date.size >= 2 and date[-1] - date[0] > self.maskparam['min_time_span']:
                    slope, slope_err, residual, count = wlr_corefun(date, value, uncertainty)
                    if residual > 100:
                        print(date, value, uncertainty)
                    self.fitdata['slope'][m, n] = slope
                    self.fitdata['slope_err'][m, n] = slope_err
                    self.fitdata['residual'][m, n] = residual
                    self.fitdata['count'][m, n] = count
        # self.fitdata['count'][~self.refgeomask] = -9999

    def Fitdata2File(self):
        # ==== Write to file ====
        dhdt_dem = SingleRaster(self.dhdtprefix + '_dhdt.tif')
        dhdt_error = SingleRaster(self.dhdtprefix + '_dhdt_error.tif')
        dhdt_res = SingleRaster(self.dhdtprefix + '_dhdt_residual.tif')
        dhdt_count = SingleRaster(self.dhdtprefix + '_dhdt_count.tif')
        dhdt_dem.Array2Raster(self.fitdata['slope'], self.refgeo)
        dhdt_error.Array2Raster(self.fitdata['slope_err'], self.refgeo)
        dhdt_res.Array2Raster(self.fitdata['residual'], self.refgeo)
        dhdt_count.Array2Raster(self.fitdata['count'], self.refgeo)
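# Workflow sketch (hedged; `ini` is a ConfParams instance as in the driver scripts):
#
#     pile = DemPile()
#     pile.ReadConfig(ini)
#     pile.InitTS()
#     pile.PileUp()
#     pile.DumpPickle()     # so Polyfit can be rerun without re-reading the DEMs
#     pile.Polyfit()
#     pile.Fitdata2File()   # -> <dhdt_prefix>_dhdt.tif, _dhdt_error.tif, ...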
def splitAmpcor(ref_path, search_path, pair_dir='.', nproc=8, ref_x=32, ref_y=32, search_x=32, search_y=32, step=8):
    ref_img = SingleRaster(ref_path)
    ref_samples = ref_img.GetRasterXSize()
    ref_lines = ref_img.GetRasterYSize()
    ref_ulx, ref_xres, _, ref_uly, _, ref_yres = ref_img.GetGeoTransform()
    search_img = SingleRaster(search_path)
    search_samples = search_img.GetRasterXSize()
    search_lines = search_img.GetRasterYSize()    # was GetRasterXSize(); typo fixed
    search_ulx, _, _, search_uly, _, _ = search_img.GetGeoTransform()    # was ref_img, which forced zero offsets; fixed
    # Here we use the resolution from the ref image; problems may arise if the ref
    # and search images do not share the same resolution.
    # This replaces findOffset.py, since we only need the UL corner and the
    # resolution, which can be found in an image itself without header files.
    mean_x = round((ref_ulx - search_ulx) / ref_xres)    # round() gives an integer offset in pixels
    mean_y = round((search_uly - ref_uly) / ref_yres)
    ampcor_label = "r{:d}x{:d}_s{:d}x{:d}".format(ref_x, ref_y, search_x, search_y)
    for i in range(1, nproc + 1):
        lines_proc = ref_lines // nproc
        firstpix = 1 if mean_x >= 0 else 1 - mean_x
        finalpix = ref_samples if mean_x <= 0 else ref_samples - mean_x
        yoffset = 0 if mean_y >= 0 else -mean_y
        firstline = (i - 1) * lines_proc + 1 + yoffset
        lastline = firstline + lines_proc - 1
        if i == nproc:
            lastline = ref_lines if mean_y <= 0 else ref_lines - mean_y
        ampcor_in_file = pair_dir + "/ampcor_" + ampcor_label + "_" + str(i) + ".in"
        ampcor_off_file = "ampcor_" + ampcor_label + "_" + str(i) + ".off"
        with open(ampcor_in_file, "w") as outfile:
            outfile.write(" AMPCOR INPUT FILE\n")
            outfile.write("\n")
            outfile.write("DATA TYPE\n")
            outfile.write("\n")
            outfile.write("Data Type for Reference Image Real or Complex (-) = Real ![Complex , Real]\n")
            outfile.write("Data Type for Search Image Real or Complex (-) = Real ![Complex , Real]\n")
            outfile.write("\n")
            outfile.write("INPUT/OUTPUT FILES\n")
            outfile.write("\n")
            outfile.write("Reference Image Input File (-) = " + ref_path.split('/')[-1] + "\n")
            outfile.write("Search Image Input File (-) = " + search_path.split('/')[-1] + "\n")
            outfile.write("Match Output File (-) = " + ampcor_off_file + "\n")
            outfile.write("\n")
            outfile.write("MATCH REGION\n")
            outfile.write("\n")
            outfile.write("Number of Samples in Reference/Search Images (-) = {:d} {:d}\n".format(ref_samples, search_samples))
            outfile.write("Start, End and Skip Lines in Reference Image (-) = {:d} {:d} {:d}\n".format(firstline, lastline, step))
            outfile.write("Start, End and Skip Samples in Reference Image (-) = {:d} {:d} {:d}\n".format(firstpix, finalpix, step))
            outfile.write("\n")
            outfile.write("MATCH PARAMETERS\n")
            outfile.write("\n")
            outfile.write("Reference Window Size Samples/Lines (-) = {:d} {:d}\n".format(ref_x, ref_y))
            outfile.write("Search Pixels Samples/Lines (-) = {:d} {:d}\n".format(search_x, search_y))
            outfile.write("Pixel Averaging Samples/Lines (-) = 1 1\n")
            outfile.write("Covariance Surface Oversample Factor and Window Size (-) = 64 16\n")
            outfile.write("Mean Offset Between Reference and Search Images Samples/Lines (-) = {:d} {:d}\n".format(mean_x, mean_y))
            outfile.write("\n")
            outfile.write("MATCH THRESHOLDS AND DEBUG DATA\n")
            outfile.write("\n")
            outfile.write("SNR and Covariance Thresholds (-) = 0 10000000000\n")
            outfile.write("Debug and Display Flags T/F (-) = f f\n")
    return ampcor_label