def run(self):
    """Cut one tile out of the input image with gdal_translate.

    The pixel window is given by this task's ``min_x``/``min_y`` offset and
    ``tile_width``/``tile_height`` size; the result lands at ``self.output().path``.
    """
    source_image = os.path.join(
        self.datapath,
        "input",
        self.input_dataset,
        "images",
        self.filename + ".tif",
    )
    # -srcwin xoff yoff xsize ysize: copy a pixel sub-window of the source.
    # `sh` flattens the list and stringifies each element for the CLI call.
    translate_args = [
        "-srcwin",
        self.min_x,
        self.min_y,
        self.tile_width,
        self.tile_height,
        source_image,
        self.output().path,
    ]
    sh.gdal_translate(translate_args)
def gif2tif(self, start, end):
    """Convert `prediction output`_ from GIF to TIFF.

    Will use extent.json and gdal_translate to recover geospatial
    information lost to GIF format.

    Parameters:

    - `start`: beginning of prediction range.
    - `end`: ending of prediction range, inclusive.

    Start and end parameters are necessary to figure out paths.

    .. _`prediction output`:
        http://www.ncgia.ucsb.edu/projects/gig/About/dtImOut.htm
    """
    predict_dir = join(self.output_path, 'predict')
    with open(join(self.input_path, 'extent.json')) as extent_file:
        extent = json.load(extent_file)
    # 0-based indices of the last column/row, used as pixel coordinates
    # for the corner GCPs below.
    last_col = str(int(extent["columns"]) - 1)
    last_row = str(int(extent["rows"]) - 1)
    epsg = 'EPSG:' + extent["epsg"]
    xmin = str(extent["xmin"])
    xmax = str(extent["xmax"])
    ymin = str(extent["ymin"])
    ymax = str(extent["ymax"])
    self.save_status('gif2tif')
    # First predicted year is start + 1; `end` is inclusive.
    for year in range(start + 1, end + 1):
        gif = join(predict_dir, "%s_urban_%s.gif" % (self.location, year))
        tmp_tif = join(predict_dir,
                       "%s_urban_%s_tmp.tif" % (self.location, year))
        tmp_xml = join(
            predict_dir,
            "%s_urban_%s_tmp.tif.aux.xml" % (self.location, year))
        tif = join(predict_dir, "%s_urban_%s.tif" % (self.location, year))
        # Georeference the GIF by pinning its four corner pixels to the
        # extent bounding box, writing a Float64 GTiff.
        gdal_translate('-a_srs', epsg, '-ot', 'Float64', '-of', 'GTiff',
                       '-gcp', '0', last_row, xmin, ymin,
                       '-gcp', last_col, last_row, xmax, ymin,
                       '-gcp', last_col, '0', xmax, ymax,
                       '-gcp', '0', '0', xmin, ymax,
                       gif, tmp_tif)
        # Zero out band values >= 9 (keep values below 9 unchanged).
        otbcli_BandMath('-il', tmp_tif, '-out', tif,
                        '-exp', 'im1b1 < 9 ? im1b1 : 0')
        # Best-effort cleanup of intermediates. Narrowed from a bare
        # `except:` so that only filesystem errors are ignored, not
        # e.g. KeyboardInterrupt.
        try:
            os.remove(tmp_tif)
            os.remove(tmp_xml)
        except OSError:
            pass
def process_layer(self, typename):
    """Rasterize the point shapefile for *typename* into a GTiff.

    Unpacks ``../layer.zip`` into ``/tmp/layer`` and runs a SAGA pipeline:
    two outlier-filter passes on MASA_HUMEDO, gridding, gap closing,
    clipping to the points' convex hull, and gaussian smoothing. Returns
    the path of the resulting raster.
    """
    fname_in = '%s.shp' % typename
    fname_out = '%s_rasterized.tiff' % typename
    # Remember where we were; `sh.cd` changes the process-wide cwd.
    previous_dir = str(sh.pwd()).strip()
    try:
        sh.cd('/tmp/layer')
        # Clean workspace, then unpack the uploaded layer archive.
        sh.rm("-rf", sh.glob('*'))
        sh.unzip('../layer.zip')
        # Two filtering passes over the MASA_HUMEDO field with
        # different methods/percentiles.
        saga_cmd.shapes_points(
            "Points Filter",
            POINTS=fname_in, FIELD="MASA_HUMEDO", FILTER="tmp1.shp",
            RADIUS=100, MINNUM=25, MAXNUM=200, METHOD=4, PERCENT=15,
        )
        saga_cmd.shapes_points(
            "Points Filter",
            POINTS="tmp1.shp", FIELD="MASA_HUMEDO", FILTER="tmp2.shp",
            RADIUS=100, MINNUM=25, MAXNUM=200, METHOD=5, PERCENT=90,
        )
        # Interpolate the filtered points onto a grid.
        saga_cmd.grid_gridding(
            "Shapes to Grid",
            INPUT="tmp2.shp", FIELD="MASA_HUMEDO", MULTIPLE=4,
            LINE_TYPE=0, GRID_TYPE=3, USER_SIZE=0.0001, TARGET=0,
            USER_GRID="tmp3.sgrd",
        )
        saga_cmd.grid_tools("Close Gaps", INPUT="tmp3.sgrd", RESULT="tmp4.sgrd")
        # Clip the grid to the convex hull of the surviving points.
        saga_cmd.shapes_points(
            "Convex Hull",
            SHAPES="tmp2.shp", HULLS="tmphull.shp", POLYPOINTS=0,
        )
        saga_cmd.shapes_grid(
            "Clip Grid with Polygon",
            INPUT="tmp4.sgrd", OUTPUT="tmp5.sgrd", POLYGONS="tmphull.shp",
        )
        saga_cmd.grid_filter(
            "Gaussian Filter",
            INPUT="tmp5.sgrd", RESULT="tmp6", SIGMA=3, MODE=1, RADIUS=50,
        )
        # Export the smoothed SAGA grid as a GTiff.
        sh.gdal_translate("-of", "gtiff", "tmp6.sdat", fname_out)
    finally:
        sh.cd(previous_dir)
    return '/tmp/layer/%s' % fname_out
def downsample(input: Path, factor: int, anti_alias: bool):
    """Shrink every IMAGE dataset in an HDF5 package by *factor* in place.

    Each image is subsampled (simple strided slicing, every *factor*-th
    pixel), its geotransform and any parent res-group resolution are
    rescaled to match, the file is h5repack'ed to reclaim space, and a
    sibling fmask image (if present) is resized with gdal_translate to
    the new NBAR dimensions.

    Raises ValueError if no NBAR image is found in the file.
    """
    # TODO(review): `anti_alias` is accepted but never used — the strided
    # slice below does no filtering. Confirm whether it should gate a
    # smoothing step or be removed from the signature.
    # Fail early if h5repack cli command is not available.
    from sh import h5repack, gdal_translate

    granule_name = find_a_granule_name(input)
    fmask_image = input.with_name(f"{granule_name}.fmask.img")

    nbar_size = None
    # "r+" is required: we delete and re-create datasets below, and
    # recent h5py versions open read-only by default.
    with h5py.File(input, "r+") as f:
        image_paths = find_h5_paths(f, "IMAGE")

        for i, image_path in enumerate(image_paths):
            old_image: Optional[h5py.Dataset] = f[image_path]

            def info(msg: str):
                secho(
                    f"{i: 4}/{len(image_paths)} {style(repr(image_path), fg='blue')}: {msg}"
                )

            old_shape = old_image.shape
            # Too small to shrink further; leave untouched.
            if all(dim_size < factor for dim_size in old_shape):
                info("Skipping")
                continue

            attrs = dict(old_image.attrs.items())
            old_geotransform = attrs["geotransform"]
            # Plain subsampling: keep every `factor`-th pixel in each axis.
            new_data = old_image[()][::factor, ::factor]
            new_shape = new_data.shape
            info(f"New shape: {new_shape!r}")
            # Drop our handle before deleting the dataset from the file.
            del old_image

            del f[str(image_path)]
            folder, name = image_path.rsplit("/", 1)
            parent: h5py.Group = f[str(folder)]

            image = parent.create_dataset(name, new_shape, data=new_data)
            # Scale pixel-size terms so the raster still covers the same
            # geographic extent.
            new_geotransform = list(old_geotransform)
            new_geotransform[1] *= old_shape[1] / new_shape[1]
            new_geotransform[5] *= old_shape[0] / new_shape[0]
            attrs["geotransform"] = new_geotransform
            image.attrs.update(attrs)

            # Update any res group with the new resolution.
            res_group_path = _get_res_group_path(image_path)
            if res_group_path:
                res_group = f[res_group_path]
                res_group.attrs["resolution"] = [
                    abs(new_geotransform[5]),
                    abs(new_geotransform[1]),
                ]

            if "/NBAR/" in image_path:
                nbar_size = new_shape

        if nbar_size is None:
            raise ValueError("No nbar image found?")

    # We need to repack the file to actually free up the space.
    repacked = input.with_suffix(".repacked.h5")
    h5repack("-f", "GZIP=5", input, repacked)
    repacked.rename(input)

    if fmask_image.exists():
        secho(f"Scaling fmask {fmask_image}")
        # Path.suffix already includes the leading dot, so don't add a
        # second one (previously produced names like "x.tmp..img").
        tmp = fmask_image.with_suffix(f".tmp{fmask_image.suffix}")
        gdal_translate("-outsize", nbar_size[1], nbar_size[0], fmask_image, tmp)
        tmp.rename(fmask_image)