def compute_sketches():
    height_maps = []
    sketch_maps = []
    for filename in Path('./data_downsampled_blurred').glob('**/*.tif'):
        # The detailed (full-resolution) tile lives under ./data with the same name.
        file_id = str(filename).split('/')[-1]
        detailed_data = gr.from_file('./data/' + file_id)
        data = gr.from_file(str(filename))
        # Skip mostly-empty tiles.
        if data.mean() < 5:
            continue
        ridges, peaks = compute_ridges(filename)
        rivers, basins = compute_rivers(filename)
        # Normalize the detailed height map to [-1, 1].
        height_map = np.array(detailed_data.raster, dtype=np.float32)
        height_map = np.expand_dims(height_map, axis=-1)
        height_map = (height_map - np.amin(height_map)) / (
            np.amax(height_map) - np.amin(height_map))
        height_map = height_map * 2 - 1
        # Stack the four sketch channels into a single (H, W, 4) array.
        sketch_map = np.stack((ridges, rivers, peaks, basins), axis=2)
        sketch_map = np.squeeze(sketch_map, axis=-1)
        print(sketch_map.shape)
        height_maps.append(height_map)
        sketch_maps.append(sketch_map)
    training_output = np.array(height_maps, dtype=np.float32)
    training_input = np.array(sketch_maps, dtype=np.float32)
    np.savez('training_data.npz', x=training_input, y=training_output)
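# A minimal sketch of consuming the archive written by compute_sketches()
# above; the keys 'x' and 'y' come from the np.savez call, while the model
# object is hypothetical -- any Keras-style fit() would do.
import numpy as np

training = np.load('training_data.npz')
sketches, heights = training['x'], training['y']
# model.fit(sketches, heights, batch_size=8, epochs=100)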
def read_images(self):
    self.im_red = gr.from_file(self.path_red)
    self.im_green = gr.from_file(self.path_green)
    self.im_blue = gr.from_file(self.path_blue)
    self.im_nir = gr.from_file(self.path_nir)
    self.im_rededge = gr.from_file(self.path_rededge)
    self.load_List_P()
def load_tile(basedirs, region, skip_truth=False):
    d, city, id = region.split('.')
    BASEDIR = [x for x in basedirs if city in x][0]
    fname = '{}/MUL-PanSharpen/MUL-PanSharpen_{}_img{}.tif'.format(BASEDIR, city, id)
    data1 = georasters.from_file(fname).raster
    data1 = data1.filled(0)
    data1 = numpy.transpose(data1, (2, 1, 0))
    fname = '{}/PAN/PAN_{}_img{}.tif'.format(BASEDIR, city, id)
    data2 = georasters.from_file(fname).raster
    data2 = data2.filled(0)
    data2 = numpy.transpose(data2, (1, 0))
    # Pack the 8 pan-sharpened bands plus the panchromatic band into uint8.
    input_im = numpy.zeros((1300, 1300, 9), dtype='uint8')
    for i in range(8):  # xrange in the Python 2 original
        input_im[:, :, i] = (data1[:, :, i] / 8).astype('uint8')
    input_im[:, :, 8] = (data2 / 8).astype('uint8')
    if not skip_truth:
        fname = '{}/{}.png'.format(TARGETS, region)
        if not os.path.isfile(fname):
            return None
        # scipy.ndimage.imread was removed in SciPy 1.2; imageio.imread is
        # the usual replacement.
        output_im = scipy.ndimage.imread(fname)
        if len(output_im.shape) == 3:
            output_im = 255 - output_im[:, :, 0:1]
            output_im = (output_im > 1).astype('uint8') * 255
        else:
            output_im = numpy.expand_dims(output_im, axis=2)
    else:
        output_im = numpy.zeros(
            (1300 // OUTPUT_SCALE, 1300 // OUTPUT_SCALE, OUTPUT_CHANNELS),
            dtype='uint8')
    return input_im, numpy.swapaxes(output_im, 0, 1)
def test_raster_diff(self):
    height = gr.from_file(
        os.path.expanduser(
            "~/Downloads/datasets/elevation/one_deg_height.tif")).raster
    stddev = gr.from_file(
        os.path.expanduser(
            "~/Downloads/datasets/elevation/one_deg_stddev.tif.tif")).raster
    out = height - stddev
    print(np.ma.min(out), np.ma.max(out), np.ma.average(out), np.ma.sum(out))
    plt.imshow(out)
    plt.show()
def test_get_dims(self):
    # from http://www.viewfinderpanoramas.org/dem3.html
    DATA = "~/Downloads/datasets/elevation/viewfinder_dem3/15-J.tif"
    DATA = os.path.expanduser(DATA)
    data = gr.from_file(DATA)
    print(data.geot)
    NDV, xsize, ysize, GeoT, Projection, DataType = gr.get_geo_info(DATA)
    print(NDV, xsize, ysize, GeoT, DataType)
def aggregate(input_rst, output_rst, scale):
    """
    Aggregate (downsample) a raster by a given factor, replacing the
    no-data value with 0.

    Args:
        input_rst: path to the input raster in a format supported by georasters
        output_rst: path to the scaled output raster in a format supported by georasters
        scale: the integer factor by which the raster is aggregated

    Returns:
        Saves the output raster to disk.

    # https://github.com/pasquierjb/GIS_RS_utils/blob/master/aggregate_results.py
    """
    import georasters as gr

    input_gr = gr.from_file(input_rst)

    # No-data values are replaced with 0 to prevent summing them in each block.
    # Count the no-data cells before overwriting them; the original printed
    # len() of the boolean array, which only gives the number of rows.
    print(
        (input_gr.raster.data.astype(np.float32) == np.float32(
            input_gr.nodata_value)).sum())
    input_gr.raster.data[input_gr.raster.data.astype(np.float32) == np.float32(
        input_gr.nodata_value)] = 0
    input_gr.nodata_value = 0

    output_gr = input_gr.aggregate(block_size=(scale, scale))
    # GeoRaster.to_tiff appends ".tif" itself, so strip the extension first.
    output_gr.to_tiff(output_rst.replace(".tif", ""))
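# A minimal usage sketch of the helper above, assuming numpy is imported as
# np (the function body references it); the file names are hypothetical.
import numpy as np

aggregate("population_1km.tif", "population_10km.tif", scale=10)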
def test_main():
    import georasters as gr
    raster = os.path.join(DATA, 'pre1500.tif')
    A = gr.from_file(raster)
    assert A.count() == 2277587
    assert A.min() == 0
    assert A.projection.ExportToProj4() == '+proj=longlat +datum=WGS84 +no_defs '
def extract_patches_from_raster():
    count = 0
    for raster_file in Path('./world_map').glob('**/*.tif'):
        data = gr.from_file(str(raster_file))
        # Split the raster into non-overlapping 225x225 tiles.
        raster_blocks = view_as_blocks(data.raster, (225, 225))
        for i in range(raster_blocks.shape[0]):
            for j in range(raster_blocks.shape[1]):
                raster_data = raster_blocks[i, j]
                # Blur and halve each tile with a Gaussian pyramid step.
                src = cv2.pyrDown(raster_data,
                                  dstsize=(raster_data.shape[1] // 2,
                                           raster_data.shape[0] // 2))
                # Note: both outputs inherit the geotransform of the full
                # raster, so per-tile georeferencing is only approximate.
                data_out_downsampled = gr.GeoRaster(
                    src,
                    data.geot,
                    nodata_value=data.nodata_value,
                    projection=data.projection,
                    datatype=data.datatype,
                )
                data_out_downsampled.to_tiff(
                    './data_downsampled_blurred/data_q' + str(count) + str(i) + str(j))
                data_out = gr.GeoRaster(
                    raster_data,
                    data.geot,
                    nodata_value=data.nodata_value,
                    projection=data.projection,
                    datatype=data.datatype,
                )
                data_out.to_tiff('./data/data_q' + str(count) + str(i) + str(j))
        count += 1
def test_br(self):
    # from http://www.viewfinderpanoramas.org/dem3.html
    DATA = "~/Downloads/datasets/elevation/viewfinder_dem3/15-J.tif"
    DATA = os.path.expanduser(DATA)
    data = gr.from_file(DATA)
    aggregated = br_wrapper(data, 1, 1)
    print(aggregated)
    plt.imshow(aggregated.raster)
    plt.show()
def get(self, date):
    try:
        data = gr.from_file("./dados/Daily/prec4kmclim_" + str(date)[0:2] +
                            "_" + str(date)[2:4] + "_1998.tif")
        df = data.to_pandas().head(100)
        return jsonify(df.to_dict())
    except Exception:
        return jsonify({'info': 'invalid date for the daily merge'})
def get(self, date):
    try:
        data = gr.from_file("./dados/Yearly/prec4km_masked_02_01_" +
                            str(date) + ".tif")
        df = data.to_pandas().head(100)
        return jsonify(df.to_dict())
    except Exception:
        return jsonify({'info': 'invalid year for the yearly merge'})
def test_extract():
    import georasters as gr
    raster = os.path.join(DATA, 'pre1500.tif')
    data = gr.from_file(raster)
    (xmin, xsize, x, ymax, y, ysize) = data.geot
    (x, y) = (xmin + 2507 * xsize, ymax + 1425 * ysize)
    assert data.raster[gr.map_pixel(x, y, data.x_cell_size, data.y_cell_size,
                                    data.xmin, data.ymax)] == data.extract(x, y).max()
    assert data.raster[gr.map_pixel(x, y, data.x_cell_size, data.y_cell_size,
                                    data.xmin, data.ymax)] == data.map_pixel(x, y)
def rast_to_df(p):
    # The last four characters of the file stem are assumed to be the year.
    y = p.split("/")[-1].split(".")[0][-4:]
    rast = gr.from_file(p)
    # Shift coordinates to the centroid of each cell.
    df2 = rast.to_pandas()
    df2['x'] = df2.x + rast.x_cell_size / 2
    df2['y'] = df2.y + rast.y_cell_size / 2
    df2['year'] = int(y)
    return df2
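# Hypothetical usage: stack one raster per year into a single long frame.
# The file pattern is illustrative; rast_to_df() reads the year from the name.
import glob
import pandas as pd

frames = [rast_to_df(p) for p in sorted(glob.glob("rasters/prec_*.tif"))]
panel = pd.concat(frames, ignore_index=True)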
def get(self, date):
    try:
        mes = str(date)
        if int(mes) < 10:
            mes = "0" + mes
        data = gr.from_file("./dados/Monthly/prec4kmclim_masked_02_" + mes +
                            "_1998.tif")
        df = data.to_pandas().head(100)
        return jsonify(df.to_dict())
    except Exception:
        return jsonify({'info': 'invalid month for the monthly merge'})
def test_aggregate(self):
    # from http://www.viewfinderpanoramas.org/dem3.html
    DATA = "~/Downloads/datasets/elevation/viewfinder_dem3/15-J.tif"
    DATA = os.path.expanduser(DATA)
    x_ranges = list(gen_ranges(0, 60, 1))
    y_ranges = list(gen_ranges(45, 0, 1))
    data = gr.from_file(DATA)
    aggregated = aggregate_grid(data, x_ranges, y_ranges)
    print(aggregated)
    plt.imshow(aggregated)
    plt.show()
def read_tiff2df(f, bound=(-180, 180, -90, 90)):
    """
    Read a TIFF file into a DataFrame; bound is the bounding box
    (minx, maxx, miny, maxy).
    """
    minx, maxx, miny, maxy = bound
    data = gr.from_file(f)
    df = data.to_pandas()
    if bound == (-180, 180, -90, 90):
        return df
    return df[(df['x'] >= minx) & (df['x'] <= maxx) &
              (df['y'] >= miny) & (df['y'] <= maxy)]
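# Hypothetical usage: restrict a global raster to a rough bounding box
# around Brazil (file name and coordinates are illustrative).
df_brazil = read_tiff2df("global_prec.tif", bound=(-75, -33, -34, 6))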
def fileToDataframe(file, columnName):
    if '.xyz' in file:
        df = pandas.read_csv(file, delim_whitespace=True,
                             encoding="utf-8-sig", dtype=numpy.float64)
        df = df.interpolate()
    else:
        df = gr.from_file(file).to_pandas()
        df = df[["x", "y", "value"]].copy()
    # x is longitude and y is latitude, so label the columns accordingly
    # (the original assigned ['Lat', 'Long', ...], swapping the two).
    df.columns = ['Long', 'Lat', columnName]
    return df
def main():
    # DATA = "../data/relief_san_andres.tif"
    # from http://www.viewfinderpanoramas.org/dem3.html
    DATA = "~/Downloads/datasets/elevation/viewfinder_dem3/15-J.tif"
    DATA = os.path.expanduser(DATA)
    data = gr.from_file(DATA)
    (xmin, xsize, x, ymax, y, ysize) = data.geot
    print(data.geot)
    NDV, xsize, ysize, GeoT, Projection, DataType = gr.get_geo_info(DATA)
    print(NDV, xsize, ysize, GeoT, DataType)
    print(Projection)

    # The max and min coordinates are the edges plus half the cell size, so
    # index into regions that are a multiple of the raster dimensions.

    # Find the corners of the 1-degree grid cell containing the raster origin.
    dx = 1.0  # in degrees
    dy = 1.0  # in degrees
    nw_corner = (round_to_nearest(xmin, dx), round_to_nearest(ymax, dy))
    print(nw_corner)
    se_corner = (round_to_nearest(nw_corner[0] + dx, dx),
                 round_to_nearest(nw_corner[1] - dy, dy))
    print(se_corner)
    # data.plot()
    # plt.show()

    # Number of raster cells spanning one grid cell in each direction.
    print(type(data.raster))
    print(data.raster.shape)
    print(data.raster)
    print(GeoT)
    x_indexes = int(dx / GeoT[1])
    y_indexes = int(dy / -GeoT[5])
    print(x_indexes, y_indexes)

    # Plan: rather than pre-computing chunk boundaries, get the map pixels
    # for the grid-cell corners and iterate over them, appending each value
    # to a per-cell list for statistics.
    # col, row = gr.map_pixel(x, y, GeoT[1], GeoT[-1], GeoT[0], GeoT[3])
    print(data.map_pixel_location(13, 13))
    row, col = data.map_pixel_location(13, 13)
    print(row, col)
def get_full_pop_raster(path='.'):
    url = "https://www.dropbox.com/s/l9iphmawfjzt4lf/brazil_pop.tif.tar.xz?dl=1"
    fn = os.path.join(path, 'brazil_pop.tif.tar.xz')
    wget.download(url=url, out=path)
    # Use the joined path consistently so this also works when path != '.'.
    with lzma.open(fn) as f:
        with tarfile.open(fileobj=f) as tar:
            tar.extractall(path=path)
    os.unlink(fn)
    raster_fn = os.path.join(path, 'brazil_pop.tif.tif')
    raster = gr.from_file(raster_fn)
    os.unlink(raster_fn)
    return raster
def add_elevation(df, file, indexes):
    try:
        table = gr.from_file(file)
        for index in indexes:
            try:
                row = df.loc[index]
                val = table.map_pixel(row['lon'], row['lat'])
                df.loc[index, 'elevation'] = float(val)
            except Exception:
                # Point falls outside the raster (or has no data).
                df.loc[index, 'elevation'] = -9999
    except Exception:
        for index in indexes:
            df.loc[index, 'elevation'] = -9999
    return df
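# Hypothetical usage of add_elevation: a tiny frame of points plus a local
# DEM tile (file name assumed).
import pandas as pd

points = pd.DataFrame({'lon': [-46.63, -43.17], 'lat': [-23.55, -22.91]})
points = add_elevation(points, "dem_tile.tif", points.index)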
def get(self):
    try:
        dataByYear = []
        for i in range(21):
            dataByYear.append(
                gr.from_file("./dados/Yearly/prec4km_masked_02_01_" +
                             str(1998 + i) + ".tif").to_pandas().head(100))
        df = {}
        for i in range(21):
            # Mean of the first 100 cell values for each year.
            soma = sum(float(value) for value in dataByYear[i]["value"])
            df[str(1998 + i)] = str(soma / 100)
        return jsonify(df)
    except Exception:
        return jsonify({'info': 'no data in data series'})
def test_load_chelsea(self):
    # from http://chelsa-climate.org/downloads/
    DATA = "~/Downloads/datasets/chelsea/CHELSA_prec_01_V1.2_land.tif"
    data = gr.from_file(os.path.expanduser(DATA))
    # aggregated = aggregate_grid(data, x_ranges, y_ranges)
    print(data.raster.shape)
    one_deg = br_wrapper(data, 1, 1)
    """
    What's happening is that the mask is being removed. I need to aggregate
    the mask to the same 1-degree grid, then decide how to use it: dither it,
    or use a hard cutoff. I might end up using my own aggregation function
    for this because of how the y min and maxes work here.
    """
    plt.imshow(one_deg.raster)
    plt.show()
def test_union():
    import georasters as gr
    raster = os.path.join(DATA, 'pre1500.tif')
    data = gr.from_file(raster)
    (xmin, xsize, x, ymax, y, ysize) = data.geot
    # Split the raster into top and bottom halves (integer division, which
    # the Python 2 original wrote as /).
    data1 = gr.GeoRaster(data.raster[:data.shape[0] // 2, :],
                         data.geot,
                         nodata_value=data.nodata_value,
                         projection=data.projection,
                         datatype=data.datatype)
    data2 = gr.GeoRaster(
        data.raster[data.shape[0] // 2:, :],
        (xmin, xsize, x, ymax + ysize * (data.shape[0] // 2), y, ysize),
        nodata_value=data.nodata_value,
        projection=data.projection,
        datatype=data.datatype)
    '''
    import matplotlib.pyplot as plt
    plt.figure()
    data1.plot()
    plt.savefig(os.path.join(DATA, 'data1.png'))
    plt.figure()
    data2.plot()
    plt.savefig(os.path.join(DATA, 'data2.png'))

    from rasterstats import zonal_stats
    import geopandas as gp
    import pandas as pd
    # Import shapefiles
    pathshp = os.path.join(DATA, 'COL.shp')
    dfcol = gp.GeoDataFrame.from_file(pathshp)
    pathshp = os.path.join(DATA, 'TUR.shp')
    dftur = gp.GeoDataFrame.from_file(pathshp)
    # Join the geopandas dataframes
    df = dfcol.append(dftur)
    df.reset_index(drop=True, inplace=True)
    stats = zonal_stats(df, raster, copy_properties=True, all_touched=True,
                        raster_out=True, opt_georaster=True)
    dfcol = pd.merge(dfcol, pd.DataFrame(data=stats),
    '''
    # Re-joining the halves should reproduce the original raster exactly.
    assert (data1.union(data2).raster == data.raster).sum() == data.count()
def get(self):
    try:
        dataByMonth = []
        for mes in range(12):
            mes = str(mes + 1)
            if int(mes) < 10:
                mes = "0" + mes
            dataByMonth.append(
                gr.from_file("./dados/Monthly/prec4kmclim_masked_02_" + mes +
                             "_1998.tif").to_pandas().head(100))
        df = {}
        for i in range(12):
            # Mean of the first 100 cell values for each month.
            soma = sum(float(value) for value in dataByMonth[i]["value"])
            df[str(i + 1)] = str(soma / 100)
        return jsonify(df)
    except Exception:
        return jsonify({'info': 'no data in data series'})
def main():
    # ET = (18 * MWT - 10 * MCT) / (MWT - MCT + 8)
    # MWT = mean temperature of the warmest month of the year
    # MCT = mean temperature of the coldest month of the year

    # Load each CHELSA mean-temperature file and reduce to the per-cell
    # min and max across months. Values are in tenths of a degree.
    mct = np.full((20880, 43200), 2000, dtype=np.int16)
    mwt = np.full((20880, 43200), -5000, dtype=np.int16)
    x_ranges = list(gen_ranges(-180, 180, 1))
    y_ranges = list(gen_ranges(90, -90, 1))
    temps = []
    for file in tqdm(
            glob.glob(
                os.path.expanduser(
                    "~/Downloads/datasets/chelsea/CHELSA_temp10_*.tif"))):
        temp01 = gr.from_file(os.path.expanduser(file))
        # temps.append(temp01)
        # temp_ag = aggregate_grid(temp01, x_ranges, y_ranges)
        mwt = np.ma.maximum(mwt, temp01.raster)
        mct = np.ma.minimum(mct, temp01.raster)
    print("saving mwt and mct")
    np.save("./data/mean_warmest_temperature.npy", mwt.data)
    np.save("./data/mean_coldest_temperature.npy", mct.data)
    exit()

    # ET = (18 * MWT - 10 * MCT) / (MWT - MCT + 8)
    # Computed in place, piece by piece, to keep memory usage down.
    temp01 = None
    lower = mwt - mct
    lower += 80  # +8 degrees, in tenths
    mwt *= 18
    mct *= 10
    mwt -= mct
    mct = None
    effective_temperature = mwt / lower
    np.save("./data/effective_temperature_large.npy",
            effective_temperature.data)
    np.save("./data/effective_temperature_large_mask.npy",
            effective_temperature.mask)
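# The effective-temperature formula above, as a self-contained helper that
# mirrors the script's in-place arithmetic (inputs in tenths of a degree,
# hence the +80 rather than +8). A sketch, not part of the original script.
def effective_temperature(mwt, mct):
    """ET for mean warmest (mwt) and coldest (mct) month temperatures."""
    return (18 * mwt - 10 * mct) / (mwt - mct + 80)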
def main():
    files = glob.glob(os.path.expanduser(
        "~/Downloads/datasets/elevation/viewfinder_dem3/*.tif"))
    # files = files[:2]
    agg_first = True
    how = np.ma.std
    # how = np.ma.mean

    # Fake a no-data value.
    ndv = -1000

    print("loading files")
    rasters = []
    for filename in tqdm(files):
        raster = gr.from_file(filename)
        if agg_first:
            # Note: GeoRaster's own attribute is `nodata_value`; `ndv` is
            # presumably consumed by the custom br_wrapper below.
            raster.ndv = ndv
            # Standard deviation promotes the data to a double.
            if how == np.ma.std:
                raster.datatype = "Float64"
            # print(raster.shape)
            if raster.shape[0] % 2 == 1:
                raster = clip_gr(raster)
            aggregated = br_wrapper(raster, 1, 1, how)
            rasters.append(aggregated)
        else:
            rasters.append(raster)
    # exit()

    print("starting merging")
    merged = gr.merge(rasters)
    print(merged.shape)

    print("starting aggregation")
    if not agg_first:
        one_deg = br_wrapper(merged, 1, 1, how)
        one_deg = clip_gr(one_deg)
    else:
        one_deg = merged

    print("Saving")
    one_deg.to_tiff(os.path.expanduser(
        "~/Downloads/datasets/elevation/one_deg_stddev.tif"))
def process_pop_data(pop_file, country):
    """Takes a tif file for an individual country and converts it to a csv
    with the country name appended to every row."""
    start_time = time.time()
    pop_data = gr.from_file(pop_file)
    pop_df = pop_data.to_pandas()
    print("DataFrame created!")
    pop_df['value'] = pop_df['value'].apply(lambda x: round(x, 3))
    # Sanity check: the values should sum to the country's population.
    print("Total population of", country, ":",
          '{:,}'.format(pop_df['value'].sum()))
    pop_df['lat'] = pop_df['y']
    pop_df['long'] = pop_df['x']
    print("Lat/Long Created!")
    pop_df['country'] = country
    print("Converting to CSV..")
    pop_df.to_csv(country + '_pop_data.csv', index_label='key')
    del pop_df  # clear memory
    end_time = time.time()
    time_taken = round((end_time - start_time) / 60, 1)
    print("Done!")
    print("Time taken:", time_taken, "minutes")
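# Hypothetical usage, assuming a WorldPop-style per-country GeoTIFF:
process_pop_data("bra_ppp_2020.tif", "Brazil")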
def aggregate(input_rst, output_rst, scale):
    """
    Aggregate (downsample) a raster by a given factor, replacing the
    no-data value with 0.

    Args:
        input_rst: path to the input raster in a format supported by georasters
        output_rst: path to the scaled output raster in a format supported by georasters
        scale: the integer factor by which the raster is aggregated

    Returns:
        Saves the output raster to disk.
    """
    import georasters as gr

    input_gr = gr.from_file(input_rst)

    # No-data values are replaced with 0 to prevent summing them in each block.
    input_gr.raster.data[input_gr.raster.data == input_gr.nodata_value] = 0
    input_gr.nodata_value = 0

    output_gr = input_gr.aggregate(block_size=(scale, scale))
    # GeoRaster.to_tiff appends ".tif" itself, so strip the extension first.
    output_gr.to_tiff(output_rst.replace(".tif", ""))
def add_slope_aspect_curvature(df, file, indexes):
    for attr in ['slope_percentage', 'aspect', 'profile_curvature']:
        table = None
        try:
            # Derive the terrain attribute with richdem, round-trip it
            # through a temporary GeoTIFF, and sample it per point.
            table = rd.TerrainAttribute(rd.LoadGDAL(file, no_data=-9999),
                                        attrib=attr)
            rd.SaveGDAL("./temp.tif", table)
            table = None
            table = gr.from_file("./temp.tif")
            for index in indexes:
                try:
                    row = df.loc[index]
                    val = table.map_pixel(row['lon'], row['lat'])
                    df.loc[index, attr] = float(val)
                except Exception:
                    df.loc[index, attr] = np.nan
            os.remove("./temp.tif")
        except Exception:
            for index in indexes:
                df.loc[index, attr] = np.nan
    return df
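# Hypothetical usage, mirroring add_elevation above: fill slope, aspect and
# curvature columns for a frame with 'lon'/'lat' columns from a local DEM
# (file name assumed).
import pandas as pd

points = pd.DataFrame({'lon': [-46.63, -43.17], 'lat': [-23.55, -22.91]})
points = add_slope_aspect_curvature(points, "dem_tile.tif", points.index)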
def aggregate(self, scale):
    """
    Aggregate (downsample) a raster by a given factor, replacing the
    no-data value with 0.

    Args:
        scale: the integer factor by which the raster is aggregated

    Returns:
        Saves the output raster to disk.

    # https://github.com/pasquierjb/GIS_RS_utils/blob/master/aggregate_results.py
    """
    import georasters as gr

    input_gr = gr.from_file(self.path_to_raster)

    # No-data values are replaced with 0 to prevent summing them in each block.
    input_gr.raster.data[input_gr.raster.data.astype(np.float32) ==
                         np.float32(input_gr.nodata_value)] = 0
    input_gr.nodata_value = 0

    output_gr = input_gr.aggregate(block_size=(scale, scale))
    # GeoRaster.to_tiff appends ".tif" itself, so strip the extension first.
    output_gr.to_tiff(self.path_agg_raster.replace(".tif", ""))

    return BaseLayer(self.path_agg_raster, self.lon, self.lat)
def test_stats4():
    import georasters as gr
    raster = os.path.join(DATA, 'pre1500.tif')
    data = gr.from_file(raster)
    assert data.max() == data.raster.max()
def test_stats6():
    import georasters as gr
    raster = os.path.join(DATA, 'pre1500.tif')
    data = gr.from_file(raster)
    assert data.median() == np.ma.median(data.raster)
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import division
import georasters as gr
import geopandas as gp
import os

# Path to data
pathtestdata = gr.__path__[0] + '/../tests/data/'

# Load raster data
data = gr.from_file(pathtestdata + 'pre1500.tif')

# Load country geometries
col = gp.read_file(pathtestdata + 'COL.shp')
df = data.clip(col, keep=True)
print(df)

# Select clipped raster
colraster = df.GeoRaster[0]
colraster.plot(cmap='Reds')

# Compute global autocorrelation stats
colraster.pysal_G()
colraster.pysal_Gamma()
colraster.pysal_Geary()
colraster.pysal_Join_Counts()
colraster.pysal_Moran()
Merge all the geotiff files into one big image:

$ mkdir nh_riks_WGS84_geotiff
$ gdal_merge.py -o nh_riks_Sweref_99_TM_geotiff/out.tif nh_riks_Sweref_99_TM_geotiff/*

Now warp the image from SWEREF99 TM to WGS84:

$ gdalwarp -t_srs "EPSG:4326" nh_riks_Sweref_99_TM_geotiff/out.tif nh_riks_WGS84_geotiff/out.tif

And lastly produce the elevation file from that data:

`python build_imagedata.py`
"""
import georasters as gr
import numpy as np

DATA_IN = "nh_riks_WGS84_geotiff/out.tif"
# DATA_IN = "nh_riks_WGS84_geotiff/nh_61_3.tif"
OUTFILE = "elevation_data.npz"

data = gr.from_file(DATA_IN).raster

# Data from Lantmateriet has 10 decimals, none of them significant.
data = data.round(decimals=0)

# MaskedArrays can't be saved to disk; convert to an ndarray.
data = data.filled(101)

# Elevations above ~100 meters aren't interesting here, so clamp them to
# 101, which will render as white.
data = data.clip(0, 101)

np.savez_compressed(OUTFILE, data)
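# A minimal sketch of reading the archive back; np.savez_compressed stores a
# positional (unnamed) array under the key 'arr_0'.
import numpy as np

elevation = np.load("elevation_data.npz")["arr_0"]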