Example #1
0
        def get_flowlines_ascii(self, tmp_dir):
            """Rasterize the flowline shapefile onto the DEM grid and export as ESRI ASCII.

            Burns a value of 1 into every cell touched by a flowline geometry,
            writes the grid to ``<tmp_dir>/flow_raster.asc``, and caches the
            raster path, header text, and array on the instance.

            Parameters
            ----------
            tmp_dir : str
                Directory in which the ASCII raster is written.
            """
            # Keep only the geometry column, then tag every feature with burn value 1
            cols = self.shp.columns.to_list()
            cols.remove('geometry')
            flow = self.shp.drop(cols, axis=1)
            flow['value'] = 1
            self.raster_path = tmp_dir + '/flow_raster.asc'

            dem_file = str(config.dem_velma)

            with rasterio.open(dem_file, 'r') as src:
                in_arr = src.read(1)
                in_arr[:] = 0  # blank canvas that keeps the DEM's shape/transform
                # BUG FIX: the original overwrote this copy with `meta = src.meta`
                # on the next line, defeating the defensive copy; keep the copy only.
                meta = src.meta.copy()
                with rasterio.open(self.raster_path, 'w+', **meta) as out:
                    shapes = (
                        (geom, value)
                        for geom, value in zip(flow.geometry, flow.value))
                    burned = features.rasterize(shapes=shapes,
                                                fill=np.nan,
                                                out=in_arr,
                                                transform=out.transform)
                    out.write_band(1, burned)

            self.raster_header = readHeader(self.raster_path)
            self.raster = np.loadtxt(self.raster_path, skiprows=6)
Example #2
0
dem_file = str(config.dem_velma)

roi_raster = config.dem_velma.parents[1] / 'roi.asc'

# Take DEM and set all values to NaN, then burn species shp into empty DEM raster
with rasterio.open(dem_file, 'r') as src:
    in_arr = src.read(1)
    in_arr[:] = 0  # zero out DEM values; grid shape/transform are preserved
    # BUG FIX: the original overwrote this copy with `meta = src.meta` on the
    # following line, defeating the defensive copy; keep the copy only.
    meta = src.meta.copy()
    with rasterio.open(roi_raster, 'w+', **meta) as out:
        shapes = ((geom, value) for geom, value in zip(roi.geometry, roi.value))
        burned = features.rasterize(shapes=shapes, fill=np.nan, out=in_arr, transform=out.transform)
        out.write_band(1, burned)

roi_header = readHeader(roi_raster)
roi_asc = np.loadtxt(roi_raster, skiprows=6)


# =======================================================================
# Vectorize delineated DEM (area upstream of outpour point, as exported from JPDEM)
# =======================================================================

dem_path = config.dem_velma.parents[0] / 'delineated_dem.asc'
dem = np.loadtxt(dem_path, skiprows=6)

# Collapse the delineated DEM to a binary mask (1 = upstream area), then fill
# interior holes so the catchment is one solid region.
dem_simple = dem.astype('int16')
dem_simple[dem_simple > 1] = 1
dem_simple[dem_simple < 1] = 0
dem_simple = binary_fill_holes(dem_simple).astype(int)
Example #3
0
# Replace bare, bpa, and nf that were in stands with conifer
ccap[(ccap == bare_id)] = conifer_id
ccap[(ccap == bpa_id)] = conifer_id
ccap[(ccap == nf_id)] = conifer_id

# Overlay NLCD deciduous forests on CCAP
ccap[(nlcd == nlcd_forest_decid)] = nlcd_forest_decid

# Replace CCAP shrub, herbaceous, dirt, and water with conifer
ccap[(ccap == ccap_herby)] = conifer_id
ccap[(ccap == ccap_shrub)] = conifer_id
ccap[(ccap == ccap_dirt)] = conifer_id
ccap[(ccap == ccap_water)] = conifer_id

# Export merged map: ESRI ASCII header followed by the integer grid.
# Context managers replace the original open/close pairs so files are closed
# even if np.savetxt raises.
header = readHeader(stands_path)
with open(outfile, "w") as f:
    f.write(header)
    np.savetxt(f, ccap, fmt="%i")

# Create cover type map that is just conifer
conifer = (ccap * 0) + 1  # same shape as ccap, every cell set to 1
outfile = config.cover_type_ccap_merge_velma.parents[0] / 'conifer.asc'
with open(outfile, "w") as f:
    f.write(header)
    np.savetxt(f, conifer, fmt='%i')

# # Merge key files
# ccap_key = pd.read_csv(config.ccap_out.parents[0] / 'ccap_classes.csv')
Example #4
0
                                     out=template,
                                     transform=src.transform,
                                     default_value=1)

    protected = ((stand_id == 0) + (murrelet == 1))
    harvest[protected.astype('bool')] = 0
    if np.sum(harvest) > 0:
        yearly_clearcuts.append(harvest)

# Export clearcut harvests: one binary ASCII grid per simulated year.
filter_dir = config.stand_id_velma.parents[0] / 'filter_maps'
# exist_ok=True replaces the original try/except FileExistsError guard
filter_dir.mkdir(parents=True, exist_ok=True)
header = readHeader(dem_file)
for i, harvest in enumerate(yearly_clearcuts):
    outfile = filter_dir / 'random_35yr_clearcut_10pct_{}.asc'.format(i + 1)
    # ESRI ASCII grid: 6-line header followed by the integer matrix.
    # `with` guarantees the file is closed even if savetxt raises.
    with open(outfile, "w") as f:
        f.write(header)
        np.savetxt(f, harvest, fmt="%i")

# # To check sizes of each yearly harvest
# areas = []
# for cut in yearly_clearcuts:
#     area_sqm = np.sum(cut) * 100
#     area_sqkm = area_sqm * 1e-6
#     areas.append(area_sqkm)
# pd.DataFrame(areas, columns=['data']).describe()
Example #5
0
# Marbled murrelet habitat is a protected area that can't be harvested
murrelet = np.loadtxt(config.data_path / 'landcover' / 'murrelet_no_harvest.asc', skiprows=6)
murrelet[murrelet == -9999] = np.nan  # -9999 is the ASCII-grid NoData value

# =======================================================================
# Create (binary) disturbance filter maps for each forest management scenario

# Cells with no stand ID or inside murrelet habitat are excluded (TRUE here),
# then inverted and binarized so harvestable cells become 1.
filter_map = ((stand_id == 0) + (murrelet == 1))
filter_map = np.invert(filter_map) * 1
header = readHeader(stand_id_path)

# Both scenarios used the same mask and header in the original duplicated
# blocks; only the output filename differs, so export them in one loop.
for disturbance in ['industrial_clearcut', 'active_all']:
    outfile = filter_dir / '{}.asc'.format(disturbance)
    with open(outfile, 'w') as f:
        f.write(header)
        np.savetxt(f, filter_map, fmt='%i')
Example #6
0
# Collect the .asc filter maps to be resampled.
# NOTE(review): `old_paths` is appended to but not initialized here — it is
# presumably created earlier in the file; verify before running standalone.
filter_map_dir = config.velma_data / 'landcover' / 'filter_maps'
for file in os.listdir(str(filter_map_dir)):
    if file.endswith('.asc'):
        old_paths.append(filter_map_dir / file)

# Resample categorical data files to desired resolution
# Nearest-neighbor keeps class codes intact (no interpolation between classes).
# `upscale_factor` is defined outside this view — TODO confirm its value/sign.
new_files = []
for file in old_paths:
    with rasterio.open(file, 'r') as ds:
        data = ds.read(out_shape=(ds.count, int(ds.height * upscale_factor),
                                  int(ds.width * upscale_factor)),
                       resampling=Resampling.nearest)
        new_files.append(data)

# Export resampled files
# NOTE(review): `header` is computed but never used below, and only the FIRST
# resampled array is written (the loop is commented out). The AAIGrid writer is
# opened without a profile (width/height/dtype/transform) — confirm this is
# intentional, as rasterio normally requires those for mode 'w'.
header = readHeader(old_paths[0])
#for file in new_files:
with rasterio.open(new_paths[0], 'w', 'AAIGrid') as dst:
    dst.write(new_files[0], 1)

# =======================================================================================
# Resample DEM files if necessary

# Downsampling from 3 m to 5 m cells -> factor < 1 shrinks the grid dimensions.
orig_res_dem = 3
new_res_dem = 5
upscale_factor_dem = orig_res_dem / new_res_dem

old_dem_files = []

dem_dir = config.velma_data.parents[0] / 'ellsworth_3m_velma' / 'topography'
for file in os.listdir(str(dem_dir)):
Example #7
0
# NLCD 2016 land-cover class codes used below
nlcd_dev_openspace = 121
nlcd_dev_low = 122
nlcd_dev_med = 123
nlcd_dev_high = 124
nlcd_barren = 131
nlcd_forest_decid = 141
nlcd_forest_evergreen = 142
nlcd_forest_mixed = 143
nlcd_shrub = 152
nlcd_herby = 171
nlcd_woody_wet = 190
nlcd_emerg_herb_wet = 195

# Erode NLCD roads by 1 pixel - they look to be about 10-20m, not 30m
road_mask = (nlcd == nlcd_dev_openspace) + (nlcd == nlcd_dev_low)
roads = ndimage.binary_erosion(road_mask, iterations=1)

# Combine CCAP developed with NLCD roads
roads_merge = roads + (ccap == ccap_developed) + (ccap == ccap_dirt)

# Convert to % permeability: non-road cells get 1 (fully permeable),
# road cells get perm_fraction.
perm_fraction = 0.5  # % permeability of roads
perm = np.invert(roads_merge) * 1
# BUG FIX: the original hard-coded 0.5 here instead of using perm_fraction,
# so changing the constant above would silently have had no effect.
perm = np.where(perm == 0, perm_fraction, perm)

# Export as ESRI ASCII: header then float grid; `with` guarantees closure.
header = readHeader(nlcd_path)
with open(outfile, "w") as f:
    f.write(header)
    np.savetxt(f, perm, fmt="%f")
cover_age = np.loadtxt(config.cover_age_velma, skiprows=6)
age_count = cover_age - age_diff

# Subtract age difference between start and end period from current age raster.
# Age+1 each year, and cells are cut down when cells=0
prehansen_cuts = []
prehansen_years = []
for year in range(start, end):
    loss_map = (age_count == 0)  # cells whose age reaches 0 this year were cut
    prehansen_cuts.append(loss_map)
    prehansen_years.append(year)
    age_count += 1

# Export historical clearcut filter maps
header = readHeader(config.cover_age_velma)

for i, year in enumerate(range(start, end)):
    prehansen_loss = prehansen_cuts[prehansen_years.index(year)]
    # Hansen loss rasters store 2-digit years, hence year % 100
    hansen_loss = (hansen_yearly_loss == year % 100) * 1
    total_loss = hansen_loss + prehansen_loss
    if total_loss.sum() > 0:
        print(year)
        outfile = filter_dir / 'historical_clearcut_{}.asc'.format(year)
        # BUG FIX: in the original, these write statements sat OUTSIDE the
        # `if`, so a year with zero loss re-wrote the PREVIOUS year's file
        # with an all-zero grid (and the first such year would raise
        # NameError on the undefined `outfile`). Write only when there is
        # actual loss, and use `with` so the file is always closed.
        with open(outfile, "w") as f:
            f.write(header)
            np.savetxt(f, total_loss, fmt="%i")