示例#1
0
# Create the figure output directory if it does not already exist.
# (idiomatic truthiness test instead of comparing to False)
if not os.path.isdir(path2fig):
    os.mkdir(path2fig)
"""
#===============================================================================
PART A: LOAD IN DATA AND SUBSET THE TRAINING DATA
Load data
Filter areas where we have LiDAR estimates
Subsample if desired/required
#-------------------------------------------------------------------------------
"""
print('Loading data')

# Template raster: lidar-derived median AGB estimates at the working resolution
lidar_agb_file = ('../data/lidar_calibration/%sm/kiuic_lidar_agb_%s_median.tif'
                  % (resolution.zfill(3), version_trials))
lidar = io.load_geotiff(lidar_agb_file, option=1)
target = lidar.values.copy()
# Negative values mark no-data pixels; mask them out of the target
negative = target < 0
target[negative] = np.nan

# Predictor stack (Sentinel-2 + ALOS) with validity mask and per-layer labels
data_layers, data_mask, labels = io.load_predictors(layers=['sentinel2', 'alos'],
                                                    resolution=resolution)
# Optionally list texture layers to drop, e.g. ['ASM', 'homogeneity', 'correlation']
layers_to_remove = []
n_predictors = data_layers.shape[0]
# Boolean mask over predictor layers; entries are cleared for removed layers
layer_mask = np.ones(n_predictors, dtype='bool')
labels_update = []
# Flag any predictor layer whose label matches an entry in layers_to_remove
# (loop body continues beyond this excerpt).
for ii, lab in enumerate(labels):
    for layer in layers_to_remove:
        if layer in lab:
示例#2
0
# Import custom libraries
import sys
sys.path.append('./data_io/')
import data_io as io
"""
Project Info
"""
# Root directory holding the Yucatan biomass project datasets
path2data = '/exports/csce/datastore/geos/groups/gcel/YucatanBiomass/data/'
"""
#===============================================================================
PART A: LOAD DATA
Load the raster data and inventory data
#-------------------------------------------------------------------------------
"""
# Reference grid: mean-texture raster derived from Sentinel-2 band 3
raster_file = '%s/sentinel/processed/kiuic_b3_texture_mean.tif' % (path2data)
raster = io.load_geotiff(raster_file, option=1)
# Replace the -9999 nodata fill value with NaN
nodata = raster.values == -9999
raster.values[nodata] = np.nan

# Field inventory plot locations
inventory = fiona.open('%s/field_inventory/PUNTOS.shp' % path2data)

# Coordinate vectors and pixel spacing of the reference raster
X_raster = raster.coords['x'].values
Y_raster = raster.coords['y'].values
dX = X_raster[1] - X_raster[0]
dY = Y_raster[1] - Y_raster[0]
# Search radius = half the pixel diagonal (sqrt(2) * max half-spacing)
rad = np.sqrt(2. * max((dX / 2.) ** 2, (dY / 2.) ** 2))

# Accumulators for plots falling inside vs. outside the lidar extent
raster_data = []
field_data = []
示例#3
0
    # Save random forest model for future use
    # (this block is the tail of a calibration loop that starts before this excerpt)
    rf_dict = {}
    rf_dict['rf1']=rf1
    rf_dict['rf2']=rf2
    # Persist both stages of the bias-corrected RF, keyed by iteration number
    joblib.dump(rf_dict,'%s%s_%s_optimised_rfbc_sentinel_alos_lidar_%s.pkl' % (path2alg,site_id,version,str(ii+1).zfill(3)))
# Release the (large) training matrix once the models are on disk
X=None

"""
#===============================================================================
PART C: MONTECARLO UPSCALING
Fit RF model for 100 AGB maps
Save RFs for future reference
#-------------------------------------------------------------------------------
"""
# Borrow georeferencing information from an existing AGB dataset
template = io.load_geotiff(agb_list[0], option=1)
rows, cols = template.shape
# One upscaled AGB map per Monte Carlo iteration
agb_stack = np.zeros((N_iter, rows, cols))
# Monte Carlo upscaling: produce one AGB map per saved RF model pair
# (loop body continues beyond this excerpt).
for ii, agb_file in enumerate(agb_list):
    print('Iteration %i of %i' % (ii+1,N_iter))
    # Reload the RF pair saved during calibration for this iteration
    rf_dict = joblib.load('%s%s_%s_optimised_rfbc_sentinel_alos_lidar_%s.pkl' % (path2alg,site_id,version,str(ii+1).zfill(3)))
    # Bias-corrected prediction from the two-stage random forest
    agb_mod = rff.rfbc_predict(rf_dict['rf1'],rf_dict['rf2'],predictors)

    #let's copy to a new xarray for AGBpot
    agb = io.copy_xarray_template(template)
    # Write predictions into forest pixels only, per the precomputed mask
    agb.values[forest_mask] = agb_mod.copy()
    # Mask nodata fill values and clip negative AGB predictions to zero
    agb.values[agb.values==-9999]=np.nan
    agb.values[agb.values<0]=0