Example #1
import pandas as pd

import raslib  # project-local raster sampling utilities


def merge_data_products(ddict,
                        file_out,
                        merge_data_in=None,
                        left_on=None,
                        right_on=None,
                        suffixes=None,
                        mode='nearest'):
    """Sample the rasters in ddict to a point table, optionally merge in
    supplemental CSVs, and save the result to file_out."""
    data = raslib.pd_sample_raster_gdal(ddict, include_nans=False, mode=mode)

    # if 'dce' in ddict.keys():
    #     data.dce = np.rint(data.dce * 10)/10

    if merge_data_in is not None:
        # nest single values in lists so the merge loop handles them uniformly
        if isinstance(merge_data_in, str):
            merge_data_in = [merge_data_in]
        if isinstance(right_on, str):
            right_on = [right_on]
        if isinstance(left_on, str):
            left_on = [left_on]
        if isinstance(suffixes, str):
            suffixes = [suffixes]

        for ii in range(len(merge_data_in)):
            # merge with hemisfer outputs
            if suffixes is not None:
                suffixes_iter = ("", suffixes[ii])
            else:
                suffixes_iter = ("_x", "_y")

            print('Merging with ' + suffixes_iter[1] + '... ', end='')
            sup_data = pd.read_csv(merge_data_in[ii])
            data = data.merge(sup_data,
                              how='left',
                              left_on=left_on[ii],
                              right_on=right_on[ii],
                              suffixes=suffixes_iter)
            print('done')

        # data = data.drop(columns='id')

    # save to file
    print('saving to file... ', end='')
    data.to_csv(file_out, index=False)
    print('done')

    return data
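
# --- usage sketch (not from the original source): the raster paths, CSV name,
# --- and column names below are hypothetical placeholders for illustration
ddict = {'chm': 'chm.tif',
         'lrs_id': 'point_ids.tif'}
data = merge_data_products(ddict,
                           file_out='merged_products.csv',
                           merge_data_in=['hemi_metrics.csv'],
                           left_on=['lrs_id'],
                           right_on=['id'],
                           suffixes=['_hemi'])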
Example #2
import numpy as np
import pandas as pd
from scipy.ndimage import convolve  # assumed source of convolve in this snippet
import raslib

# note: ras, ras_in, canopy_min_elev, kernel_dim, angle_count and max_step are
# defined earlier in the source script and are not shown here

# define canopy binary mask (1 where the surface reaches the canopy threshold)
canopy = np.full([ras.rows, ras.cols], 0)
canopy[ras.data >= canopy_min_elev] = 1

# smooth the binary mask with a normalized box filter
kernel = np.full([kernel_dim, kernel_dim], 1)
convolved = convolve(canopy, kernel) / np.sum(kernel)

# points to search:
ids_in = 'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\site_library\\hemi_grid_points\\mb_65_r.25m_snow_off_offset.25\\dem_r.25_point_ids.tif'
ddict = {'chm': ras_in,
         'lrs_id': ids_in,
         'uf': 'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\site_library\\hemi_grid_points\\mb_65_r.25m_snow_off_offset0\\uf_plot_r.25m.tif',
         }
df = raslib.pd_sample_raster_gdal(ddict, include_nans=False, mode="nearest")

# filter to uf plot cells only (for now)
df = df.loc[df.uf == 1, :]

can_dist = np.full((len(df), angle_count), np.nan)

for ii in range(len(df)):
    coords = np.array([df.y_index.iloc[ii], df.x_index.iloc[ii]])
    for jj in range(angle_count):
        aa = jj * 2 * np.pi / angle_count
        displacement = np.array([np.cos(aa), np.sin(aa)])
        hit = False
        step_count = 0

        while not hit and (step_count < max_step):
            step_count += 1
            # the original loop body is truncated here; the following
            # reconstruction (nearest-cell ray marching, distance recorded in
            # grid cells) is an assumption based on the surrounding code
            probe = np.rint(coords + displacement * step_count).astype(int)
            if (probe < 0).any() or probe[0] >= ras.rows or probe[1] >= ras.cols:
                break  # ray left the raster without reaching canopy
            if canopy[probe[0], probe[1]] == 1:
                hit = True
                can_dist[ii, jj] = step_count
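
# plausible follow-up (not in the original, which is truncated above): attach
# the per-point mean ray length, in grid cells, back onto the sampled points
df = df.assign(mean_can_dist=np.nanmean(can_dist, axis=1))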
Example #3
import pandas as pd
import raslib

### export 1m subset of .25m grid
point_batch_dir = "C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\site_library\\hemi_grid_points\\mb_65_r.25m_snow_off_offset.25\\"

data_25_in = point_batch_dir + 'dem_r.25_points.csv'
data_25 = pd.read_csv(data_25_in)

# ddict = {
#     'uf_15': 'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\synthetic_hemis\\hemi_grid_points\\mb_65_1m\\uf_plot_r1.00m.tif',
#     'hemi_id': 'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\synthetic_hemis\\hemi_grid_points\\mb_65_1m\\1m_dem_point_ids.tif',
#     'id': 'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\synthetic_hemis\\hemi_grid_points\\mb_65_r.25m\\dem_r.25_point_ids.tif'
# }
ddict = {
    'uf_15': point_batch_dir + 'uf_plot_r1.00m.tif',
    #'hemi_id': point_batch_dir + '1m_dem_point_ids.tif',
    'id': point_batch_dir + 'dem_r.25_point_ids.tif'
}

data_1 = raslib.pd_sample_raster_gdal(ddict,
                                      include_nans=False,
                                      mode='nearest')
lrs_id = data_1.id

# left join keeps only the .25m points whose id appears in the 1m sample
merged = pd.merge(lrs_id, data_25, on="id", how="left")

merged.id = merged.id.astype(int)

file_out = point_batch_dir + "dem_r.25_point_ids_1m subset.csv"
merged.to_csv(file_out, index=False)

# lala = pd.read_csv('C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\ray_sampling\\batches\\lrs_uf_r.25_px181_snow_off\\outputs\\rshmetalog_footprint_products.csv')

ddict = {
    'swe_fcon_19_045':
    'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\19_045\\19_045_las_proc\\OUTPUT_FILES\\SWE\\fcon\\interp_2x\\masked\\swe_fcon_19_045_r.05m_interp2x_masked.tif',
    'swe_fcon_19_050':
    'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\19_050\\19_050_las_proc\\OUTPUT_FILES\\SWE\\fcon\\interp_2x\\masked\\swe_fcon_19_050_r.05m_interp2x_masked.tif',
    'swe_fcon_19_052':
    'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\19_052\\19_052_las_proc\\OUTPUT_FILES\\SWE\\fcon\\interp_2x\\masked\\swe_fcon_19_052_r.05m_interp2x_masked.tif',
    # 'dswe_fnsd_19_045-19_050': 'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\products\\mb_65\\dSWE\\fnsd\\interp_2x\\19_045-19_050\\masked\\dswe_fnsd_19_045-19_050_r.05m_interp2x_masked.tif',
    # 'dswe_fnsd_19_050-19_052': 'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\products\\mb_65\\dSWE\\fnsd\\interp_2x\\19_050-19_052\\masked\\dswe_fnsd_19_050-19_052_r.05m_interp2x_masked.tif',
    'dswe_ucgo_19_045-19_050':
    'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\products\\mb_65\\dSWE_bias_corrected\\ucgo\\interp_2x\\19_045-19_050\\masked\\dswe_ucgo_19_045-19_050_r.05m_interp2x_masked.tif',
    'dswe_ucgo_19_050-19_052':
    'C:\\Users\\Cob\\index\\educational\\usask\\research\\masters\\data\\lidar\\products\\mb_65\\dSWE_bias_corrected\\ucgo\\interp_2x\\19_050-19_052\\masked\\dswe_ucgo_19_050-19_052_r.05m_interp2x_masked.tif',

    # 'covariant': var_in
}
var = raslib.pd_sample_raster_gdal(ddict, include_nans=False, mode="median")

# var = var.loc[~np.isnan(var.covariant), :]  # drop nans in covariant

# if date == "045-050":
#     var.loc[:, "min_pc"] = np.nanmin((var.count_045, var.count_050, var.count_149), axis=0) * (.25 ** 2)
# elif date == "050-052":
#     var.loc[:, "min_pc"] = np.nanmin((var.count_050, var.count_052, var.count_149), axis=0) * (.25 ** 2)
# elif date == "19_045":
#     var.loc[:, "min_pc"] = np.nanmin((var.count_045, var.count_149), axis=0) * (.25 ** 2)
# elif date == "19_050":
#     var.loc[:, "min_pc"] = np.nanmin((var.count_050, var.count_149), axis=0) * (.25 ** 2)
# elif date == "19_052":
#     var.loc[:, "min_pc"] = np.nanmin((var.count_052, var.count_149), axis=0) * (.25 ** 2)
# elif date == "19_107":
#     var.loc[:, "min_pc"] = np.nanmin((var.count_107, var.count_149), axis=0) * (.25 ** 2)
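
# illustrative check (not part of the original script): summarize the sampled
# SWE products; column names follow the ddict keys above
swe_cols = ['swe_fcon_19_045', 'swe_fcon_19_050', 'swe_fcon_19_052']
print(var[swe_cols].describe())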