# ============================================================ #
# Process cfg data
# ============================================================ #
# Parse the simulation period from the config
time_index_cfg = cfg['TIME_INDEX']
start_time = pd.to_datetime(time_index_cfg['start_time'])
end_time = pd.to_datetime(time_index_cfg['end_time'])
start_year, end_year = start_time.year, end_time.year


# ============================================================ #
# Setup postprocess output directories
# ============================================================ #
# Base directory for this test's postprocess outputs
test_subdir = 'test.truth_swe_orig_forcing'
basedir = setup_output_dirs(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['OUTPUT']['output_basedir']),
    mkdirs=[test_subdir])[test_subdir]
# Standard VIC run subdirectories under the base directory
dirs = setup_output_dirs(
    basedir, mkdirs=['global', 'states', 'history', 'logs'])


# ============================================================ #
# Load data
# ============================================================ #
# Construct time points for synthetic measurement (daily, at a certain hour)
# (1) Determine first and last measurement time point
if start_time.hour >= cfg['TIME_INDEX']['synthetic_meas_hour']:
    # The measurement hour has already passed on the start day, so the first
    # synthetic measurement falls on the following day.
    # FIX: pd.datetime was removed in pandas >= 1.0; pd.Timestamp is the
    # supported constructor and accepts (year, month, day, hour).
    next_day = start_time + pd.DateOffset(days=1)
    meas_start_time = pd.Timestamp(next_day.year, next_day.month, next_day.day,
                                   cfg['TIME_INDEX']['synthetic_meas_hour'])
# ==== Пример #2 (Example #2) — scraped-snippet separator ====
    # Path of the pickled NumPy RNG state written after the update step at
    # restart_time by a previous EnKF run.
    # NOTE(review): this fragment assumes `restart_time` is defined by the
    # enclosing restart branch, which is not visible here — confirm against
    # the full script.
    random_state_file = os.path.join(
        cfg['CONTROL']['root_dir'],
        cfg['OUTPUT']['output_EnKF_basedir'],
        'restart_log',
        '{}.after_update.random_state.pickle'.format(
            restart_time.strftime("%Y%m%d-%H-%M-%S")))
    # Restore the saved RNG state so the restarted run reproduces the same
    # random sequence as an uninterrupted run.
    with open(random_state_file, 'rb') as f:
        random_state = pickle.load(f)
    np.random.set_state(random_state)


# ============================================================ #
# Prepare output directories
# ============================================================ #
# Create all EnKF output subdirectories under the EnKF base directory
enkf_basedir = os.path.join(cfg['CONTROL']['root_dir'],
                            cfg['OUTPUT']['output_EnKF_basedir'])
dirs = setup_output_dirs(
    enkf_basedir,
    mkdirs=['global', 'history', 'states', 'logs', 'plots', 'temp',
            'restart_log'])


# ============================================================ #
# Prepare VIC exe and MPI exe
# ============================================================ #
# Wrap the VIC executable path; the MPI launcher is kept as a plain path
vic_exe = VIC(os.path.join(cfg['CONTROL']['root_dir'], cfg['VIC']['vic_exe']))
mpi_exe = cfg['VIC']['mpi_exe']


# ============================================================ #
# Prepare and run EnKF
# ============================================================ #
print('Preparing for running EnKF...')
# ==== Пример #3 (Example #3) — scraped-snippet separator ====
else:  # If restart, load in the saved random state
    # Parse the restart time and locate the RNG-state pickle written by the
    # previous run's update step at that time.
    restart_time = pd.to_datetime(restart)
    random_state_file = os.path.join(
        cfg['CONTROL']['root_dir'], cfg['OUTPUT']['output_EnKF_basedir'],
        'restart_log', '{}.after_update.random_state.pickle'.format(
            restart_time.strftime("%Y%m%d-%H-%M-%S")))
    # Restore NumPy's global RNG state for a reproducible continuation
    with open(random_state_file, 'rb') as f:
        random_state = pickle.load(f)
    np.random.set_state(random_state)

# ============================================================ #
# Prepare output directories
# ============================================================ #
# All EnKF outputs live under one base directory with fixed subdirs
enkf_root = os.path.join(cfg['CONTROL']['root_dir'],
                         cfg['OUTPUT']['output_EnKF_basedir'])
subdir_names = ['global', 'history', 'states', 'logs', 'plots',
                'temp', 'restart_log']
dirs = setup_output_dirs(enkf_root, mkdirs=subdir_names)

# ============================================================ #
# Prepare VIC exe and MPI exe
# ============================================================ #
# VIC executable wrapper and the MPI launcher path
vic_exe = VIC(os.path.join(cfg['CONTROL']['root_dir'], cfg['VIC']['vic_exe']))
mpi_exe = cfg['VIC']['mpi_exe']

# ============================================================ #
# Prepare and run EnKF
# ============================================================ #
print('Preparing for running EnKF...')

# --- Process linear model substitute, if specified --- #
# ==== Пример #4 (Example #4) — scraped-snippet separator ====
# Process some input variables
# ============================================================ #
# SMART run period and timestep settings
smart_cfg = cfg['SMART_RUN']
start_time = pd.to_datetime(smart_cfg['start_time'])
end_time = pd.to_datetime(smart_cfg['end_time'])
start_year, end_year = start_time.year, end_time.year
time_step = smart_cfg['time_step']  # [hour]
window_size = smart_cfg['window_size']  # number of timesteps


# ============================================================ #
# Set up output directory
# ============================================================ #
# The plot output directory is keyed by which SMART output stage to plot
plot_subdir = 'plots.{}'.format(cfg['PLOT']['smart_output_from'])
output_dir = setup_output_dirs(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['OUTPUT']['output_basedir']),
    mkdirs=[plot_subdir])[plot_subdir]

# Subdirectories for maps, time series and data dumps
output_subdir_maps = setup_output_dirs(output_dir, mkdirs=['maps'])['maps']
output_subdir_ts = setup_output_dirs(
    output_dir, mkdirs=['time_series'])['time_series']
output_subdir_data = setup_output_dirs(output_dir, mkdirs=['data'])['data']


# ============================================================ #
# ==== Пример #5 (Example #5) — scraped-snippet separator ====
# VIC executable wrapper
vic_exe = VIC(os.path.join(cfg['CONTROL']['root_dir'], cfg['VIC']['exe']))

# ============================================================ #
# Process cfg data
# ============================================================ #
# Simulation period
start_time = pd.to_datetime(cfg['TIME_INDEX']['start_time'])
end_time = pd.to_datetime(cfg['TIME_INDEX']['end_time'])
start_year, end_year = start_time.year, end_time.year

# ============================================================ #
# Setup postprocess output directories
# ============================================================ #
# Base dir for this test, plus the standard VIC run subdirectories
test_subdir = 'test.truth_sm2_orig_forcing'
basedir = setup_output_dirs(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['OUTPUT']['output_basedir']),
    mkdirs=[test_subdir])[test_subdir]
dirs = setup_output_dirs(
    basedir, mkdirs=['global', 'states', 'history', 'logs'])

# ============================================================ #
# Load data
# ============================================================ #
# Construct time points for synthetic measurement (daily, at a certain hour)
# (1) Determine first and last measurement time point
if start_time.hour >= cfg['TIME_INDEX']['synthetic_meas_hour']:
    # Measurement hour already passed on the start day: start the next day.
    # FIX: pd.datetime was removed in pandas >= 1.0; use pd.Timestamp,
    # which accepts (year, month, day, hour) positionally.
    next_day = start_time + pd.DateOffset(days=1)
    meas_start_time = pd.Timestamp(next_day.year, next_day.month, next_day.day,
                                   cfg['TIME_INDEX']['synthetic_meas_hour'])
else:
    meas_start_time = pd.datetime(start_time.year, start_time.month,
# ==== Пример #6 (Example #6) — scraped-snippet separator ====
                      propagate_linear_model)

# ============================================================ #
# Process command line arguments
# ============================================================ #
# argv[1]: config file path; argv[2]: MPI processor count per VIC run
cfg = read_configobj(sys.argv[1])
mpi_proc = int(sys.argv[2])

# ============================================================ #
# Prepare output directories
# ============================================================ #
openloop_root = os.path.join(cfg['CONTROL']['root_dir'],
                             cfg['OUTPUT']['output_openloop_basedir'])
dirs = setup_output_dirs(
    openloop_root,
    mkdirs=['global', 'history', 'states', 'logs', 'plots'])

# ============================================================ #
# Prepare VIC exe and MPI exe
# ============================================================ #
vic_exe = VIC(os.path.join(cfg['CONTROL']['root_dir'], cfg['VIC']['vic_exe']))
mpi_exe = cfg['VIC']['mpi_exe']

# ============================================================ #
# Open-loop run
# ============================================================ #
# --- Determine open-loop run period --- #
vic_run_start_time = pd.to_datetime(cfg['OPENLOOP']['start_time'])
vic_run_end_time = pd.to_datetime(cfg['OPENLOOP']['end_time'])
# ==== Пример #7 (Example #7) — scraped-snippet separator ====
# Select the precip source directory produced by an earlier pipeline stage
prec_source = cfg['REMAP']['prec_source']
if prec_source == 'post_SMART_spatial_downscale':
    prec_subdir = 'post_spatial_downscaled'
elif prec_source == 'post_SMART':
    prec_subdir = 'post_SMART'
else:
    raise ValueError('Unsupported option for prec_source!')
prec_input_dir = os.path.join(cfg['CONTROL']['root_dir'],
                              cfg['OUTPUT']['output_basedir'],
                              prec_subdir)

# Set up output dir
out_remapped_dir = setup_output_dirs(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['OUTPUT']['output_basedir']),
    mkdirs=['post_final_remapped'])['post_final_remapped']


# ============================================================ #
# Load input precipitation fields, remap, and save
# ============================================================ #
print('Remapping...')
# --- Load domain files --- #
# Target and source domain masks used for remapping
da_domain_target = xr.open_dataset(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['REMAP']['target_domain_nc']))['mask']
da_domain_source = xr.open_dataset(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['REMAP']['source_domain_nc']))['mask']
                            out_time_coord=range(nwindow))['prec_corr_window']
# Process SMART prec ensemble
# (filter flags 2 and 6 are the ensemble variants)
if filter_flag in (2, 6):
    dict_prec_SMART_window_ens = {
        i + 1: run_SMART_prec_corr_ens[:, i, :]
        for i in range(cfg['SMART_RUN']['NUMEN'])}
    dict_da_prec_corr_window_ens = da_2D_to_3D_from_SMART(
        dict_array_2D=dict_prec_SMART_window_ens,
        da_mask=da_mask,
        out_time_varname='window',
        out_time_coord=range(nwindow))

# --- Save window-averaged SMART-corrected prec --- #
# Set up output subdir
out_dir = setup_output_dirs(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['OUTPUT']['output_basedir']),
    mkdirs=['post_SMART'])['post_SMART']
# Save mean window-averaged prec
ds_prec_corrected_window = xr.Dataset(
    {'prec_corrected_window': da_prec_corr_window})
to_netcdf_forcing_file_compress(
    ds_force=ds_prec_corrected_window,
    out_nc=os.path.join(out_dir, 'prec_corrected_window.nc'),
    time_dim='window')


# ============================================================ #
# Rescale orig. prec at orig. timestep based on SMART outputs
# and save to netCDF file
# ============================================================ #
print('Rescaling original prec. and saving to netCDF...')
# --- Rescale SMART-corrected precip --- #
# ==== Пример #9 (Example #9) — scraped-snippet separator ====
        dims=['time', 'lat', 'lon'])
    da_std_3D[:] = da_std
    # Convert 3D da to [npixel, ntime]
    sm_error = da_3D_to_2D_for_SMART({'sm_error': da_std_3D},
                                     da_mask,
                                     time_varname='time')['sm_error']

# Register the soil-moisture error array in the final SMART input dict
dict_array_active['sm_error'] = sm_error

# ============================================================ #
# Save datasets to .mat file
# ============================================================ #
# All outputs of this preparation script go under prep_SMART
smart_basedir = os.path.join(cfg['CONTROL']['root_dir'],
                             cfg['OUTPUT']['output_basedir'])
out_dir = setup_output_dirs(smart_basedir,
                            mkdirs=['prep_SMART'])['prep_SMART']

# Write the SMART inputs as a MATLAB .mat file
savemat(os.path.join(out_dir, 'SMART_input.mat'), dict_array_active)

# ============================================================ #
# Prepare Matlab running bash script
# ============================================================ #

# Set up SMART run output subdir
smart_run_outdir = setup_output_dirs(
    smart_basedir, mkdirs=['run_SMART'])['run_SMART']

# Open file for writing
# ==== Пример #10 (Example #10) — scraped-snippet separator ====
                            cfg['OUTPUT']['output_EnKF_basedir'], 'history',
                            'EnKF_ensemble_concat')
# Measurement netCDF path and EnKF settings from the config
meas_nc = os.path.join(cfg['CONTROL']['root_dir'], cfg['EnKF']['meas_nc'])
meas_sm1_varname = cfg['EnKF']['meas_var_name']
R = cfg['EnKF']['R']  # Measurement error variance
N = cfg['EnKF']['N']  # Number of ensemble members

# Output
# Plots are written under the postprocess base directory
output_basedir = os.path.join(cfg['CONTROL']['root_dir'],
                              cfg['OUTPUT']['output_postprocess_basedir'],
                              'plots')

# ======================================================== #
# Setup output directory
# ======================================================== #
output_dir = setup_output_dirs(output_basedir, mkdirs=['maps'])['maps']

# ======================================================== #
# Load and process data
# ======================================================== #
print('Loading data...')
# --- Load data --- #
# Open-loop, postprocess, truth and measurement datasets.
# NOTE(review): openloop_hist_nc / postprocess_hist_nc / truth_hist_nc are
# defined earlier in the full script (not visible in this excerpt) — confirm.
ds_openloop = xr.open_dataset(openloop_hist_nc)
ds_postprocess = xr.open_dataset(postprocess_hist_nc)
ds_truth = xr.open_dataset(truth_hist_nc)
ds_meas = xr.open_dataset(meas_nc)
# Load ensemble data
list_ds_ens = []
for i in range(N):
    list_ds_ens.append(
        xr.open_dataset(
# ==== Пример #11 (Example #11) — scraped-snippet separator ====
# Read config file from the first command-line argument
cfg = read_configobj(sys.argv[1])


# ============================================================ #
# Parameter setting
# ============================================================ #
# Processing period and the output root directory
start_date = pd.to_datetime(cfg['TIME']['start_date'])
end_date = pd.to_datetime(cfg['TIME']['end_date'])
output_dir = cfg['OUTPUT']['output_dir']


# ============================================================ #
# Setup output subdirs
# ============================================================ #
# One subdirectory each for plots, unscaled data, scaled data and temp files
output_subdir_plots = setup_output_dirs(output_dir, mkdirs=['plots'])['plots']
output_subdir_data_unscaled = setup_output_dirs(
    output_dir, mkdirs=['data_unscaled'])['data_unscaled']
output_subdir_data_scaled = setup_output_dirs(
    output_dir, mkdirs=['data_scaled'])['data_scaled']
output_subdir_tmp = setup_output_dirs(output_dir, mkdirs=['tmp'])['tmp']


# ============================================================ #
# Determine SMAP domain needed based on VIC domain
# ============================================================ #
print('Determing SMAP domain...')
# --- Load VIC domain --- #
ds_vic_domain = xr.open_dataset(cfg['DOMAIN']['vic_domain_nc'])
da_vic_domain = ds_vic_domain[cfg['DOMAIN']['mask_name']]
# --- Load one example SMAP file --- #
da_smap_example, da_flag_example = extract_smap_multiple_days(
    os.path.join(cfg['INPUT']['smap_dir'], 'SMAP_L3_SM_P_{}_*.h5'),
# ==== Пример #12 (Example #12) — scraped-snippet separator ====
# =========================================================== #
# Set random generation seed
# =========================================================== #
# Seed NumPy's global RNG so the synthetic experiment is reproducible
np.random.seed(cfg['CONTROL']['seed'])

# =========================================================== #
# Process some config parameters
# =========================================================== #
print('Processing config parameters...')
# Simulation period
start_time, end_time = (pd.to_datetime(cfg['TIME_INDEX'][key])
                        for key in ('start_time', 'end_time'))

# Set up output sub-directories
base = os.path.join(cfg['CONTROL']['root_dir'],
                    cfg['OUTPUT']['output_basedir'])
dirs = setup_output_dirs(base, mkdirs=['truth', 'synthetic_meas', 'plots'])
truth_subdirs = setup_output_dirs(
    dirs['truth'], mkdirs=['global', 'history', 'states', 'logs'])

# Construct time points for synthetic measurement (daily, at a certain hour)
# (1) Determine first and last measurement time point
if start_time.hour >= cfg['TIME_INDEX']['synthetic_meas_hour']:
    # Measurement hour already passed on the start day -> start next day.
    # FIX: pd.datetime was removed in pandas >= 1.0; use pd.Timestamp.
    next_day = start_time + pd.DateOffset(days=1)
    meas_start_time = pd.Timestamp(next_day.year, next_day.month, next_day.day,
                                   cfg['TIME_INDEX']['synthetic_meas_hour'])
else:
    meas_start_time = pd.Timestamp(start_time.year, start_time.month,
                                   start_time.day,
                                   cfg['TIME_INDEX']['synthetic_meas_hour'])
if end_time.hour <= cfg['TIME_INDEX']['synthetic_meas_hour']:
# =========================================================== #
# Set random generation seed
# =========================================================== #
# Fix the global NumPy seed for reproducibility
np.random.seed(cfg['CONTROL']['seed'])

# =========================================================== #
# Process some config parameters
# =========================================================== #
print('Processing config parameters...')
# Simulation window from the config
time_cfg = cfg['TIME_INDEX']
start_time = pd.to_datetime(time_cfg['start_time'])
end_time = pd.to_datetime(time_cfg['end_time'])

# Set up output sub-directories
output_base = os.path.join(cfg['CONTROL']['root_dir'],
                           cfg['OUTPUT']['output_basedir'])
dirs = setup_output_dirs(output_base,
                         mkdirs=['truth', 'synthetic_meas', 'plots'])
truth_subdirs = setup_output_dirs(
    dirs['truth'], mkdirs=['global', 'history', 'states', 'logs'])

# Construct time points for synthetic measurement (daily, at a certain hour)
# (1) Determine first and last measurement time point
if start_time.hour >= cfg['TIME_INDEX']['synthetic_meas_hour']:
    # FIX: pd.datetime was removed in pandas >= 1.0; use pd.Timestamp,
    # which accepts (year, month, day, hour) positionally.
    next_day = start_time + pd.DateOffset(days=1)
    meas_start_time = pd.Timestamp(next_day.year, next_day.month, next_day.day,
                                   cfg['TIME_INDEX']['synthetic_meas_hour'])
else:
    meas_start_time = pd.Timestamp(start_time.year, start_time.month,
                                   start_time.day,
                                   cfg['TIME_INDEX']['synthetic_meas_hour'])
if end_time.hour <= cfg['TIME_INDEX']['synthetic_meas_hour']:
    # Convert 3D da to [npixel, ntime]
    sm_error = da_3D_to_2D_for_SMART(
        {'sm_error': da_std_3D},
        da_mask,
        time_varname='time')['sm_error']
    
# Store the soil-moisture error field in the SMART input dictionary
dict_array_active['sm_error'] = sm_error


# ============================================================ #
# Save datasets to .mat file
# ============================================================ #
# Outputs of this preparation script live under <basedir>/prep_SMART
prep_root = os.path.join(cfg['CONTROL']['root_dir'],
                         cfg['OUTPUT']['output_basedir'])
out_dir = setup_output_dirs(prep_root, mkdirs=['prep_SMART'])['prep_SMART']

# Dump all SMART inputs into a single MATLAB file
savemat(os.path.join(out_dir, 'SMART_input.mat'), dict_array_active)


# ============================================================ #
# Prepare Matlab running bash script
# ============================================================ #

# Set up SMART run output subdir
smart_run_outdir = setup_output_dirs(
    prep_root, mkdirs=['run_SMART'])['run_SMART']
# ==== Пример #15 (Example #15) — scraped-snippet separator ====
# ============================================================ #
# Process command line arguments
# ============================================================ #
# argv[1] is the config file; argv[2] the MPI processor count per VIC run
cfg = read_configobj(sys.argv[1])
mpi_proc = int(sys.argv[2])


# ============================================================ #
# Prepare output directories
# ============================================================ #
openloop_base = os.path.join(cfg['CONTROL']['root_dir'],
                             cfg['OUTPUT']['output_openloop_basedir'])
dirs = setup_output_dirs(
    openloop_base, mkdirs=['global', 'history', 'states', 'logs', 'plots'])


# ============================================================ #
# Prepare VIC exe and MPI exe
# ============================================================ #
vic_exe = VIC(os.path.join(cfg['CONTROL']['root_dir'], cfg['VIC']['vic_exe']))
mpi_exe = cfg['VIC']['mpi_exe']


# ============================================================ #
# Open-loop run
# ============================================================ #
# --- Determine open-loop run period --- #
vic_run_start_time = pd.to_datetime(cfg['OPENLOOP']['start_time'])
# ==== Пример #16 (Example #16) — scraped-snippet separator ====
# =========================================================== #
# Seed the global NumPy RNG for reproducibility
np.random.seed(cfg['CONTROL']['seed'])

# =========================================================== #
# Process some config parameters
# =========================================================== #
print('Processing config parameters...')
# Simulation period
start_time = pd.to_datetime(cfg['TIME_INDEX']['start_time'])
end_time = pd.to_datetime(cfg['TIME_INDEX']['end_time'])
start_year, end_year = start_time.year, end_time.year

# Identify output sub-directories
root_outdir = os.path.join(cfg['CONTROL']['root_dir'],
                           cfg['OUTPUT']['output_basedir'])
dirs = setup_output_dirs(root_outdir,
                         mkdirs=['truth', 'synthetic_meas', 'plots'])
truth_subdirs = setup_output_dirs(
    dirs['truth'], mkdirs=['global', 'history', 'states', 'logs'])
# VIC global template file
global_template = os.path.join(cfg['CONTROL']['root_dir'],
                               cfg['VIC']['vic_global_template'])

# =========================================================== #
# Setup output directory for rescaled truth and measurements
# =========================================================== #
rescaled_name = 'truth_rescaled_v1_direct2ndMoment'
truth_rescaled_dir = setup_output_dirs(
    root_outdir, [rescaled_name])[rescaled_name]
truth_rescaled_subdirs = setup_output_dirs(
    truth_rescaled_dir, mkdirs=['global', 'history', 'states', 'logs'])
# ==== Пример #17 (Example #17) — scraped-snippet separator ====
# Process some input variables
# ============================================================ #
# SMART run period
start_time = pd.to_datetime(cfg['SMART_RUN']['start_time'])
end_time = pd.to_datetime(cfg['SMART_RUN']['end_time'])
start_year, end_year = start_time.year, end_time.year


# ============================================================ #
# Identify and set up subdirs
# ============================================================ #
# Existing post_SMART dir (input) and new spatially-downscaled dir (output)
base_out = os.path.join(cfg['CONTROL']['root_dir'],
                        cfg['OUTPUT']['output_basedir'])
out_post_dir = os.path.join(base_out, 'post_SMART')
out_post_regridded_dir = setup_output_dirs(
    base_out,
    mkdirs=['post_spatial_downscaled'])['post_spatial_downscaled']


# ============================================================ #
# Load original and corrected (postprocessed, original timestep) prec data
# ============================================================ #
print('Loading data...')

# --- Load original prec (original resolution to regrid to) --- #
da_prec_orig = load_nc_and_concat_var_years(
                    basepath=os.path.join(cfg['CONTROL']['root_dir'],
                                          cfg['SPATIAL_DOWNSCALE']['prec_orig_resolution_basepath']),
                    start_year=start_year,
                    end_year=end_year,
                    dict_vars={'prec_orig': cfg['SPATIAL_DOWNSCALE']['prec_orig_varname']})\
# ==== Пример #18 (Example #18) — scraped-snippet separator ====
# Locate the SMART output directory matching the configured source stage
if cfg['RUN_VIC']['smart_output_from'] == 'post':
    smart_outdir = os.path.join(
        cfg['CONTROL']['root_dir'], cfg['OUTPUT']['output_basedir'], 'post_SMART')
elif cfg['RUN_VIC']['smart_output_from'] == 'spatial_downscale':
    smart_outdir = os.path.join(
        cfg['CONTROL']['root_dir'], cfg['OUTPUT']['output_basedir'], 'post_spatial_downscaled')
elif cfg['RUN_VIC']['smart_output_from'] == 'remap':
    smart_outdir = os.path.join(
        cfg['CONTROL']['root_dir'], cfg['OUTPUT']['output_basedir'], 'post_final_remapped')
else:
    # FIX: previously an unknown option fell through silently, leaving
    # smart_outdir undefined and causing a confusing NameError later.
    # Fail fast instead (consistent with the prec_source handling elsewhere).
    raise ValueError('Unsupported option for smart_output_from!')

# VIC-run subdirectory name is configurable; default to 'run_vic'
run_vic_subdir = (cfg['RUN_VIC']['run_vic_subdir']
                  if 'run_vic_subdir' in cfg['RUN_VIC'] else 'run_vic')
output_basedir = setup_output_dirs(
    smart_outdir, mkdirs=[run_vic_subdir])[run_vic_subdir]

dirs = setup_output_dirs(
    output_basedir,
    mkdirs=['global', 'history', 'forcings', 'logs', 'plots'])


# ============================================================ #
# Generate forcings for vic run - combine SMART-corrected
# precip with other met variables
# ============================================================ #
# ----------------------------------------------------------------- #
print('Replacing precip data...')
# Set flag for whether to delete the forcing file after running
forcing_delete = 0
# ==== Пример #19 (Example #19) — scraped-snippet separator ====
# ============================================================ #
# Read config file
cfg = read_configobj(sys.argv[1])

# ============================================================ #
# Parameter setting
# ============================================================ #
# Processing period and output root
start_date = pd.to_datetime(cfg['TIME']['start_date'])
end_date = pd.to_datetime(cfg['TIME']['end_date'])
output_dir = cfg['OUTPUT']['output_dir']

# ============================================================ #
# Setup output subdirs
# ============================================================ #
# One subdirectory per output category
subdirs = {name: setup_output_dirs(output_dir, mkdirs=[name])[name]
           for name in ('data', 'plots', 'tmp')}
output_subdir_data = subdirs['data']
output_subdir_plots = subdirs['plots']
output_subdir_tmp = subdirs['tmp']

# ============================================================ #
# Load and process SMAP data
# ============================================================ #
print('Loading and processing SMAP data...')
# --- Load data --- #
print('Extracting SMAP data')
# If SMAP data is already processed before, directly load
# NOTE(review): `is True` requires a genuine bool here — confirm the config
# parser returns a bool for smap_exist rather than the string 'True'
if cfg['INPUT']['smap_exist'] is True:
    # --- Load processed SMAP data --- #
    da_smap = xr.open_dataset(
        cfg['INPUT']['smap_unscaled_nc'])['soil_moisture']
    # --- Extract AM and PM time points --- #
# ==== Пример #20 (Example #20) — scraped-snippet separator ====
# Seed the global RNG for reproducible perturbations
np.random.seed(cfg['CONTROL']['seed'])


# =========================================================== #
# Process some config parameters
# =========================================================== #
print('Processing config parameters...')
# Simulation period
time_index = cfg['TIME_INDEX']
start_time = pd.to_datetime(time_index['start_time'])
end_time = pd.to_datetime(time_index['end_time'])
start_year, end_year = start_time.year, end_time.year

# Identify output sub-directories
dirs = setup_output_dirs(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['OUTPUT']['output_basedir']),
    mkdirs=['truth', 'synthetic_meas', 'plots'])
truth_subdirs = setup_output_dirs(
    dirs['truth'],
    mkdirs=['global', 'history', 'states', 'logs'])
# VIC global template file
global_template = os.path.join(cfg['CONTROL']['root_dir'],
                               cfg['VIC']['vic_global_template'])


# =========================================================== #
# Setup output directory for rescaled truth and measurements
# =========================================================== #
truth_rescaled_dir = setup_output_dirs(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['OUTPUT']['output_basedir']),
# ==== Пример #21 (Example #21) — scraped-snippet separator ====
            cfg['CONTROL']['root_dir'],
            cfg['EnKF']['meas_nc'])
# EnKF measurement settings
meas_sm1_varname = cfg['EnKF']['meas_var_name']
R = cfg['EnKF']['R']  # Measurement error variance
N = cfg['EnKF']['N']  # Number of ensemble members

# Output
# Plots are written under the postprocess base directory
output_basedir = os.path.join(cfg['CONTROL']['root_dir'],
                              cfg['OUTPUT']['output_postprocess_basedir'],
                              'plots')

# ======================================================== #
# Setup output directory
# ======================================================== #
output_dir = setup_output_dirs(output_basedir, mkdirs=['maps'])['maps']

# ======================================================== #
# Load and process data
# ======================================================== #
print('Loading data...')
# --- Load data --- #
# Open each history/measurement netCDF as an xarray Dataset
ds_openloop = xr.open_dataset(openloop_hist_nc)
ds_postprocess = xr.open_dataset(postprocess_hist_nc)
ds_truth = xr.open_dataset(truth_hist_nc)
ds_meas = xr.open_dataset(meas_nc)
# Load ensemble data
list_ds_ens = []
for i in range(N):
    list_ds_ens.append(xr.open_dataset(os.path.join(
                ens_hist_dir,
# ==== Пример #22 (Example #22) — scraped-snippet separator ====
    out_time_coord=range(nwindow))['prec_corr_window']
# Process SMART prec ensemble
# (filter flags 2 and 6 are the ensemble variants)
if filter_flag in (2, 6):
    dict_prec_SMART_window_ens = {
        i + 1: list_run_SMART_prec_corr_ens[i]
        for i in range(cfg['SMART_RUN']['NUMEN'])}
    dict_da_prec_corr_window_ens = da_2D_to_3D_from_SMART(
        dict_array_2D=dict_prec_SMART_window_ens,
        da_mask=da_mask,
        out_time_varname='window',
        out_time_coord=range(nwindow))

# --- Save window-averaged SMART-corrected prec --- #
# Set up output subdir
post_smart_root = os.path.join(cfg['CONTROL']['root_dir'],
                               cfg['OUTPUT']['output_basedir'])
out_dir = setup_output_dirs(post_smart_root,
                            mkdirs=['post_SMART'])['post_SMART']
# Save mean window-averaged prec
ds_prec_corrected_window = xr.Dataset(
    {'prec_corrected_window': da_prec_corr_window})
to_netcdf_forcing_file_compress(
    ds_force=ds_prec_corrected_window,
    out_nc=os.path.join(out_dir, 'prec_corrected_window.nc'),
    time_dim='window')

# ============================================================ #
# Save perturbed prec ensemble
# ============================================================ #
print('Saving perturbed precip ensemble...')
# --- Process perturbed prec data to da --- #
if filter_flag == 2 or filter_flag == 6:
# ============================================================ #
# Process cfg data
# ============================================================ #
# EnKF run period
start_time = pd.to_datetime(cfg['EnKF']['start_time'])
end_time = pd.to_datetime(cfg['EnKF']['end_time'])
start_year, end_year = start_time.year, end_time.year


# ============================================================ #
# Setup postprocess output directories
# ============================================================ #
postprocess_root = os.path.join(
    cfg['CONTROL']['root_dir'],
    cfg['POSTPROCESS']['output_postprocess_basedir'])
dirs = setup_output_dirs(
    postprocess_root,
    mkdirs=['global', 'history', 'forcings', 'logs', 'plots'])


# ============================================================ #
# Load data
# ============================================================ #
# --- Load measurement data --- #
ds_meas_orig = xr.open_dataset(os.path.join(cfg['CONTROL']['root_dir'],
                                            cfg['EnKF']['meas_nc']))
da_meas_orig = ds_meas_orig[cfg['EnKF']['meas_var_name']]
# Only select out the period within the EnKF run period
da_meas = da_meas_orig.sel(time=slice(start_time, end_time))
# Convert da_meas dimension to [time, lat, lon, m] (currently m = 1)
time = da_meas['time']
lat = da_meas['lat']

# ============================================================ #
# Process some input variables
# ============================================================ #
# FIX: pd.datetime was removed in pandas >= 1.0; pd.to_datetime with an
# explicit format parses the same "%Y-%m-%d" dates and returns a Timestamp
# (a datetime subclass, so downstream .year access is unchanged).
start_date = pd.to_datetime(cfg['SMART_RUN']['start_date'], format="%Y-%m-%d")
end_date = pd.to_datetime(cfg['SMART_RUN']['end_date'], format="%Y-%m-%d")
start_year = start_date.year
end_year = end_date.year


# ============================================================ #
# Set up output directory
# ============================================================ #
output_dir = setup_output_dirs(
    os.path.join(cfg['CONTROL']['root_dir'],
                 cfg['OUTPUT']['output_basedir']),
    mkdirs=['plots'])['plots']

# Subdirectories for map plots and time-series plots
output_subdir_maps = setup_output_dirs(output_dir, mkdirs=['maps'])['maps']
output_subdir_ts = setup_output_dirs(
    output_dir, mkdirs=['time_series'])['time_series']


# ============================================================ #
# Load data
# ============================================================ #
print('Loading data...')
# ==== Пример #25 (Example #25) — scraped-snippet separator ====
# ============================================================ #
# Process cfg data
# ============================================================ #
# Ensemble size and EnKF run period
N = cfg['EnKF']['N']  # number of ensemble members
start_time = pd.to_datetime(cfg['EnKF']['start_time'])
end_time = pd.to_datetime(cfg['EnKF']['end_time'])
start_year, end_year = start_time.year, end_time.year

# ============================================================ #
# Setup postprocess output directories
# ============================================================ #
postprocess_base = os.path.join(
    cfg['CONTROL']['root_dir'],
    cfg['POSTPROCESS']['output_postprocess_basedir'])
dirs = setup_output_dirs(
    postprocess_base,
    mkdirs=['global', 'history', 'forcings', 'logs', 'plots'])

# ============================================================ #
# Load data
# ============================================================ #
# --- Load measurement data --- #
ds_meas_orig = xr.open_dataset(
    os.path.join(cfg['CONTROL']['root_dir'], cfg['EnKF']['meas_nc']))
da_meas_orig = ds_meas_orig[cfg['EnKF']['meas_var_name']]
# Only select out the period within the EnKF run period
da_meas = da_meas_orig.sel(time=slice(start_time, end_time))
# Convert da_meas dimension to [time, lat, lon, m] (currently m = 1)
time = da_meas['time']
lat = da_meas['lat']
lon = da_meas['lon']
# ==== Пример #26 (Example #26) — scraped-snippet separator ====
# Set random generation seed
# =========================================================== #
# Reproducible RNG for the synthetic-truth experiment
np.random.seed(cfg['CONTROL']['seed'])


# =========================================================== #
# Process some config parameters
# =========================================================== #
print('Processing config parameters...')
# Simulation period
start_time = pd.to_datetime(cfg['TIME_INDEX']['start_time'])
end_time = pd.to_datetime(cfg['TIME_INDEX']['end_time'])

# Set up output sub-directories
truth_base = os.path.join(cfg['CONTROL']['root_dir'],
                          cfg['OUTPUT']['output_basedir'])
dirs = setup_output_dirs(truth_base,
                         mkdirs=['truth', 'synthetic_meas', 'plots'])
truth_subdirs = setup_output_dirs(
    dirs['truth'],
    mkdirs=['global', 'history', 'states', 'logs'])
# Construct time points for synthetic measurement (daily, at a certain hour)
# (1) Determine first and last measurement time point
if start_time.hour >= cfg['TIME_INDEX']['synthetic_meas_hour']:
    # FIX: pd.datetime was removed in pandas >= 1.0; use pd.Timestamp,
    # which accepts (year, month, day, hour) positionally.
    next_day = start_time + pd.DateOffset(days=1)
    meas_start_time = pd.Timestamp(next_day.year, next_day.month, next_day.day,
                                   cfg['TIME_INDEX']['synthetic_meas_hour'])
else:
    meas_start_time = pd.Timestamp(start_time.year, start_time.month,
                                   start_time.day,
                                   cfg['TIME_INDEX']['synthetic_meas_hour'])
# (2) Determine the last measurement time point
# (this branch continues past this excerpt)
if end_time.hour <= cfg['TIME_INDEX']['synthetic_meas_hour']:
    # Measurement hour not yet reached on the end day -> step back one day
    last_day = end_time - pd.DateOffset(days=1)