Example #1
def _run(args):
    """Top level driver"""
    print('running now...')

    if args.create_batch:
        # ------------------------------------------------------------ #
        # Create batch files and exit
        batch(args.config_file, args.create_batch, args.batch_dir)
        # ------------------------------------------------------------ #
    else:
        # ------------------------------------------------------------ #
        # Read Configuration files
        config_dict = read_config(args.config_file,
                                  default_config=default_config)
        options = config_dict.pop('OPTIONS')
        global_atts = config_dict.pop('GLOBAL_ATTRIBUTES')
        if not options['regular_grid']:
            domain_dict = config_dict.pop('DOMAIN')
        else:
            domain_dict = None

        # set aside fields dict
        fields = config_dict

        vic2nc(options, global_atts, domain_dict, fields)
        # ------------------------------------------------------------ #
    return
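A minimal sketch of the command-line wrapper this driver implies, assuming an
argparse interface; the flag names are inferred from the attributes that _run
reads (config_file, create_batch, batch_dir) and are not from the original
project:

# Hypothetical CLI wrapper for _run above; argument names are assumptions.
import argparse

def main():
    parser = argparse.ArgumentParser(description='Convert VIC output to netCDF')
    parser.add_argument('config_file', help='vic2nc configuration file')
    parser.add_argument('--create-batch', dest='create_batch', default=False,
                        choices=['days', 'months', 'years', 'variables'],
                        help='split the configuration into batch files and exit')
    parser.add_argument('--batch-dir', dest='batch_dir', default='./',
                        help='directory for the batch configuration files')
    _run(parser.parse_args())

if __name__ == '__main__':
    main()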
Example #2
File: netcdf2vic.py Project: tbohn/tonic
def _run(args):

    config = read_config(args.config)
    files = config["options"]["files"]
    var_keys = config["options"]["var_keys"]
    output = config["options"]["output"]
    binary_mult = config["options"]["binary_mult"]
    binary_type = config["options"]["binary_type"]
    paths = config["options"]["paths"]
    out_prefix = config["options"]["out_prefix"]
    verbose = config["options"]["verbose"]

    mask = read_netcdf(paths["mask_path"], nc_vars=["mask"])["mask"]
    yi, xi = np.nonzero(mask)
    print("found {0} points in mask file.".format(len(yi)))

    xlist = []
    ylist = []
    pointlist = []
    append = False

    for i, fname in enumerate(files):
        d = read_netcdf(os.path.join(paths["in_path"], fname), verbose=verbose)

        if i == 0:

            # find point locations
            xs = d["xc"]
            ys = d["yc"]
            posinds = np.nonzero(xs > 180)
            xs[posinds] -= 360
            print("adjusted xs lon minimum")

            for y, x in pyzip(yi, xi):
                active_flag = False
                for key in var_keys:
                    if (d[key][:, y, x].all() is np.ma.masked) or (mask[y, x] == 0):
                        active_flag = True
                if not active_flag:
                    point = (ys[y, x], xs[y, x])
                    xlist.append(x)
                    ylist.append(y)
                    pointlist.append(point)

        else:
            append = True

        for y, x, point in pyzip(ylist, xlist, pointlist):

            data = np.empty((d[var_keys[0]].shape[0], len(var_keys)))

            for j, key in enumerate(var_keys):
                data[:, j] = d[key][:, y, x]

            if output["Binary"]:
                write_binary(data * binary_mult, point, binary_type, out_prefix, paths["BinaryoutPath"], append)
            if output["ASCII"]:
                write_ascii(data, point, out_prefix, paths["ASCIIoutPath"], append)
    return
Example #3
def batch(config_file, create_batch, batch_dir):
    """Create a set of batch configuration files"""

    # Read Configuration files
    config_dict = read_config(config_file)
    options = config_dict.pop('OPTIONS')
    global_atts = config_dict.pop('GLOBAL_ATTRIBUTES')
    domain_dict = config_dict.pop('DOMAIN', None)
    fields = config_dict

    config = SafeConfigParser()
    config.optionxform = str

    # Figure out what to call the new files
    nameprefix = os.path.splitext(os.path.split(config_file)[1])[0]

    if create_batch == 'variables':
        # batch by variables
        # options section
        config.add_section('OPTIONS')
        for option, value in options.items():
            if type(value) == list:
                try:
                    value = ", ".join(value)
                except TypeError:
                    value = ", ".join(repr(e) for e in value)
            elif type(value) != str:
                value = str(value)
            config.set('OPTIONS', option, str(value))

        # global_atts section
        config.add_section('GLOBAL_ATTRIBUTES')
        for option, value in global_atts.items():
            if type(value) == list:
                try:
                    value = ", ".join(value)
                except TypeError:
                    value = ", ".join(repr(e) for e in value)
            elif type(value) != str:
                value = str(value)
            config.set('GLOBAL_ATTRIBUTES', option, str(value))

        # domain dict section
        if domain_dict:
            config.add_section('DOMAIN')
            for option, value in domain_dict.items():
                if type(value) == list:
                    try:
                        value = ", ".join(value)
                    except TypeError:
                        value = ", ".join(repr(e) for e in value)
                elif type(value) != str:
                    value = str(value)

                config.set('DOMAIN', option, value.strip("'"))

        for var, field in fields.items():
            suffix = "_{0}.cfg".format(var)
            new_cfg_file = os.path.join(batch_dir, nameprefix + suffix)

            # this var
            config.add_section(var)
            for option, value in field.items():
                if type(value) == list:
                    try:
                        value = ", ".join(value)
                    except TypeError:
                        value = ", ".join(repr(e) for e in value)
                elif type(value) != str:
                    value = str(value)
                config.set(var, option, str(value))

            # write that config
            with open(new_cfg_file, 'w') as cf:  # text mode for configparser.write()
                config.write(cf)

            # clear the var section
            config.remove_section(var)

    else:
        # start with existing config
        config.read(config_file)

        # by time
        start_date = datetime.strptime(options['start_date'], TIMESTAMPFORM)
        end_date = datetime.strptime(options['end_date'], TIMESTAMPFORM)

        t0 = start_date

        if create_batch == 'years':
            td = relativedelta.relativedelta(years=1)
            t1 = datetime(t0.year, 12, 31, end_date.hour)
        elif create_batch == 'months':
            td = relativedelta.relativedelta(months=1)
        elif create_batch == 'days':
            # days option is only valid for gregorian calendar
            td = relativedelta.relativedelta(days=1)

        hour = relativedelta.relativedelta(hours=-1)

        i = 0
        while t0 < end_date:
            i += 1
            t1 = t0 + td
            if t1 > end_date:
                t1 = end_date
            else:
                t1 += hour

            suffix = '_{0}'.format(i)
            new_cfg_file = os.path.join(batch_dir, nameprefix + suffix)

            # Write config replacing start and end dates
            config.set('OPTIONS', 'start_date', t0.strftime(TIMESTAMPFORM))
            config.set('OPTIONS', 'end_date', t1.strftime(TIMESTAMPFORM))

            with open(new_cfg_file, 'w') as cf:  # text mode for configparser.write()
                config.write(cf)

            t0 += td
    return
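A usage sketch with placeholder paths: splitting one conversion job into
monthly configuration files would look like this (create_batch may be
'variables', 'years', 'months', or 'days', per the branches above):

# Hypothetical invocation; 'vic2nc.cfg' and './batch' are placeholders.
batch('vic2nc.cfg', 'months', './batch')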
Example #4
File: run_tests.py Project: argansos/VIC
def run_examples(config_file, vic_exe, test_data_dir, out_dir, driver):
    '''Run examples tests from config file

    Parameters
    ----------
    config_file : str
        Configuration file for example tests.
    vic_exe : VIC (object)
        VIC executable object (see tonic documentation).
    test_data_dir : str
        Path to test data sets.
    out_dir : str
        Path to output location.
    driver : {'classic', 'image'}
        Driver to run tests on.

    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.

    See Also
    --------
    run_unit_tests
    run_system
    run_science
    run_release
    '''

    # Print test set welcome
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running Examples')
    print('-'.ljust(OUTPUT_WIDTH, '-'))

    # Get setup
    config = read_config(config_file)

    # drop invalid driver tests
    config = drop_tests(config, driver)

    test_results = OrderedDict()

    # Run individual examples
    for i, (testname, test_dict) in enumerate(config.items()):

        # print out status info
        print('Running test {0}/{1}: {2}'.format(i + 1, len(config.items()),
                                                 testname))

        # Setup directories for test
        dirs = setup_test_dirs(testname, out_dir,
                               mkdirs=['results', 'state', 'logs', 'plots'])

        # read template global parameter file
        infile = os.path.join(test_dir, 'examples',
                              test_dict['global_parameter_file'])

        with open(infile, 'r') as global_file:
            global_param = global_file.read()

        # create template string
        s = string.Template(global_param)

        # fill in global parameter options
        global_param = s.safe_substitute(test_data_dir=test_data_dir,
                                         result_dir=dirs['results'],
                                         state_dir=dirs['state'],
                                         testname=testname,
                                         test_root=test_dir)

        test_global_file = os.path.join(dirs['test'],
                                        '{0}_globalparam.txt'.format(testname))

        # write global parameter file
        with open(test_global_file, 'w') as f:
            f.write(global_param)

        # Get optional kwargs for run executable
        run_kwargs = pop_run_kwargs(test_dict)

        # run VIC
        test_complete = False
        test_passed = False
        test_comment = ''
        error_message = ''
        returncode = None  # defined up front so the except path can still report it

        try:
            # Run the VIC simulation
            returncode = vic_exe.run(test_global_file, logdir=dirs['logs'],
                                     **run_kwargs)
            test_complete = True

            # Check return code
            check_returncode(vic_exe)

            # check output files (different tests depending on driver)
            if test_dict['check']:
                fnames = glob.glob(os.path.join(dirs['results'], '*'))

                # Check that the simulation completed for all grid cells
                if 'complete' in test_dict['check'] and driver == 'classic':
                    test_classic_driver_all_complete(fnames)

                # check for nans in all example files
                if 'output_file_nans' in test_dict['check']:
                    if driver == 'classic':
                        test_classic_driver_no_output_file_nans(fnames)
                    elif driver == 'image':
                        domain_file = os.path.join(test_data_dir,
                                                   test_dict['domain_file'])
                        test_image_driver_no_output_file_nans(fnames,
                                                              domain_file)
                    else:
                        raise ValueError('unknown driver')

            # if we got this far, the test passed.
            test_passed = True

        # Handle errors
        except Exception as e:
            test_comment, error_message = process_error(e, vic_exe)

        # record the test results
        test_results[testname] = TestResults(testname,
                                             test_complete=test_complete,
                                             passed=test_passed,
                                             comment=test_comment,
                                             error_message=error_message,
                                             returncode=returncode)

    # Print examples footer
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    print('Finished testing examples.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))

    return test_results
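A hypothetical invocation sketch; all paths are placeholders, and the VIC
executable wrapper is assumed to come from tonic's VIC class:

# Hypothetical usage of run_examples; paths are placeholders.
from tonic.models.vic.vic import VIC

vic_exe = VIC('/path/to/vic_classic.exe')
results = run_examples('examples.cfg', vic_exe,
                       test_data_dir='/path/to/test_data',
                       out_dir='/path/to/out', driver='classic')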
Example #5
def nc_to_vic(config_file):
    ''' Convert netCDF files to VIC ASCII-format files.
        (This function is adapted from tonic.)

        Parameters
        ----------
        config_file: <str>
            Path of config file for nc_to_vic

        Returns
        -------
        None

        Requires
        --------
        write_binary
    '''
    
    import numpy as np
    import struct
    import os
    from tonic.io import read_netcdf, read_config
    from tonic.pycompat import pyzip
    
    config = read_config(config_file)
    files = config['options']['files']  # should contain "{}", which will be replaced by YYYY
    var_keys = config['options']['var_keys']
    output_format = config['options']['output_format']  # Binary or ASCII
    # binary_mult and binary_type are needed for Binary output below; the
    # original snippet leaves them undefined, so assumed defaults are read here
    binary_mult = config['options'].get('binary_mult', 1)
    binary_type = config['options'].get('binary_type', 'int')
    out_prefix = config['options']['out_prefix']
    verbose = config['options']['verbose']
    coord_keys = config['options']['coord_keys']  # varname of lon and lat in netCDF files
    lon_name = coord_keys[0]
    lat_name = coord_keys[1]
    start_year = config['options']['start_year']
    end_year = config['options']['end_year']
    latlon_precision = config['options']['latlon_precision']
    
    paths = config['paths']
    mask_varname = paths['mask_varname']

    mask = read_netcdf(paths['mask_path'],
                       variables=[mask_varname])[0][mask_varname]
    yi, xi = np.nonzero(mask)
    print('found {0} points in mask file.'.format(len(yi)))

    xlist = []
    ylist = []
    pointlist = []
    append = False

    for i, year in enumerate(range(start_year, end_year+1)):
        print('Year {}'.format(year))
        fname = files.format(year)
        d = read_netcdf(os.path.join(paths['in_path'], fname),
                        verbose=verbose)[0]

        if i == 0:

            # find point locations
            xs = d[lon_name]
            ys = d[lat_name]
            posinds = np.nonzero(xs > 180)
            xs[posinds] -= 360
            print('adjusted xs lon minimum')

            for y, x in pyzip(yi, xi):
                active_flag = False
                for key in var_keys:
                    if (d[key][:, y, x].all() is np.ma.masked) \
                            or (mask[y, x] == 0):
                        active_flag = True
                if not active_flag:
                    point = (ys[y], xs[x])
                    xlist.append(x)
                    ylist.append(y)
                    pointlist.append(point)

        else:
            append = True

        for y, x, point in pyzip(ylist, xlist, pointlist):

            data = np.empty((d[var_keys[0]].shape[0], len(var_keys)))

            for j, key in enumerate(var_keys):
                data[:, j] = d[key][:, y, x]

            if output_format == 'Binary':
                write_binary(data * binary_mult, point, binary_type,
                             out_prefix, paths['BinaryoutPath'], append)
            if output_format == 'ASCII':
                write_ascii(data, point, out_prefix, paths['ASCIIoutPath'],
                            latlon_precision, append)
    return
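A minimal configuration sketch for nc_to_vic, reconstructed from the keys the
function reads; every value is a placeholder:

[options]
files: forcings.{}.nc
var_keys: pr, tasmax, tasmin, wind
output_format: ASCII
out_prefix: forc_
verbose: True
coord_keys: lon, lat
start_year: 1980
end_year: 1989
latlon_precision: 4

[paths]
in_path: /path/to/netcdf_forcings
mask_path: /path/to/domain.nc
mask_varname: mask
ASCIIoutPath: /path/to/ascii_output

The "{}" in files is filled with each year from start_year through end_year,
as the code above notes.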
Example #6
def main(cfg_file, nproc=1):
    ''' Main function

    Parameters
    ----------
    cfg_file: <str>
        Input config file
    nproc: <int>
        Number of processors to use
    '''
    
    # ====================================================== #
    # Load in config file
    # ====================================================== #
    cfg = read_configobj(cfg_file)
   
 
    # ====================================================== #
    # Process some cfg variables
    # ====================================================== #
    start_date = pd.to_datetime(cfg['FORCING']['start_date'])
    end_date = pd.to_datetime(cfg['FORCING']['end_date'])
    
    start_year = start_date.year
    end_year = end_date.year

    ens_list = range(cfg['FORCING']['ens_start'],
                     cfg['FORCING']['ens_end'] + 1)
    
    
    # ====================================================== #
    # Set up output directories
    # ====================================================== #
    dirs = setup_output_dirs(cfg['OUTPUT']['out_basedir'],
                             mkdirs=['forc_orig_nc', 'forc_orig_asc',
                                     'forc_disagg_asc', 'forc_disagg_nc',
                                     'config_files', 'logs_vic'])
    # Subdirs for config files for ensemble
    subdirs_config = setup_output_dirs(
                            dirs['config_files'],
                            mkdirs=['netcdf2vic', 'vic4', 'vic2nc'])
    
    # ====================================================== #
    # Load in domain file
    # ====================================================== #
    ds_domain = xr.open_dataset(cfg['DOMAIN']['domain_nc'])
    da_domain = ds_domain[cfg['DOMAIN']['mask_name']]
    
    lat_min = da_domain['lat'].min().values
    lat_max = da_domain['lat'].max().values
    lon_min = da_domain['lon'].min().values
    lon_max = da_domain['lon'].max().values
    
    
    # ====================================================== #
    # Load in and process Newman ensemble forcings (for prec, Tmax and Tmin)
    # and orig. Maurer forcing (for wind speed)
    # ====================================================== #
    
    # --- Load Maurer forcings --- #
    print('Processing Maurer forcings...')
    
    # Loop over each year
    list_da_wind = []
    for year in range(start_year, end_year+1):
        print('Year {}'.format(year))
        # --- Load in netCDF file for this year --- #
        da_wind = xr.open_dataset(os.path.join(
                        cfg['FORCING']['maurer_dir'],
                        'nldas_met_update.obs.daily.wind.{}.nc'.format(year)))['wind']
        # --- Mask out the target area --- #
        da_wind = da_wind.sel(latitude=slice(lat_min, lat_max),
                              longitude=slice(lon_min, lon_max))
        da_wind = da_wind.where(da_domain.values)
        # --- Rename lat and lon --- #
        da_wind = da_wind.rename({'latitude': 'lat', 'longitude': 'lon'})
        # --- Put in list --- #
        list_da_wind.append(da_wind)
    
    # Concat all years together
    da_wind_allyears = xr.concat(list_da_wind, dim='time')
   
    # --- Load Newman forcings --- #
    print('Processing Newman forcings...')

    # If 1 processor, do a regular process
    if nproc == 1:
        # Loop over each ensemble member
        for ens in ens_list:
            load_and_process_Newman(ens, cfg, da_domain, lat_min, lat_max,
                                    lon_min, lon_max, start_date, end_date,
                                    dirs, da_wind_allyears)
    # If multiple processors, use mp
    elif nproc > 1:
        # Set up multiprocessing
        pool = mp.Pool(processes=nproc)
        # Loop over each ensemble member
        for ens in ens_list:
            pool.apply_async(load_and_process_Newman,
                             (ens, cfg, da_domain, lat_min, lat_max, lon_min,
                              lon_max, start_date, end_date, dirs,
                              da_wind_allyears,))
        # Finish multiprocessing
        pool.close()
        pool.join()
    
    # ====================================================== #
    # Convert orig. forcings to ascii format
    # ====================================================== #
    
    print('Converting orig. netCDF forcings to VIC ascii...')

    # --- Setup subdirs for asc VIC orig. forcings for each ensemble member
    # --- #
    list_ens = []
    for ens in ens_list:
        list_ens.append('ens_{}'.format(ens))
    subdirs_output = setup_output_dirs(
                        dirs['forc_orig_asc'],
                        mkdirs=list_ens)
    
    # --- Prepare netcdf2vic config file --- #
    dict_cfg_file = {}
    for ens in ens_list:
        cfg_file = os.path.join(subdirs_config['netcdf2vic'],
                                'ens_{}.cfg'.format(ens))
        dict_cfg_file[ens] = cfg_file

        with open(cfg_file, 'w') as f:
            f.write('[options]\n')
            f.write('files: forc_orig.{}.nc\n')
            f.write('verbose: True\n')
            f.write('output_format: ASCII\n')
            f.write('out_prefix: forc_orig_\n')
            f.write('coord_keys: lon,lat\n')
            f.write('var_keys: pr,tasmax,tasmin,wind\n')
            f.write('start_year: {}\n'.format(start_year))
            f.write('end_year: {}\n'.format(end_year))
            f.write('latlon_precision: {}\n'.format(
                            cfg['OUTPUT']['latlon_precision']))

            f.write('\n[paths]\n')
            f.write('in_path: {}\n'.format(os.path.join(
                                        dirs['forc_orig_nc'],
                                        'ens_{}'.format(ens))))
            f.write('mask_path: {}\n'.format(cfg['DOMAIN']['domain_nc']))
            f.write('mask_varname: {}\n'.format(cfg['DOMAIN']['mask_name']))
            f.write('ASCIIoutPath: {}\n'.format(
                        subdirs_output['ens_{}'.format(ens)]))
        
    # --- Run nc_to_vic --- #
    # If 1 processor, do a regular process
    if nproc == 1:
        for ens in ens_list:
            nc_to_vic(dict_cfg_file[ens])
    # If multiple processors, use mp
    elif nproc > 1:
        # Set up multiprocessing
        pool = mp.Pool(processes=nproc)
        # Loop over each ensemble member
        for ens in ens_list:
            pool.apply_async(nc_to_vic, (dict_cfg_file[ens],))
        # Finish multiprocessing
        pool.close()
        pool.join()
    
    # ====================================================== #
    # Run VIC forcing disaggregator
    # ====================================================== #
    
    print('Running VIC as a disaggregator...')
    
    # --- Setup subdirs for asc VIC disagg. forcings and VIC log files for
    # each ensemble member --- #
    list_ens = []
    for ens in ens_list:
        list_ens.append('ens_{}'.format(ens))
    subdirs_output = setup_output_dirs(
                        dirs['forc_disagg_asc'],
                        mkdirs=list_ens)
    subdirs_logs = setup_output_dirs(
                        dirs['logs_vic'],
                        mkdirs=list_ens)
 
    # --- Prepare VIC global file for the disaggregation run --- #
    # Load in global file template
    with open(cfg['VIC_DISAGG']['global_template'], 'r') as f:
        global_param = f.read()
    # Create string template
    s = string.Template(global_param)
    # Loop over each ensemble member
    dict_global_file = {}
    for ens in ens_list:
        # Fill in variables in the template
        global_param = s.safe_substitute(
                            time_step=cfg['VIC_DISAGG']['time_step'],
                            startyear=start_year,
                            startmonth=start_date.month,
                            startday=start_date.day,
                            endyear=end_year,
                            endmonth=end_date.month,
                            endday=end_date.day,
                            forcing1=os.path.join(dirs['forc_orig_asc'],
                                                  'ens_{}'.format(ens),
                                                  'forc_orig_'),
                            grid_decimal=cfg['OUTPUT']['latlon_precision'],
                            prec='PREC',
                            tmax='TMAX',
                            tmin='TMIN',
                            wind='WIND',
                            forceyear=start_year,
                            forcemonth=start_date.month,
                            forceday=start_date.day,
                            result_dir=subdirs_output['ens_{}'.format(ens)])
        # Write global param file
        global_file = os.path.join(subdirs_config['vic4'],
                                   'vic.global.ens_{}.txt'.format(ens))
        dict_global_file[ens] = global_file
        with open(global_file, mode='w') as f:
            f.write(global_param)
            
    # --- Run VIC --- #
    # Prepare VIC exe
    vic_exe = VIC(cfg['VIC_DISAGG']['vic4_exe'])

    # If 1 processor, do a regular process
    if nproc == 1:
        for ens in ens_list:
            vic_exe.run(dict_global_file[ens],
                        logdir=subdirs_logs['ens_{}'.format(ens)])
    # If multiple processors, use mp
    elif nproc > 1:
        # Set up multiprocessing
        pool = mp.Pool(processes=nproc)
        # Loop over each ensemble member
        for ens in ens_list:
            pool.apply_async(run_vic_for_multiprocess,
                             (vic_exe, dict_global_file[ens],
                              subdirs_logs['ens_{}'.format(ens)],))
        # Finish multiprocessing
        pool.close()
        pool.join()
    
    # ====================================================== #
    # Convert disaggregated forcings to netCDF format
    # ====================================================== #
    
    # --- Prepare config file for vic2nc --- #
    print('Converting disaggregated forcings to netCDF...')
    
    # --- Setup subdirs for VIC disagg. netCDF forcings for each ensemble
    # member --- #
    list_ens = []
    for ens in ens_list:
        list_ens.append('ens_{}'.format(ens))
    subdirs_output = setup_output_dirs(
                        dirs['forc_disagg_nc'],
                        mkdirs=list_ens)

    # --- Prepare netcdf2vic config file --- #
    # Extract disaggregated forcing variable names and order
    with open(cfg['VIC_DISAGG']['global_template'], 'r') as f:
        global_param = f.read()
    outvar_list = find_outvar_global_param(global_param)
    for i, var in enumerate(outvar_list):
        # remove the 'OUT_' prefix; str.strip('OUT_') strips characters, not a
        # prefix, and would turn 'OUT_TMAX' into 'MAX'
        if var.startswith('OUT_'):
            outvar_list[i] = var[len('OUT_'):]
   
    # Extract end date and hour
    end_date_with_hour = (end_date + pd.DateOffset(days=1)
                          - pd.DateOffset(hours=cfg['VIC_DISAGG']['time_step']))

    # Loop over each ensemble member 
    dict_cfg_file = {}
    for ens in ens_list:
        cfg_file = os.path.join(subdirs_config['vic2nc'],
                                'ens_{}.cfg'.format(ens))
        dict_cfg_file[ens] = cfg_file
        
        with open(cfg_file, 'w') as f:
            f.write('[OPTIONS]\n')
            f.write('input_files: {}\n'.format(
                        os.path.join(dirs['forc_disagg_asc'],
                                     'ens_{}'.format(ens),
                                     'force_*')))
            f.write('input_file_format: ascii\n')
            f.write('bin_dt_sec: {}\n'.format(cfg['VIC_DISAGG']['time_step']*3600))
            f.write('bin_start_date: {}\n'.format(start_date.strftime("%Y-%m-%d-%H")))
            f.write('bin_end_date: {}\n'.format(end_date_with_hour.strftime("%Y-%m-%d-%H")))
            f.write('regular_grid: False\n')
            f.write('out_directory: {}\n'.format(subdirs_output['ens_{}'.format(ens)]))
            f.write('memory_mode: big_memory\n')
            f.write('chunksize: 100\n')
            f.write('out_file_prefix: force\n')
            f.write('out_file_format: NETCDF4\n')
            f.write('precision: single\n')
            f.write('start_date: {}\n'.format(start_date.strftime("%Y-%m-%d-%H")))
            f.write('end_date: {}\n'.format(end_date_with_hour.strftime("%Y-%m-%d-%H")))
            f.write('calendar: proleptic_gregorian\n')
            f.write('time_segment: year\n')
            f.write('snow_bands: False\n')
            f.write('veg_tiles: False\n')
            f.write('soil_layers: False\n')
            
            f.write('\n[DOMAIN]\n')
            f.write('filename: {}\n'.format(cfg['DOMAIN']['domain_nc']))
            f.write('longitude_var: {}\n'.format(cfg['DOMAIN']['lon_name']))
            f.write('latitude_var: {}\n'.format(cfg['DOMAIN']['lat_name']))
            f.write('y_x_dims: {}, {}\n'.format(cfg['DOMAIN']['lat_name'],
                                                cfg['DOMAIN']['lon_name']))
            f.write('copy_vars: {}, {}, {}\n'.format(cfg['DOMAIN']['mask_name'],
                                                     cfg['DOMAIN']['lat_name'],
                                                     cfg['DOMAIN']['lon_name']))
            
            f.write('\n[GLOBAL_ATTRIBUTES]\n')
            f.write('title: VIC forcings\n')
            f.write('version: VIC4.2\n')
            f.write('grid: 1/8\n')
            
            for i, var in enumerate(outvar_list):
                if var == 'AIR_TEMP':
                    f.write('\n[AIR_TEMP]\n')
                    f.write('column: {}\n'.format(i))
                    f.write('units: C\n')
                    f.write('standard_name: air_temperature\n')
                    f.write('description: air temperature\n')
                elif var == 'PREC':
                    f.write('\n[PREC]\n')
                    f.write('column: {}\n'.format(i))
                    f.write('units: mm/step\n')
                    f.write('standard_name: precipitation\n')
                    f.write('description: precipitation\n')
                elif var == 'PRESSURE':
                    f.write('\n[PRESSURE]\n')
                    f.write('column: {}\n'.format(i))
                    f.write('units: kPa\n')
                    f.write('standard_name: surface_air_pressure\n')
                    f.write('description: near-surface atmospheric pressure\n')
                elif var == 'SHORTWAVE':
                    f.write('\n[SHORTWAVE]\n')
                    f.write('column: {}\n'.format(i))
                    f.write('units: W m-2\n')
                    f.write('standard_name: incoming_shortwave_radiation\n')
                    f.write('description: incoming shortwave radiation\n')
                elif var == 'LONGWAVE':
                    f.write('\n[LONGWAVE]\n')
                    f.write('column: {}\n'.format(i))
                    f.write('units: W m-2\n')
                    f.write('standard_name: incoming_longwave_radiation\n')
                    f.write('description: incoming longwave radiation\n')
                elif var == 'VP':
                    f.write('\n[VP]\n')
                    f.write('column: {}\n'.format(i))
                    f.write('units: kPa\n')
                    f.write('standard_name: water_vapor_pressure\n')
                    f.write('description: near surface vapor pressure\n')
                elif var == 'WIND':
                    f.write('\n[WIND]\n')
                    f.write('column: {}\n'.format(i))
                    f.write('units: m/s\n')
                    f.write('standard_name: wind_speed\n')
                    f.write('description: near-surface wind speed\n')
   
    # --- Run vic2nc --- #
    # If 1 processor, do a regular process
    if nproc == 1:
        for ens in ens_list:
            cfg_vic2nc = read_config(dict_cfg_file[ens])
            options = cfg_vic2nc.pop('OPTIONS')
            global_atts = cfg_vic2nc.pop('GLOBAL_ATTRIBUTES')
            if not options['regular_grid']:
                domain_dict = cfg_vic2nc.pop('DOMAIN')
            else:
                domain_dict = None
            # Set aside fields dict
            fields = cfg_vic2nc
            # Run vic2nc 
            vic2nc(options, global_atts, domain_dict, fields)

    # If multiple processors, use mp
    elif nproc > 1:
        # Set up multiprocessing
        pool = mp.Pool(processes=nproc)
        # Loop over each ensemble member
        for ens in ens_list:
            cfg_vic2nc = read_config(dict_cfg_file[ens])
            options = cfg_vic2nc.pop('OPTIONS')
            global_atts = cfg_vic2nc.pop('GLOBAL_ATTRIBUTES')
            if not options['regular_grid']:
                domain_dict = cfg_vic2nc.pop('DOMAIN')
            else:
                domain_dict = None
            # set aside fields dict
            fields = cfg_vic2nc
            pool.apply_async(vic2nc,
                             (options, global_atts, domain_dict, fields,))
        # Finish multiprocessing
        pool.close()
        pool.join()
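A hypothetical entry point for this driver; the original snippet does not show
how main is invoked, so the argument handling below is assumed:

# Hypothetical CLI entry point.
# Usage: python prepare_ensemble_forcings.py <config_file> [nproc]
import sys

if __name__ == '__main__':
    nproc = int(sys.argv[2]) if len(sys.argv) > 2 else 1
    main(sys.argv[1], nproc=nproc)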
Example #7
def _run(args):

    config = read_config(args.config)
    files = config['options']['files']
    var_keys = config['options']['var_keys']
    output = config['options']['output']
    binary_mult = config['options']['binary_mult']
    binary_type = config['options']['binary_type']
    paths = config['options']['paths']
    out_prefix = config['options']['out_prefix']
    verbose = config['options']['verbose']

    mask = read_netcdf(paths['mask_path'], nc_vars=['mask'])['mask']
    yi, xi = np.nonzero(mask)
    print('found {0} points in mask file.'.format(len(yi)))

    xlist = []
    ylist = []
    pointlist = []
    append = False

    for i, fname in enumerate(files):
        d = read_netcdf(os.path.join(paths['in_path'], fname), verbose=verbose)

        if i == 0:

            # find point locations
            xs = d['xc']
            ys = d['yc']
            posinds = np.nonzero(xs > 180)
            xs[posinds] -= 360
            print('adjusted xs lon minimum')

            for y, x in pyzip(yi, xi):
                active_flag = False
                for key in var_keys:
                    if (d[key][:, y, x].all() is np.ma.masked) \
                            or (mask[y, x] == 0):
                        active_flag = True
                if not active_flag:
                    point = (ys[y, x], xs[y, x])
                    xlist.append(x)
                    ylist.append(y)
                    pointlist.append(point)

        else:
            append = True

        for y, x, point in pyzip(ylist, xlist, pointlist):

            data = np.empty((d[var_keys[0]].shape[0], len(var_keys)))

            for j, key in enumerate(var_keys):
                data[:, j] = d[key][:, y, x]

            if output['Binary']:
                write_binary(data * binary_mult, point, binary_type,
                             out_prefix, paths['BinaryoutPath'], append)
            if output['ASCII']:
                write_ascii(data, point, out_prefix, paths['ASCIIoutPath'],
                            append)
    return
Example #8
File: run_tests.py Project: hhelgason/VIC
def run_examples(config_file, vic_exe, test_data_dir, out_dir, driver):
    '''Run examples tests from config file

    Parameters
    ----------
    config_file : str
        Configuration file for example tests.
    vic_exe : VIC (object)
        VIC executable object (see tonic documentation).
    test_data_dir : str
        Path to test data sets.
    out_dir : str
        Path to output location.
    driver : {'classic', 'image'}
        Driver to run tests on.

    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.

    See Also
    --------
    run_unit_tests
    run_system
    run_science
    run_release
    '''

    # Print test set welcome
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running Examples')
    print('-'.ljust(OUTPUT_WIDTH, '-'))

    # Get setup
    config = read_config(config_file)

    # drop invalid driver tests
    config = drop_tests(config, driver)

    test_results = OrderedDict()

    # Run individual examples
    for i, (testname, test_dict) in enumerate(config.items()):

        # print out status info
        print('Running test {0}/{1}: {2}'.format(i + 1, len(config.items()),
                                                 testname))

        # Setup directories for test
        dirs = setup_test_dirs(testname,
                               out_dir,
                               mkdirs=['results', 'state', 'logs', 'plots'])

        # read template global parameter file
        infile = os.path.join(test_dir, 'examples',
                              test_dict['global_parameter_file'])

        with open(infile, 'r') as global_file:
            global_param = global_file.read()

        # create template string
        s = string.Template(global_param)

        # fill in global parameter options
        global_param = s.safe_substitute(test_data_dir=test_data_dir,
                                         result_dir=dirs['results'],
                                         state_dir=dirs['state'],
                                         testname=testname,
                                         test_root=test_dir)

        test_global_file = os.path.join(dirs['test'],
                                        '{0}_globalparam.txt'.format(testname))

        # write global parameter file
        with open(test_global_file, 'w') as f:
            f.write(global_param)

        # Get optional kwargs for run executable
        run_kwargs = pop_run_kwargs(test_dict)

        # run VIC
        test_complete = False
        test_passed = False
        test_comment = ''
        error_message = ''
        returncode = None  # defined up front so the except path can still report it

        try:
            # Run the VIC simulation
            returncode = vic_exe.run(test_global_file,
                                     logdir=dirs['logs'],
                                     **run_kwargs)
            test_complete = True

            # Check return code
            check_returncode(vic_exe)

            # check output files (different tests depending on driver)
            if test_dict['check']:
                fnames = glob.glob(os.path.join(dirs['results'], '*'))

                # Check that the simulation completed for all grid cells
                if 'complete' in test_dict['check'] and driver == 'classic':
                    test_classic_driver_all_complete(fnames)

                # check for nans in all example files
                if 'output_file_nans' in test_dict['check']:
                    if driver == 'classic':
                        test_classic_driver_no_output_file_nans(fnames)
                    elif driver == 'image':
                        domain_file = os.path.join(test_data_dir,
                                                   test_dict['domain_file'])
                        test_image_driver_no_output_file_nans(
                            fnames, domain_file)
                    else:
                        raise ValueError('unknown driver')

            # if we got this far, the test passed.
            test_passed = True

        # Handle errors
        except Exception as e:
            test_comment, error_message = process_error(e, vic_exe)

        # record the test results
        test_results[testname] = TestResults(testname,
                                             test_complete=test_complete,
                                             passed=test_passed,
                                             comment=test_comment,
                                             error_message=error_message,
                                             returncode=returncode)

    # Print examples footer
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    print('Finished testing examples.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))

    return test_results
Example #9
import sys

from tonic.io import read_config
from tonic.models.vic.vic2netcdf import vic2nc

# --- Load config file --- #
cfg_file = sys.argv[1]

# --- Run vic2nc --- #
cfg_vic2nc = read_config(cfg_file)
options = cfg_vic2nc.pop('OPTIONS')
global_atts = cfg_vic2nc.pop('GLOBAL_ATTRIBUTES')
if not options['regular_grid']:
    domain_dict = cfg_vic2nc.pop('DOMAIN')
else:
    domain_dict = None

# set aside fields dict
fields = cfg_vic2nc

vic2nc(options, global_atts, domain_dict, fields)
Example #10
            f.write('standard_name: incoming_longwave_radiation\n')
            f.write('description: incoming longwave radiation\n')
        elif var == 'VP':
            f.write('\n[VP]\n')
            f.write('column: {}\n'.format(i))
            f.write('units: kPa\n')
            f.write('standard_name: water_vapor_pressure\n')
            f.write('description: near surface vapor pressure\n')
        elif var == 'WIND':
            f.write('\n[WIND]\n')
            f.write('column: {}\n'.format(i))
            f.write('units: m/s\n')
            f.write('standard_name: wind_speed\n')
            f.write('description: near-surface wind speed\n')

# --- Run vic2nc --- #
cfg_vic2nc = read_config(cfg_file)
options = cfg_vic2nc.pop('OPTIONS')
global_atts = cfg_vic2nc.pop('GLOBAL_ATTRIBUTES')
if not options['regular_grid']:
    domain_dict = cfg_vic2nc.pop('DOMAIN')
else:
    domain_dict = None

# set aside fields dict
fields = cfg_vic2nc

vic2nc(options, global_atts, domain_dict, fields)


Example #11
def _run(args):

    config = read_config(args.config)
    files = [config['Basics']['files']]
    var_keys = [config['Basics']['var_keys']]
    output = config['Paths']['ASCIIoutPath']
    #binary_mult = config['Basics']['binary_mult']
    #binary_type = config['Basics']['binary_type'],
    paths = config['Paths']
    out_prefix = config['Basics']['out_prefix']
    verbose = config['Basics']['verbose']
    mask = read_netcdf(paths['mask_path'], variables=['mask'])[0]['mask']
    yi, xi = np.nonzero(mask)
    print(mask)
    print('found {0} points in mask file.'.format(len(xi)))
    #x = read_netcdf(os.path.join(paths['in_path'], 'pr_1979.nc'))
    #print(x)

    xlist = []
    ylist = []
    pointlist = []
    append = False

    for i, fname in enumerate(files):
        d = read_netcdf(os.path.join(paths['in_path'], fname),
                        verbose=verbose)[0]
        print(i)
        if i == 0:

            # find point locations
            xs = d['lon']
            ys = d['lat']
            posinds = np.nonzero(xs > 180)
            xs[posinds] -= 360
            print('adjusted xs lon minimum')

            for y, x in pyzip(yi, xi):
                active_flag = False
                for key in var_keys:
                    if (d[key][:, y, x].all() is np.ma.masked) \
                            or (mask[y, x] == 0):
                        active_flag = True
                if not active_flag:
                    point = (ys[y], xs[x])
                    print(point)
                    xlist.append(x)
                    ylist.append(y)
                    pointlist.append(point)

        else:
            append = True

        for y, x, point in pyzip(ylist, xlist, pointlist):

            data = np.empty((d[var_keys[0]].shape[0], len(var_keys)))

            for j, key in enumerate(var_keys):
                data[:, j] = d[key][:, y, x]

            #if output['Binary']:
            #   write_binary(data * binary_mult, point, binary_type,
            #               out_prefix, paths['BinaryoutPath'], append)
            #if output['ASCII']:
            write_ascii(data, point, out_prefix, paths['ASCIIoutPath'],
                        append)
    return
Example #12
#!/bin/env python

import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
import xarray as xr
from tonic.io import read_config, read_configobj

# read configs for filenames and diagnostics to run
config_files = read_config(os.path.join(os.getcwd(), 'filenames.cfg'))
config_diag = read_configobj(os.path.join(os.getcwd(), 'diagnostics.cfg'))

# full list of l2x coupler fields

l2x_vars = [
    'l2x_Sl_t', 'l2x_Sl_tref', 'l2x_Sl_qref', 'l2x_Sl_avsdr', 'l2x_Sl_anidr',
    'l2x_Sl_avsdf', 'l2x_Sl_anidf', 'l2x_Sl_snowh', 'l2x_Sl_u10', 'l2x_Sl_fv',
    'l2x_Sl_ram1', 'l2x_Sl_logz0', 'l2x_Fall_taux', 'l2x_Fall_tauy',
    'l2x_Fall_lat', 'l2x_Fall_sen', 'l2x_Fall_lwup', 'l2x_Fall_evap',
    'l2x_Fall_swnet', 'l2x_Fall_flxdst1', 'l2x_Fall_flxdst2',
    'l2x_Fall_flxdst3', 'l2x_Fall_flxdst4', 'l2x_Flrl_rofliq',
    'l2x_Flrl_rofice'
]

l2x_units = [
    'K', 'K', 'g/g', 'fraction', 'fraction', 'fraction', 'fraction', 'm',
    'm/s', 'm/s', 's/m', 'm', 'N/m2', 'N/m2', 'W/m2', 'W/m2', 'W/m2', 'kg/m2s',
    'W/m2', 'm/s', 'm/s', 'm/s', 'm/s', 'kg/m2s', 'kg/m2s'
]
Example #13
def run(config_file, outfile):
    """
    Generate high-resolution meteorological forcings by downscaling the GCM
    and/or RCM using the Generalized Analog Regression Downscaling (GARD) tool.

    Inputs:
    Configuration file formatted with the following options:
    TODO: Add sample config
    """
    # Read configuration file into a dictionary
    config = read_config(config_file)

    log_level = config['Options']['LogLevel']
    chunk_years = relativedelta(years=int(config['Options']['ChunkYears']))

    # Set up logging for messaging
    logger = set_logger(
        os.path.splitext(os.path.split(__file__)[-1])[0], log_level)

    logger.info('Downscaling Configuration Options:')
    pp.pprint(config)

    # Define variables from configuration file

    # create directories if they don't exist yet
    data_dir = config['Options']['DataDir']
    filelist_dir = os.path.join(data_dir, 'gard_filelists')
    namelist_dir = os.path.join(data_dir, 'gard_namelists')
    for d in [data_dir, filelist_dir, namelist_dir]:
        os.makedirs(d, exist_ok=True)

    # if outfile is default, put in data_dir
    if outfile == 'namelist.txt':
        outfile = os.path.join(data_dir, outfile)

    # GARD namelist template
    namelist_template = config['Options']['NamelistTemplate']

    prediction_sets = config['Sets']

    # Make training file lists
    file_lists = {}
    file_lists_len = {}
    set_dirs = {}
    namelists = []

    for dataset, dset_config in config['Datasets'].items():
        predict_ranges = {}
        gcms = list_like(dset_config['GCMs'])

        if isinstance(gcms[0], int):
            # workaround for these being cast as ints
            gcms = ['{0:03}'.format(i) for i in gcms]
            print(gcms)

        train_calendar = dset_config.get('TrainCalendar', None)
        obs_calendar = config['ObsDataset'].get('ObsCalendar', None)

        for setname, set_config in prediction_sets.items():

            logger.info('Creating configuration files for set:  %s', setname)

            # Make set directory
            set_dirs[setname] = os.path.join(data_dir, dataset, setname)
            os.makedirs(set_dirs[setname], exist_ok=True)

            # Training/prediction/obs variables
            obs_vars = list_like(set_config['ObsVars'])
            logger.info('Obs Vars:  %s', obs_vars)

            # For now, we can assume the training / prediction variable names
            # are the same
            vars_list = []
            for var in obs_vars:
                vars_list.append(set_config[var])
            vars_list = list(set(flatten(vars_list)))
            logger.info('Variables: %s', vars_list)

            # Get scenario to process
            logger.debug(dset_config['PredictPattern'])
            scenarios = dset_config['scenario']
            train_range = tuple(dset_config['TrainPeriod'])
            transform_range = tuple(dset_config['TransformPeriod'])
            transform_scen = dset_config['TransformScenario']
            logger.info('training range: %s', train_range)

            for scen, drange in scenarios.items():
                predict_ranges[scen] = tuple(drange)

            # Make the filelists
            for var in obs_vars:
                # Obs filelist
                key = filelistkey(dset=dataset,
                                  var=var,
                                  id='obs',
                                  drange=train_range,
                                  scenario='training')

                file_lists[key], file_lists_len[key] = make_filelist(
                    key,
                    config['ObsDataset']['ObsInputPattern'],
                    prefix=filelist_dir,
                    calendar=obs_calendar)

            for var in vars_list:
                # training filelist
                key = filelistkey(dset=dataset,
                                  var=TRAINVARMAP.get(var, var),
                                  id='training',
                                  drange=train_range,
                                  scenario='training')
                file_lists[key], file_lists_len[key] = make_filelist(
                    key,
                    dset_config['TrainPattern'],
                    prefix=filelist_dir,
                    calendar=train_calendar)

                # prediction filelists
                for gcm, scen in itertools.product(gcms, scenarios):
                    for drange in get_drange_chunks(
                            predict_ranges[scen], max_chunk_size=chunk_years):
                        key = filelistkey(dset=dataset,
                                          var=var,
                                          id=gcm,
                                          drange=drange,
                                          scenario=scen)
                        file_lists[key], file_lists_len[key] = make_filelist(
                            key,
                            dset_config['PredictPattern'],
                            prefix=filelist_dir,
                            transform_range=transform_range,
                            transform_scen=transform_scen,
                            calendar=config['Calendars'].get(
                                gcm, config['Calendars'].get('all', None)))

        for gcm, var, setname in itertools.product(gcms, obs_vars,
                                                   prediction_sets):

            for scen in dset_config['scenario']:
                # drange_str = _tslice_to_str(predict_ranges[scen])
                for drange in get_drange_chunks(predict_ranges[scen],
                                                max_chunk_size=chunk_years):
                    drange_str = _tslice_to_str(drange)
                    # Make GARD namelist
                    namelist = os.path.join(
                        namelist_dir,
                        NAMELIST_TEMPLATE.format(setname=setname,
                                                 var=var,
                                                 id=gcm,
                                                 dset=dataset,
                                                 scenario=scen,
                                                 drange=drange_str))

                    kwargs = defaults.copy()
                    kwargs.update(prediction_sets[setname])

                    # Set the downscaling mode
                    mode = prediction_sets[setname]['Mode']
                    for m in [
                            'pure_regression', 'analog_regression',
                            'pure_analog', 'pass_through'
                    ]:
                        kwargs[m] = (m == mode)

                    # Now, add a bunch of computed variables
                    # parameters section
                    out_dir = os.path.join(set_dirs[setname], drange_str)
                    os.makedirs(out_dir, exist_ok=True)

                    kwargs['output_file_prefix'] = os.path.join(
                        out_dir,
                        OUTPUT_TEMPLATE.format(setname=setname,
                                               drange=drange_str,
                                               dset=dataset,
                                               id=gcm,
                                               scenario=scen))
                    kwargs['start_date'] = pd.to_datetime(
                        drange[0]).strftime(GARD_TIMEFORMAT)
                    kwargs['end_date'] = pd.to_datetime(
                        drange[1]).strftime(GARD_TIMEFORMAT)
                    kwargs['start_train'] = pd.to_datetime(
                        train_range[0]).strftime(GARD_TIMEFORMAT)
                    kwargs['end_train'] = pd.to_datetime(
                        train_range[1]).strftime(GARD_TIMEFORMAT)
                    kwargs['start_transform'] = pd.to_datetime(
                        transform_range[0]).strftime(GARD_TIMEFORMAT)
                    kwargs['end_transform'] = pd.to_datetime(
                        transform_range[1]).strftime(GARD_TIMEFORMAT)

                    kwargs['logistic_threshold'] = LOGISTIC_THRESH[var]

                    # training_parameters section
                    var_list = list_like(prediction_sets[setname][var])
                    train_var_list = [TRAINVARMAP.get(v, v) for v in var_list]
                    train_kwargs = dict(
                        dset=dataset,
                        drange=train_range,
                        id='training',
                        scenario='training',
                    )
                    kwargs['train_nfiles'] = file_lists_len[filelistkey(
                        var=train_var_list[0], **train_kwargs)]
                    kwargs['train_nvars'] = len(train_var_list)
                    kwargs['train_vars'] = _make_variables_str(train_var_list)
                    kwargs['train_transform'] = get_transform_str(
                        train_var_list, mode)
                    kwargs['train_filelists'] = _make_filelist_str(
                        file_lists, train_var_list, **train_kwargs)
                    kwargs['train_calendar'] = '"{}"'.format(train_calendar)

                    # prediction_parameters section
                    predict_kwargs = dict(dset=dataset,
                                          drange=drange,
                                          id=gcm,
                                          scenario=scen)
                    kwargs['predict_nfiles'] = file_lists_len[filelistkey(
                        var=var_list[0], **predict_kwargs)]
                    nvars = len(var_list)
                    kwargs['predict_nvars'] = nvars
                    kwargs['predict_vars'] = _make_variables_str(var_list)
                    transformations = ','.join([str(QUANTILE_TRANSFORM)] *
                                               nvars)
                    kwargs['transformations'] = transformations
                    kwargs['predict_transform'] = get_transform_str(
                        var_list, mode)
                    kwargs['predict_filelists'] = _make_filelist_str(
                        file_lists, var_list, **predict_kwargs)
                    kwargs['predict_calendar'] = '"{}"'.format(
                        config['Calendars'].get(
                            gcm, config['Calendars'].get('all', None)))

                    # obs_parameters section
                    obs_kwargs = dict(dset=dataset,
                                      drange=train_range,
                                      id='obs',
                                      scenario='training')
                    kwargs['obs_nvars'] = 1
                    key = filelistkey(var=var, **obs_kwargs)
                    kwargs['obs_nfiles'] = file_lists_len[key]
                    kwargs['obs_vars'] = _make_variables_str(var)
                    kwargs['obs_transform'] = TRANSFORM[var]
                    kwargs['obs_filelists'] = _make_filelist_str(
                        file_lists, [var], **obs_kwargs)
                    kwargs['obs_calendar'] = '"{}"'.format(obs_calendar)

                    # Special cases for "pass_through" option:
                    if mode == 'pass_through':
                        kwargs['train_transform'] = NO_TRANSFORM
                        kwargs['predict_transform'] = NO_TRANSFORM
                        kwargs['obs_transform'] = NO_TRANSFORM
                        kwargs['normalization_method'] = NO_NORM
                        kwargs['transformations'] = NO_TRANSFORM

                    # Store the namelist filepath for execution
                    namelists.append(namelist)
                    # Create the namelist by filling in the template
                    replace_var_pythonic_config(namelist_template, namelist,
                                                **kwargs)

    # Write file of namelists
    logger.info('writing outfile %s', outfile)
    with open(outfile, 'w') as f:
        f.writelines('\n'.join(namelists))

    config_copy = os.path.join(data_dir, os.path.basename(config_file))
    logger.info('writing config file %s', config_copy)
    shutil.copyfile(config_file, config_copy)

    return
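A usage sketch with placeholder file names; run takes only the configuration
file and the output namelist list:

# Hypothetical invocation; both paths are placeholders.
run('gard_downscaling.cfg', outfile='namelist.txt')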