Example #1
def test_output_graph_processing_splitfm_B():
    from awrams.utils import extents
    from awrams.utils import datetools as dt

    import awrams.models.awral.description
    awrams.models.awral.description.CLIMATE_DATA = os.path.join(
        os.path.dirname(__file__), '..', '..', 'test_data', 'simulation')

    from awrams.utils.nodegraph import nodes, graph
    from awrams.simulation.ondemand import OnDemandSimulator
    from awrams.models import awral

    input_map = awral.get_default_mapping()
    output_map = {
        's0_save':
        nodes.write_to_annual_ncfile(os.path.dirname(__file__), 's0', mode='w')
    }
    # outputs = graph.OutputGraph(output_map)
    runner = OnDemandSimulator(awral, input_map.mapping, omapping=output_map)

    print("RUNNER NEW (FILES EXISTING): multiple cells, multiple years")
    period = dt.dates('2010-2011')
    extent = extents.from_boundary_offset(200, 200, 201, 201)
    r = runner.run(period, extent)

    print("RUNNER OLD (FILES EXISTING): single cell, single year")
    period = dt.dates('2015')
    extent = extents.from_cell_offset(202, 202)
    r = runner.run(period, extent)
Example #2
def test_output_graph_processing_splitfm_B():
    from awrams.utils import extents
    from awrams.utils import datetools as dt

    e_all = extents.get_default_extent()

    from awrams.utils.nodegraph import nodes, graph
    from awrams.simulation.ondemand import OnDemandSimulator

    input_map = awral.get_default_mapping()
    climate_mod(input_map)
    output_map = awral.get_output_mapping()
    output_map['s0_save'] = nodes.write_to_annual_ncfile(
        os.path.dirname(__file__), 's0', mode='w')

    runner = OnDemandSimulator(awral, input_map, omapping=output_map)

    print("RUNNER NEW (FILES EXISTING): multiple cells, multiple years")
    period = dt.dates('2010-2011')
    extent = e_all.ioffset[200:202, 200:202]
    r = runner.run(period, extent)

    clear_files()

    print("RUNNER OLD (FILES EXISTING): single cell, single year")
    period = dt.dates('2015')
    extent = e_all.ioffset[202, 202]
    r = runner.run(period, extent)
Example #3
def run_schema_test(schema):
    new_sfm = dm.SplitFileManager(test_path, m_tvar)
    new_sfm.create_files(schema, clobber=True, leave_open=True)

    data = new_sfm.get_padded_by_coords(new_sfm.cs)
    assert (np.isnan(data).all())

    newdata = np.random.normal(size=data.shape).astype(np.float32)
    new_sfm.set_by_coords(new_sfm.cs, newdata)

    data = new_sfm.get_padded_by_coords(new_sfm.cs)
    assert ((data == newdata).all())

    subcs = mt.gen_coordset(dt.dates('dec 9 2000 - jan 15 2001'),
                            extent.ioffset[5, 2])

    newdata = np.random.normal(size=subcs.shape).astype(np.float32)
    new_sfm.set_by_coords(subcs, newdata)
    assert ((new_sfm.get_padded_by_coords(subcs) == newdata).all())
    assert ((new_sfm.get_padded_by_coords(
        new_sfm.cs)[new_sfm.cs.get_index(subcs)] == newdata.reshape(
            dm.simple_shape(newdata.shape))).all())

    subcs = mt.gen_coordset(dt.dates('dec 12 2000'), extent.ioffset[5, 2:4])

    newdata = np.random.normal(size=subcs.shape).astype(np.float32)
    new_sfm.set_by_coords(subcs, newdata)
    assert ((new_sfm.get_padded_by_coords(subcs) == newdata).all())
    assert ((new_sfm.get_padded_by_coords(
        new_sfm.cs)[new_sfm.cs.get_index(subcs)] == newdata.reshape(
            dm.simple_shape(newdata.shape))).all())
Example #4
def setup_v5():
    from awrams.utils import config_manager

    sys_settings = config_manager.get_system_profile('default').get_settings()
    model_profile = config_manager.get_model_profile('awral','v5_default')
    model_settings = model_profile.get_settings()
    
    
    global awral
    awral = model_profile.get_model(model_settings)
    
    global period
    period = dt.dates('dec 2010 - jan 2011')

    global input_map
    input_map = model_profile.get_input_mapping(model_settings)
    model_settings['CLIMATE_DATASET'] = sys_settings['CLIMATE_DATASETS']['TESTING']

    global output_map
    output_map = awral.get_output_mapping()

    global outpath
    outpath = join(dirname(__file__),'..','..','test_data','simulation','outputs')

    output_map['s0_ncsave'] = nodes.write_to_annual_ncfile(outpath,'s0')

    output_map['mleaf_hrudr_state'] = nodes.write_to_ncfile_snapshot(outpath,'mleaf_hrudr')
Example #5
def test_SplitFileWriterNode():
    from awrams.utils import extents
    from awrams.utils import datetools as dt

    import awrams.models.awral.description
    awrams.models.awral.description.CLIMATE_DATA = os.path.join(
        os.path.dirname(__file__), '..', '..', 'test_data', 'simulation')

    from awrams.utils.nodegraph import nodes
    from awrams.simulation.ondemand import OnDemandSimulator
    from awrams.models import awral

    input_map = awral.get_default_mapping()

    from awrams.utils.metatypes import ObjectDict

    # output_path = './'
    mapping = {}
    mapping['qtot'] = nodes.write_to_annual_ncfile('./', 'qtot')

    output_map = ObjectDict(
        mapping=ObjectDict(mapping))  #,output_path=output_path)

    runner = OnDemandSimulator(awral,
                               input_map.mapping,
                               omapping=output_map.mapping)

    period = dt.dates('2010-2011')
    extent = extents.from_cell_offset(200, 200)
    r = runner.run(period, extent)
Example #6
def load_results(results_folder, results_name=None):
    """
    Retrieve a results set from a previously run simulation.

    results_folder - path to a set of AWRAMS results, including netcdf outputs and a results
                     index file (currently 'results.json')
    """

    # results_folder = sanitise_dir(results_folder)

    results_dict = _index_results(results_folder)

    live_results = Results(model_version=results_dict.model_version,
                           results_name=results_name)
    live_results.path = results_folder
    live_results.extent = results_dict.extent
    if results_dict.period.representation == "YYYY-MM-DD":
        results_dict.period.start = results_dict.period.start.replace('-', '/')
        results_dict.period.end = results_dict.period.end.replace('-', '/')
    live_results.period = dt.dates(results_dict.period.start,
                                   results_dict.period.end,
                                   freq=results_dict.period.type)

    for v in results_dict.variables:
        live_results._add_variable(v)

    return live_results
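
# Hedged usage sketch (not part of the source): load_results, defined above, expects a
# folder of AWRAMS netCDF outputs plus its 'results.json' index. The function name and
# folder argument below are illustrative only.
def _example_load_results(results_folder):
    results = load_results(results_folder, results_name='example_run')
    # period and extent are populated by load_results itself
    print(results.period[0], results.period[-1], results.extent)
    return results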
Example #7
def test_benchmark():
    from awrams.benchmarking.benchmark import Benchmark
    from awrams.benchmarking.utils import read_id_csv
    from awrams.utils import datetools as dt
    import os

    csv_path = os.path.join(TEST_DATA_PATH, 'benchmarking', 'runoff',
                            'q_obs.csv')

    b = Benchmark('QObs', 'qtot_avg')
    id_list = read_id_csv(
        os.path.join(TEST_DATA_PATH, 'benchmarking', 'catchment_ids.csv'))
    b.period = dt.dates("1981", "30/12/2011")
    b.load(csv_path, id_list)

    csv_path = os.path.join(TEST_DATA_PATH, 'benchmarking', 'runoff',
                            'awral_qtot_avg.csv')
    b.add_model('awral_v4', csv_path)

    assert hasattr(b.benchmark.models, 'awral_v4')

    b.benchmark.stat_percentiles()
    b.benchmark.data_percentiles()
    b.benchmark.stat()
    b.benchmark.plot_regression()
    b.benchmark.plot_box('pearsons_r')
    b.benchmark.plot_cdf()
    b.benchmark.plot_timeseries('105001')
Example #8
def test_extraction():
    from awrams.utils import gis

    from awrams.utils import config_manager

    sys_settings = config_manager.get_system_profile().get_settings()

    CATCHMENT_SHAPEFILE = os.path.join(sys_settings['DATA_PATHS']['SHAPEFILES'],
                                       'Final_list_all_attributes.shp')

    catchments = gis.ShapefileDB(CATCHMENT_SHAPEFILE)

    e_all = extents.get_default_extent()

    e = catchments.get_extent_by_field('StationID', '421103', e_all)

    period = dt.dates('jun 1990 - jan 1995')

    var_name = 'rain_day'

    path, _ = os.path.split(os.path.abspath(__file__))

    data_path = os.path.join(sys_settings['DATA_PATHS']['TEST_DATA'],
                             'calibration/')

    pattern = data_path + '/%s*' % var_name

    df = extract(data_path, pattern, var_name, {
        '241': e,
        '512': e_all.ioffset[400, 400]
    }, period)

    assert ((df.index == period).all())
Example #9
def setup():
    from os.path import join, dirname

    from awrams.utils import datetools as dt

    from awrams.utils.nodegraph import nodes, graph
    from awrams.models.awral.model import AWRALModel
    from awrams.utils.mapping_types import period_to_tc

    global awral

    awral = AWRALModel()

    global period
    period = dt.dates('dec 2010 - jan 2011')

    global input_map
    input_map = awral.get_default_mapping()
    change_path_to_forcing(input_map)

    global output_map
    output_map = awral.get_output_mapping()

    global outpath
    outpath = join(dirname(__file__), '..', '..', 'test_data', 'simulation',
                   'outputs')

    output_map['s0_ncsave'] = nodes.write_to_annual_ncfile(outpath, 's0')
Example #10
def test_initial_states_point():
    import numpy as np

    from awrams.utils import extents
    from awrams.utils import datetools as dt

    from awrams.utils.nodegraph import nodes, graph
    from awrams.simulation.ondemand import OnDemandSimulator
    #from awrams.models import awral

    period = dt.dates('dec 2010')

    ### test a single cell
    extent = extents.get_default_extent()
    extent = extent.icoords[-30, 120.5]

    ### simulation with default initial states
    sim = OnDemandSimulator(awral, input_map)
    r, i = sim.run(period, extent, return_inputs=True)
    outputs_default = r['final_states']

    ### simulation with initial states read from nc files
    get_initial_states(input_map)
    sim = OnDemandSimulator(awral, input_map)
    r, i = sim.run(period, extent, return_inputs=True)
    outputs_init = r['final_states']

    ### compare final states with default states simulation
    ### should be different
    for k, o in outputs_init.items():
        assert not o == outputs_default[k]

    ### save initial states to compare
    ini_states = {}
    for k in i:
        try:
            if k.startswith('init'):
                ini_states[k] = i[k]
        except:
            pass

    ### simulation with initial states read from dict
    get_initial_states_dict(input_map, period, extent)
    sim = OnDemandSimulator(awral, input_map)
    r, i = sim.run(period, extent, return_inputs=True)
    outputs_init_dict = r['final_states']

    ### compare final states with other ini states simulation
    ### should be same
    for k, o in outputs_init_dict.items():
        assert o == outputs_init[k]

    ### compare initial states from both methods
    ### should be same
    for k in i:
        try:
            if k.startswith('init'):
                assert ini_states[k] == i[k]
        except:
            pass
Example #11
def test_benchmarksoilmoisture():
    from awrams.benchmarking.benchmark import BenchmarkSoilMoisture
    from awrams.benchmarking.utils import read_id_csv
    import awrams.benchmarking.meta.sasmas as sasmas
    from awrams.utils import datetools as dt
    import os

    sasmas_data_path = os.path.join(TEST_DATA_PATH, 'benchmarking', 'sasmas')

    b = BenchmarkSoilMoisture('SASMAS', 'soil_moisture', sasmas.meta)
    site_list = ['G6', 'K2', 'M1', 'S4']
    mod_site_list = ['SASMAS Soil moisture_' + site for site in site_list]
    b.period = dt.dates('2003-2011')
    b.load(sasmas_data_path, mod_site_list, convert_units=100.)

    csv_path = os.path.join(TEST_DATA_PATH, 'benchmarking', 'sasmas',
                            'awral_${v}.csv')
    b.add_model('awral_v4', csv_path)

    assert hasattr(b.benchmark, 'top')
    assert hasattr(b.benchmark, 'shallow')
    assert hasattr(b.benchmark, 'middle')
    assert hasattr(b.benchmark, 'deep')
    assert hasattr(b.benchmark, 'profile')
    assert hasattr(b.benchmark.top.models, 'awral_v4')

    b.benchmark.top.stat_percentiles()
    b.benchmark.middle.data_percentiles()
    b.benchmark.profile.stat()
    b.benchmark.shallow.plot_regression()
    b.benchmark.deep.plot_box('pearsons_r')
    b.benchmark.top.plot_cdf()
    b.benchmark.shallow.plot_timeseries('G6')
Example #12
    def _load_from_csv(self, csv):
        df = pd.io.parsers.read_csv(csv, index_col=0, parse_dates=True, dayfirst=True, na_values=['NaN','NA'])

        if self.period is None:
            self.period = dt.dates(df.index[0], df.index[-1])
        else:
            df = df.loc[(df.index >= self.period[0])&(df.index <= self.period[-1])]

        return df
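
# Pandas-only sketch (assumption: no AWRAMS imports required) of the period clipping that
# _load_from_csv performs above: rows outside [period[0], period[-1]] are dropped.
import pandas as pd

def _clip_to_period_example():
    df = pd.DataFrame({'flow': range(10)},
                      index=pd.date_range('2000-01-01', periods=10, freq='D'))
    period = pd.date_range('2000-01-03', '2000-01-05', freq='D')
    clipped = df.loc[(df.index >= period[0]) & (df.index <= period[-1])]
    assert len(clipped) == 3  # 3rd, 4th and 5th of January survive the clip
    return clipped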
Example #13
def test_big_filewrite():
    import awrams.models.awral.settings as settings
    settings.CLIMATE_DATA = '/mnt/awramsi_test_data/AWAP/'
    from awrams.utils import extents
    from awrams.utils import datetools as dt
    outputs = build_output_graph()
    test_simple_outputs(dt.dates('2000-2015'),
                        extents.default(),
                        output_mapping=outputs)
Example #14
def _sanitize_period(period, freq):
    if type(period) == str or type(period) == int:
        period = dt.dates(period)
        return pd.date_range(period[0], period[-1], freq=freq)
    ### +++ can it handle "jan 2000 - mar 2001" OR a pd.DatetimeIndex???
    elif type(period) == pd.DatetimeIndex:
        return pd.date_range(period[0], period[-1], freq=freq)
    else:
        raise TypeError('Unsupported period specification: %r' % (period,))
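
# Illustrative-only sketch of what _sanitize_period normalises to: whether the caller
# passes a string/int (parsed via dt.dates) or a pd.DatetimeIndex, the result is a
# pd.date_range spanning the same start and end at the requested frequency. The daily
# range below merely stands in for dt.dates('jan 2000 - mar 2001').
import pandas as pd

def _sanitize_period_example():
    daily = pd.date_range('2000-01-01', '2001-03-31', freq='D')
    monthly = pd.date_range(daily[0], daily[-1], freq='M')  # month-end stamps within the period
    assert monthly[0] == pd.Timestamp('2000-01-31')
    assert monthly[-1] == pd.Timestamp('2001-03-31')
    return monthly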
Example #15
def test_output_filewrite():
    import awrams.models.awral.settings as settings
    settings.CLIMATE_DATA = '/mnt/awramsi_test_data/AWAP/'
    from awrams.utils import extents
    from awrams.utils import datetools as dt
    outputs = build_output_graph()
    test_simple_outputs(dt.dates('2008-2009'),
                        extents.from_boundary_offset(200,200,250,250),
                        output_mapping=outputs)
Example #16
def test_climatology_region():
    ### test region
    period = dt.dates('dec 2010')
    extent = extents.get_default_extent()
    extent = extent.icoords[-32:-35,115:118]

    from awrams.simulation.ondemand import OnDemandSimulator
    sim = OnDemandSimulator(awral,input_map)
    r,i = sim.run(period,extent,return_inputs=True)
    assert not np.isnan(i['solar_f']).any()
Example #17
def test_get_padded_by_coords():
    from awrams.utils.io.data_mapping import SplitFileManager
    from awrams.utils.mapping_types import gen_coordset
    import awrams.utils.datetools as dt

    path = os.path.join(os.path.dirname(__file__),'..','..','test_data','simulation')

    sfm = SplitFileManager.open_existing(path,'temp_min_day_*.nc','temp_min_day')
    # return sfm
    extent = sfm.get_extent().ioffset[200:230,200:230]
    period = dt.dates('2011')
    coords = gen_coordset(period,extent)

    data = sfm.get_padded_by_coords(coords)
    assert data.shape == coords.shape
Example #18
def setup_var_coords():
    global m_tvar
    global extent
    global test_path

    georef = geo.GeoReference((0,0),1000,1000,0.05)
    extent = extents.Extent(georef).ioffset[0:10,0:10]

    period = dt.dates('dec 2000 - jan 25 2001')
    tvar = mt.Variable('test_var','mm')
    
    m_tvar = mt.MappedVariable(tvar,mt.gen_coordset(period,extent),np.float32)

    test_path = os.path.join(os.path.dirname(__file__),'file_tests')

    shutil.rmtree(test_path,True)
Example #19
def filter_months(ts, min_month_len=None):
    from dateutil import rrule

    if min_month_len is None:
        # assumed default: keep every month (comparing size >= None would raise on Python 3)
        min_month_len = 0

    out_idx = pd.DatetimeIndex(data=[])
    firstday = list(rrule.rrule(rrule.MONTHLY, dtstart=ts.index[0],until=ts.index[0]))
    if len(ts.index) > 1:
        secondpart = list(rrule.rrule(rrule.MONTHLY, dtstart=ts.index[1],until=ts.index[-1]))
        alldaylist = firstday + secondpart
    else:
        alldaylist = firstday

    for month in alldaylist:
        if ts.loc[month.strftime("%m-%Y")].size >= min_month_len:
            out_idx = out_idx.union(dt.dates('%s %s' % (dt.name_of_month[month.month],month.year)))

    return out_idx
Example #20
    def retrieve_time_series(self,location,start=None,end=None):
        '''
        Read valid data if available, otherwise return pure climatology
        '''

        if start < self.start_date:
            if end > self.start_date:
                raise Exception("Only whole years of missing data valid in SplitClimateDataSet")
            period = dt.dates(start,end)
            out_series = self.gap_filler.create_for_period(period,location)
            return out_series

        series = self.read_data(None,location)
        if self.gap_filler.has_gaps(series,location):
            series = self.gap_filler.fill(series,location,self,series_start=start)

        return series
Example #21
def test_ondemand_with_mask():
    # Make output map with daily frequency
    output_map['mleaf_hrudr_state'] = nodes.write_to_ncfile_snapshot(outpath,'mleaf_hrudr', freq='D')

    period = dt.dates('25 dec 2010', '26 dec 2010')
    from awrams.simulation.ondemand import OnDemandSimulator
    sim = OnDemandSimulator(awral, input_map, omapping=output_map)
    r = sim.run(period, extents.get_default_extent())

    # Grab a new copy of the default extent in case the simulator mutated it
    default_mask = extents.get_default_extent().mask

    # Check that the results are masked arrays, using the first results and the final states
    # as examples. Then check the masks are the default mask - masked arrays ensure that masked
    # values are not used in computations.
    assert all(isinstance(r[key], np.ma.core.MaskedArray) for key in r.keys())
    assert all(isinstance(r['final_states'][key], np.ma.core.MaskedArray) for key in r['final_states'].keys())
    assert all(np.array_equal(r[key].mask[0], default_mask) for key in r.keys() if key != 'final_states')
    assert all(np.array_equal(r['final_states'][key].mask, default_mask) for key in r['final_states'].keys())
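
# NumPy-only sketch of the masked-array property the assertions above rely on:
# masked cells are excluded from reductions, so masked values never leak into results.
import numpy as np

def _masked_array_example():
    data = np.ma.masked_array([1.0, 2.0, 4.0], mask=[False, False, True])
    assert isinstance(data, np.ma.core.MaskedArray)
    assert data.mean() == 1.5  # the masked 4.0 is ignored
    assert np.array_equal(data.mask, [False, False, True])
    return data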
Example #22
def test_output_graph_processing_splitfm_G():
    from awrams.utils import extents
    from awrams.utils import datetools as dt

    e_all = extents.get_default_extent()

    from awrams.utils.nodegraph import nodes,graph
    from awrams.simulation.ondemand import OnDemandSimulator


    print("RUNNER NEW: single cell ncf, multiple years")
    period = dt.dates('2010-2011')
    extent = e_all.ioffset[202,202]

    #input_map = awral.get_default_mapping()
    output_map = awral.get_output_mapping()
    output_map['s0_save'] = nodes.write_to_annual_ncfile(os.path.dirname(__file__),'s0')
    # outputs = graph.OutputGraph(output_map)
    runner = OnDemandSimulator(awral,input_map,omapping=output_map)
    r = runner.run(period,extent)
Example #23
def test_output_graph_processing_snapshotfm_A():
    from awrams.utils import extents
    from awrams.utils import datetools as dt

    e_all = extents.get_default_extent()

    from awrams.utils.nodegraph import nodes,graph
    from awrams.simulation.ondemand import OnDemandSimulator

    #input_map = awral.get_default_mapping()
    output_map = awral.get_output_mapping()
    output_map['s0_save'] = nodes.write_to_ncfile_snapshot(
                            os.path.dirname(__file__), 's0')

    runner = OnDemandSimulator(awral, input_map, omapping=output_map)

    print("RUNNER NEW: multiple cells, multiple years")
    period = dt.dates('2010-2011')
    extent = e_all.ioffset[200, 200:202]
    r = runner.run(period,extent)
Example #24
def test_get_padded_by_coords():
    from awrams.utils.io.data_mapping import SplitFileManager
    from awrams.utils.mapping_types import gen_coordset
    import awrams.utils.datetools as dt

    data_paths = config_manager.get_system_profile().get_settings(
    )['DATA_PATHS']

    path = os.path.join(data_paths['BASE_DATA'], 'test_data', 'simulation',
                        'climate', 'temp_min_day')

    sfm = SplitFileManager.open_existing(path, 'temp_min_day_*.nc',
                                         'temp_min_day')
    # return sfm
    extent = sfm.get_extent().ioffset[200:230, 200:230]
    period = dt.dates('2011')
    coords = gen_coordset(period, extent)

    data = sfm.get_padded_by_coords(coords)
    assert data.shape == coords.shape
Example #25
def test_climatology_region():
    import numpy as np
    from awrams.utils import extents
    from awrams.utils import datetools as dt
    from awrams.simulation.ondemand import OnDemandSimulator
    #from awrams.models import awral

    ### test region
    period = dt.dates('dec 2010')
    extent = extents.get_default_extent()
    extent = extent.icoords[-32:-35, 115:118]

    sim = OnDemandSimulator(awral, input_map)  #,omapping=output_map.mapping)
    r, i = sim.run(period, extent, return_inputs=True)
    ### this should be true
    assert np.isnan(i['solar_f']).any()

    insert_climatology(input_map)
    sim = OnDemandSimulator(awral, input_map)  #,omapping=output_map.mapping)
    r, i = sim.run(period, extent, return_inputs=True)
    assert not np.isnan(i['solar_f']).any()
Example #26
def test_climatology_point():
    import numpy as np
    from awrams.utils import extents
    from awrams.utils import datetools as dt
    from awrams.simulation.ondemand import OnDemandSimulator
    from awrams.models import awral

    ### test point
    period = dt.dates('dec 2010')
    extent = extents.from_cell_coords(-30, 120.5)

    sim = OnDemandSimulator(awral,
                            input_map.mapping)  #,omapping=output_map.mapping)
    r, i = sim.run(period, extent, return_inputs=True)
    ### this should be true
    assert np.isnan(i['solar_f']).any()

    insert_climatology(input_map)
    sim = OnDemandSimulator(awral,
                            input_map.mapping)  #,omapping=output_map.mapping)
    r, i = sim.run(period, extent, return_inputs=True)
    assert not np.isnan(i['solar_f']).any()
Example #27
def test_extraction():
    from awrams.utils import gis
    catchments = gis.ShapefileDB(gis.CATCHMENT_SHAPEFILE)

    e_all = extents.get_default_extent()

    e = catchments.get_extent_by_field('StationID', '421103', e_all)

    period = dt.dates('jun 1990 - jan 1995')

    var_name = 'rain_day'

    path, _ = os.path.split(os.path.abspath(__file__))

    data_path = os.path.join(path, '../../test_data/calibration/')

    pattern = data_path + '/%s*' % var_name

    df = extract(data_path, pattern, var_name, {
        '241': e,
        '512': e_all.ioffset[400, 400]
    }, period)

    assert ((df.index == period).all())
Example #28
def test_SplitFileWriterNode():
    from awrams.utils import extents
    from awrams.utils import datetools as dt

    extent = extents.get_default_extent()

    from awrams.utils.nodegraph import nodes
    from awrams.simulation.ondemand import OnDemandSimulator

    input_map = awral.get_default_mapping()
    climate_mod(input_map)

    from awrams.utils.metatypes import ObjectDict

    # output_path = './'
    output_map = awral.get_output_mapping()
    output_map['qtot_save'] = nodes.write_to_annual_ncfile('./', 'qtot')

    runner = OnDemandSimulator(awral, input_map, omapping=output_map)

    period = dt.dates('2010-2011')
    extent = extent.ioffset[200, 200:202]
    r = runner.run(period, extent)
Example #29
def test_single_catchment():
    import awrams.calibration.calibrate as cal
    from awrams.calibration.sce import SCEOptimizer, ProxyOptimizer

    from awrams.utils import datetools as dt

    import pandas as pd
    import os

    cal_catchment = '421103'  # alternative test catchments: '204007', '105001'
    time_period = dt.dates('1990 - 1995')  # alternative period: '2005 - 2010'

    # path = os.path.join(os.path.dirname(__file__),'data')
    path = os.path.join(os.path.dirname(__file__), '..', '..', 'test_data',
                        'calibration')

    from awrams.utils import catchments
    # Get the catchment as a spatial extent we can use as the bounds of the simulation
    try:
        db = catchments.CatchmentDB()
        spatial = db.get_by_id(cal_catchment)

    except ImportError as e:
        print(e)
        # read catchment extent from a pickle
        import pickle
        # path = os.path.join(os.path.dirname(__file__),'../../test_data')
        pkl = os.path.join(path, 'extent_421103.pkl')
        spatial = pickle.load(open(pkl, 'rb'))

    def change_path_to_forcing(imap):
        from awrams.utils.nodegraph import nodes

        # path = os.path.join(os.path.dirname(__file__),'../../test_data')

        FORCING = {
            'tmin': ('temp_min*', 'temp_min_day'),
            'tmax': ('temp_max*', 'temp_max_day'),
            'precip': ('rain_day*', 'rain_day'),
            'solar': ('solar*', 'solar_exposure_day')
        }
        for k, v in FORCING.items():
            imap.mapping[k + '_f'] = nodes.forcing_from_ncfiles(path,
                                                                v[0],
                                                                v[1],
                                                                cache=True)

    change_path_to_forcing(cal.input_map)

    csv = os.path.join(path, 'q_obs.csv')
    qobs = pd.read_csv(csv, parse_dates=[0])
    qobs = qobs.set_index(qobs.columns[0])
    obs = qobs[cal_catchment]

    parameters = cal.get_parameter_df(cal.input_map.mapping)

    evaluator = cal.RunoffEvaluator(time_period, spatial, obs)

    # Create the SCE instance...
    sce = ProxyOptimizer(13, 2, 4, 3, 3, parameters, evaluator)
    sce.max_iter = 100

    sce.run_optimizer()

    # run with seed population...
    sce.run_optimizer(seed=sce.population.iloc[0])

    sce.terminate_children()
Example #30
    def map_files(self,
                  path,
                  pattern,
                  variable,
                  ff=None,
                  max_open_files=MAX_FILES_PER_SFM,
                  map_func=None):

        var_name = variable if isinstance(variable, str) else variable.name

        self.var_name = var_name

        search_pattern = os.path.join(path, pattern)
        files = glob.glob(search_pattern)
        files.sort()

        if ff is None:

            def ff(x):
                return True

        _files = []
        for f in files:
            if ff(f):
                _files.append(f)
        files = _files

        if len(files) == 0:
            raise Exception("No files found in %s matching %s" %
                            (path, pattern))

        #import netCDF4 as ncd
        #db_opener_TEST = ncd.Dataset

        dsm_start = DatasetManager(db_opener(files[0], self.mode))
        #dsm_start = DatasetManager(open_append(db_opener_TEST,files[0],self.mode))

        self.ref_ds = dsm_start

        coords = dsm_start.get_coords()

        time = dsm_start.get_coord('time')
        time_idx = time.index

        self.file_time_map[files[0]] = time
        self.time_file_map[time.index[0]] = files[0]

        tsegs = [time.index]

        if len(files) > 1:
            for fn in files[1:]:
                if map_func is not None:
                    t = map_func(fn)
                else:
                    dsm = DatasetManager(db_opener(fn, self.mode))
                    t = dsm.get_coord('time')
                self.file_time_map[fn] = t
                self.time_file_map[t[0]] = fn
                #self.time_access_map[t[0]] = dsm.variables[var_name] #+++ as below...
                #self.datasetmanager_map[t[0]] = dsm #+++ deprecate? Only works if files are open...
                tsegs.append(t)
                #time_idx = time_idx.union(t.index)

        new_segs = []

        for i in range(1, len(tsegs)):
            first_new, last_old = tsegs[i][0], tsegs[i - 1][-1]
            tdelta = first_new - last_old

            day = dt.days(1)

            if tdelta > day:
                new_t = dt.dates(last_old + day, first_new - day)
                new_segs.append(new_t)
                self.time_file_map[new_t[0]] = None

        all_segs = sorted(tsegs + new_segs, key=lambda t: t[0])

        self.seg_time_map = dict([(i, t[0]) for i, t in enumerate(all_segs)])

        full_t = time.index.union_many(all_segs)
        full_tc = TimeCoordinates(time.dimension, full_t)

        self.cs = CoordinateSet((full_tc, coords.latitude, coords.longitude))

        self.splitter = Splitter(full_tc, all_segs)

        ncvar = dsm_start.variables[var_name]
        self.fillvalue = ncvar.attrs['_FillValue'][0]
        #self.fillvalue = 1.0
        v = Variable.from_ncvar(ncvar)

        self.mapped_var = MappedVariable(v, self.cs, ncvar.dtype)
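
# Pandas-only sketch (assumption) of the gap-handling idea in map_files above: when
# consecutive files leave a hole in the time axis, a synthetic date range is generated
# to stand in for the missing segment. The segment dates below are illustrative only.
import pandas as pd

def _fill_time_gap_example():
    seg_a = pd.date_range('2000-01-01', '2000-12-31', freq='D')
    seg_b = pd.date_range('2002-01-01', '2002-12-31', freq='D')
    day = pd.Timedelta(days=1)
    gap = None
    if seg_b[0] - seg_a[-1] > day:
        gap = pd.date_range(seg_a[-1] + day, seg_b[0] - day, freq='D')
    assert gap is not None
    assert gap[0] == pd.Timestamp('2001-01-01') and gap[-1] == pd.Timestamp('2001-12-31')
    return gap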