Example #1
def test_DoubleVector(self):
     v1 = api.DoubleVector([i for i in range(10)])  # built from a list of 10 ints
     v2 = api.DoubleVector.FromNdArray(np.arange(0, 10.0, 0.5))
     v3 = api.DoubleVector(np.arange(0, 10.0, 0.5))
     self.assertEqual(len(v1), 10)
     self.assertEqual(len(v2), 20)
     self.assertEqual(len(v3), 20)
     self.assertAlmostEqual(v2[3], 1.5)
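A quick aside: the constructions above are interchangeable. A minimal sketch (assuming the same api module and numpy as np) showing that a DoubleVector built from a list and one built from an ndarray hold the same values:

     va = api.DoubleVector([0.0, 0.5, 1.0])                        # from a Python list
     vb = api.DoubleVector.FromNdArray(np.array([0.0, 0.5, 1.0]))  # from a numpy array
     assert va.to_numpy().tolist() == vb.to_numpy().tolist()       # both expose to_numpy()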
Example #2
 def test_min_max_check_ts_fill(self):
     ta=api.TimeAxis(0, 1, 5)
     ts_src=api.TimeSeries(ta, values=api.DoubleVector([1.0, -1.0, 2.0, float('nan'), 4.0]), point_fx=api.POINT_AVERAGE_VALUE)
     cts=api.TimeSeries(ta, values=api.DoubleVector([1.0, 1.8, 2.0, 2.0, 4.0]), point_fx=api.POINT_AVERAGE_VALUE)
     ts_qac=ts_src.min_max_check_ts_fill(v_max=10.0, v_min=-10.0, dt_max=300, cts=cts)
     self.assertAlmostEqual(ts_qac.value(3), 2.0)
     ts_qac=ts_src.min_max_check_ts_fill(v_max=10.0, v_min=0.0, dt_max=300, cts=cts)
     self.assertAlmostEqual(ts_qac.value(1), 1.8)  # -1 below v_min, replaced with the value from cts
     self.assertAlmostEqual(ts_qac.value(3), 2.0)
Example #3
 def test_create_from_x_y_z_vector(self):
     x = api.DoubleVector([1.0, 4.0, 7.0])
     y = api.DoubleVector([2.0, 5.0, 8.0])
     z = api.DoubleVector([3.0, 6.0, 9.0])
     gpv = api.GeoPointVector.create_from_x_y_z(x, y, z)
     for i in range(3):
         self.assertAlmostEqual(gpv[i].x, 3 * i + 1)
         self.assertAlmostEqual(gpv[i].y, 3 * i + 2)
         self.assertAlmostEqual(gpv[i].z, 3 * i + 3)
Example #4
 def _create_shyft_ts(self):
     b = 946684800  # 2000.01.01 00:00:00
     h = 3600  # one hour in seconds
     values = np.array([1.0, 2.0, 3.0])
     shyft_ts_factory = api.TsFactory()
     return shyft_ts_factory.create_point_ts(len(values), b, h,
                                             api.DoubleVector(values))
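For reference, a hedged usage sketch of the helper above (assuming it lives on a test class and that point time-series expose size() and value(i), as the other examples here suggest):

     ts = self._create_shyft_ts()
     assert ts.size() == 3                 # three hourly points from 2000.01.01
     assert abs(ts.value(0) - 1.0) < 1e-9  # first point carries the first value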
Example #5
 def test_calibration_ts_case(self):
     times=[0, 3600, 3600 + 2*3600]
     ta=api.TimeAxis(api.UtcTimeVector(times[0:-1]), times[-1])
     values=api.DoubleVector([0.0]*(len(times) - 1))
     ts=api.TimeSeries(ta, values, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
     target=api.TargetSpecificationPts(ts, api.IntVector([0]), 1.0, api.ABS_DIFF, 1.0, 1.0, 1.0, api.CELL_CHARGE, 'water_balance')
     self.assertIsNotNone(target)
Example #6
 def test_abs(self):
     c=api.Calendar()
     t0=c.time(2016, 1, 1)
     dt=api.deltahours(1)
     n=4
     v=api.DoubleVector([1.0, -1.5, float("nan"), 3.0])
     ta=api.TimeAxisFixedDeltaT(t0, dt, n)
     ts0=api.TimeSeries(ta=ta, values=v, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
     tsa=api.TimeSeries('a')
     ts1=tsa.abs()
     ts1_blob=ts1.serialize()
     ts1=api.TimeSeries.deserialize(ts1_blob)
     self.assertTrue(ts1.needs_bind())
     bts=ts1.find_ts_bind_info()
     self.assertEqual(len(bts), 1)
     bts[0].ts.bind(ts0)
     ts1.bind_done()
     self.assertFalse(ts1.needs_bind())
     self.assertAlmostEqual(ts0.value(0), ts1.value(0), 6)
     self.assertAlmostEqual(abs(ts0.value(1)), ts1.value(1), 6)
     self.assertTrue(math.isnan(ts1.value(2)))
     self.assertAlmostEqual(ts0.value(3), ts1.value(3), 6)
     tsv0=api.TsVector()
     tsv0.append(ts0)
     tsv1=tsv0.abs()
     self.assertAlmostEqual(tsv0[0].value(0), tsv1[0].value(0), 6)
     self.assertAlmostEqual(abs(tsv0[0].value(1)), tsv1[0].value(1), 6)
     self.assertTrue(math.isnan(tsv1[0].value(2)))
     self.assertAlmostEqual(tsv0[0].value(3), tsv1[0].value(3), 6)
Example #7
    def verify_parameter_for_calibration(self,
                                         param,
                                         expected_size,
                                         valid_names,
                                         test_dict=None):
        min_p_value = -1e+10
        max_p_value = +1e+10
        test_dict = test_dict or dict()
        self.assertEqual(expected_size, param.size(),
                         "expected parameter size changed")
        pv = api.DoubleVector([param.get(i) for i in range(param.size())])
        for i in range(param.size()):
            v = param.get(i)
            self.assertTrue(v > min_p_value and v < max_p_value)
            if i not in test_dict:
                pv[i] = v * 1.01
                param.set(pv)  # set the complete vector; only used during C++ calibration, but we verify it here
                x = param.get(i)
                self.assertAlmostEqual(v * 1.01, x, 3,
                                       "Expect new value when setting value")
            else:
                pv[i] = test_dict[i]
                param.set(pv)
                x = param.get(i)
                self.assertAlmostEqual(x, test_dict[i], 1,
                                       "Expect new value when setting value")
            p_name = param.get_name(i)
            self.assertTrue(len(p_name) > 0, "parameter name should exist")
            self.assertEqual(valid_names[i], p_name)
Example #8
 def test_store(self):
     ds = SmGTsRepository(PREPROD)
     nl = [u'/shyft/test/a', u'/shyft/test/b',
           u'/shyft/test/c']  #[u'/ICC-test-v9.2']
     t0 = 946684800  # time_t/unixtime 2000.01.01 00:00:00
     dt = 3600  # one hour in seconds
     values = np.array([1.0, 2.0, 3.0])
     shyft_ts_factory = api.TsFactory()
     shyft_result_ts = shyft_ts_factory.create_point_ts(
         len(values), t0, dt, api.DoubleVector(values))
     shyft_catchment_result = dict()
     shyft_catchment_result[nl[0]] = shyft_result_ts
     shyft_catchment_result[nl[1]] = shyft_result_ts
     shyft_catchment_result[nl[2]] = shyft_result_ts
     r = ds.store(shyft_catchment_result)
     self.assertEqual(r, True)
     # now read back the ts.. and verify it's there..
     read_period = api.UtcPeriod(t0, t0 + 3 * dt)
     rts_list = ds.read(nl, read_period)
     self.assertIsNotNone(rts_list)
     c2 = rts_list[nl[-1]]
     for i in range(len(values)):
         self.assertAlmostEqual(c2.value(i), values[i])
Example #9
    def _call_qm(self, prep_fcst_lst, weights, geo_points, ta,
                 input_source_types, nb_prior_scenarios):

        # Check interpolation period is within time axis (ta)
        ta_start = ta.time(0)
        ta_end = ta.time(ta.size() - 1)  # start of last time step
        interp_start = ta_start + api.deltahours(self.qm_interp_hours[0])
        interp_end = ta_start + api.deltahours(self.qm_interp_hours[1])
        if interp_start > ta_end:
            interp_start = api.no_utctime
            interp_end = api.no_utctime
        if interp_end > ta_end:
            interp_end = ta_end

        # Re-organize data before sending to api.quantile_map_forecast. For each source type and geo_point, group
        # forecasts as TsVectorSets, send to api.quantile_map_forecast and return results as ensemble of source-keyed
        # dictionaries of geo-ts
        # First re-organize weights - one weight per TVS.
        weight_sets = api.DoubleVector([w for ws in weights for w in ws])

        # New version
        results = [{} for i in range(nb_prior_scenarios)]
        for src in input_source_types:
            qm_scenarios = []
            for geo_pt_idx, geo_pt in enumerate(geo_points):
                forecast_sets = api.TsVectorSet()
                for i, fcst_group in enumerate(prep_fcst_lst):
                    for j, forecast in enumerate(fcst_group):
                        scenarios = api.TsVector()
                        for member in forecast:
                            scenarios.append(member[src][geo_pt_idx].ts)
                        forecast_sets.append(scenarios)
                        if i == self.repo_prior_idx and j == 0:
                            prior_data = scenarios
                            # TODO: read prior if repo_prior_idx is None

                qm_scenarios.append(
                    api.quantile_map_forecast(forecast_sets, weight_sets,
                                              prior_data, ta, interp_start,
                                              interp_end, True))

            # Alternative: convert to array to enable slicing
            # arr = np.array(qm_scenarios)

            # Now organize to desired output format: ensemble of source-keyed dictionaries of geo-ts
            for i in range(0, nb_prior_scenarios):
                # source_dict = {}
                # ts_vct = arr[:, i]
                ts_vct = [x[i] for x in qm_scenarios]
                vct = self.source_vector_map[src]()
                for geo_pt, ts in zip(geo_points, ts_vct):
                    vct.append(self.source_type_map[src](geo_pt, ts))
                # Alternatives:
                # vct[:] = [self.source_type_map[src](geo_pt, ts) for geo_pt, ts in zip(geo_points, ts_vct)]
                # vct = self.source_vector_map[src]([self.source_type_map[src](geo_pt, ts) for geo_pt, ts in zip(geo_points, ts_vct)])
                results[i][src] = vct
        return results
Example #10
 def _create_constant_geo_ts(self, geo_ts_type, geo_point, utc_period, value):
     """Create a time point ts, with one value at the start
     of the supplied utc_period."""
     tv = api.UtcTimeVector()
     tv.push_back(utc_period.start)
     vv = api.DoubleVector()
     vv.push_back(value)
     cts = api.TsFactory().create_time_point_ts(utc_period, tv, vv, api.POINT_AVERAGE_VALUE)
     return geo_ts_type(geo_point, cts)
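A small standalone sanity sketch for the helper above (hypothetical check, assuming a one-hour utc_period and reusing the same factory calls):

     p = api.UtcPeriod(0, 3600)
     tv = api.UtcTimeVector()
     tv.push_back(p.start)
     vv = api.DoubleVector()
     vv.push_back(42.0)
     cts = api.TsFactory().create_time_point_ts(p, tv, vv, api.POINT_AVERAGE_VALUE)
     assert abs(cts.value(0) - 42.0) < 1e-9  # the single point carries the supplied value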
Example #11
 def _make_shyft_ts_from_xts(xts):
     if not isinstance(xts, ITimeSeries):
         raise SmgDataError("Supplied xts should be of type ITimeSeries")
     t = api.UtcTimeVector()
     v = api.DoubleVector()
     for i in range(xts.Count):
         t.push_back(xts.Time(i).ToUnixTime())
         v.push_back(xts.Value(i).V)
     shyft_ts = api.TsFactory().create_time_point_ts(api.UtcPeriod(t[0], t[-1]), t, v)
     return shyft_ts
Example #12
 def test_min_max_check_linear_fill(self):
     ta=api.TimeAxis(0, 1, 5)
     ts_src=api.TimeSeries(ta, values=api.DoubleVector([1.0, -1.0, 2.0, float('nan'), 4.0]), point_fx=api.POINT_AVERAGE_VALUE)
     ts_qac=ts_src.min_max_check_linear_fill(v_max=10.0, v_min=-10.0, dt_max=300)
     self.assertAlmostEqual(ts_qac.value(3), 3.0)
     ts_qac=ts_src.min_max_check_linear_fill(v_max=10.0, v_min=0.0, dt_max=300)
     self.assertAlmostEqual(ts_qac.value(1), 1.5)  # -1 below v_min, replaced by linear interpolation between neighbours
     self.assertAlmostEqual(ts_qac.value(3), 3.0)
     ts_qac=ts_src.min_max_check_linear_fill(v_max=10.0, v_min=0.0, dt_max=0)
     self.assertTrue(not math.isfinite(ts_qac.value(3)))  # should give nan, not allowed to fill in
     self.assertTrue(not math.isfinite(ts_qac.value(1)))  # should give nan, not allowed to fill in
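Side by side, the two qac variants differ only in where replacements come from: min_max_check_linear_fill interpolates between the surviving neighbour points, while min_max_check_ts_fill (Example #2) takes values from the supplied cts. A minimal sketch of the contrast, reusing ts_src from this example and assuming a cts like the one in Example #2:

     ts_lin = ts_src.min_max_check_linear_fill(v_max=10.0, v_min=0.0, dt_max=300)
     ts_cts = ts_src.min_max_check_ts_fill(v_max=10.0, v_min=0.0, dt_max=300, cts=cts)
     # ts_lin.value(1) -> 1.5 (midpoint of 1.0 and 2.0); ts_cts.value(1) -> 1.8 (taken from cts)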
Example #13
    def test_create_TargetSpecificationPts(self):
        t = api.TargetSpecificationPts()
        t.scale_factor = 1.0
        t.calc_mode = api.NASH_SUTCLIFFE
        t.calc_mode = api.KLING_GUPTA
        t.s_r = 1.0  # KGEs scale-factors
        t.s_a = 2.0
        t.s_b = 3.0
        self.assertAlmostEqual(t.scale_factor, 1.0)
        # create a ts with some points
        cal = api.Calendar()
        start = cal.time(api.YMDhms(2015, 1, 1, 0, 0, 0))
        dt = api.deltahours(1)
        tsf = api.TsFactory()
        times = api.UtcTimeVector()
        times.push_back(start + 1 * dt)
        times.push_back(start + 3 * dt)
        times.push_back(start + 4 * dt)

        values = api.DoubleVector()
        values.push_back(1.0)
        values.push_back(3.0)
        values.push_back(np.nan)
        tsp = tsf.create_time_point_ts(api.UtcPeriod(start, start + 24 * dt), times, values)
        # convert it from a time-point ts (as returned from the current smg repository) to a fixed-interval time-axis ts, as needed by calibration
        tst = api.TsTransform()
        tsa = tst.to_average(start, dt, 24, tsp)
        # tsa2 = tst.to_average(start,dt,24,tsp,False)
        # tsa_staircase = tst.to_average_staircase(start,dt,24,tsp,False) # nans infects the complete interval to nan
        # tsa_staircase2 = tst.to_average_staircase(start,dt,24,tsp,True) # skip nans, nans are 0
        # stuff it into the target spec.
        # also show how to specify snow-calibration
        cids = api.IntVector([0, 2, 3])
        t2 = api.TargetSpecificationPts(tsa,cids, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0, api.SNOW_COVERED_AREA)
        t2.catchment_property = api.SNOW_WATER_EQUIVALENT
        self.assertEqual(t2.catchment_property, api.SNOW_WATER_EQUIVALENT)
        self.assertIsNotNone(t2.catchment_indexes)
        for i in range(len(cids)):
            self.assertEqual(cids[i], t2.catchment_indexes[i])
        t.ts = tsa
        # TODO: does not work yet, lists of objects are not convertible: tv = api.TargetSpecificationVector([t, t2])
        tv=api.TargetSpecificationVector()
        tv.append(t)
        tv.append(t2)
        # now verify we got something ok
        self.assertEqual(2, tv.size())
        self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # average of the linear segment 1.0..2.0 over step 1
        self.assertAlmostEqual(tv[0].ts.value(2), 2.5)  # average of the linear segment 2.0..3.0 over step 2
        self.assertAlmostEqual(tv[0].ts.value(3), 3.0)  # flat at the last point value
        # and that the target vector now has its own copy of the ts
        tsa.set(1, 3.0)
        self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # the ts passed onto the target spec is a copy
        self.assertAlmostEqual(tsa.value(1), 3.0)  # and we really did change the source
Example #14
 def test_double_vector(self):
     dv_from_list = api.DoubleVector([x for x in range(10)])
     dv_np = np.arange(10.0)
     dv_from_np = api.DoubleVector.from_numpy(dv_np)
     self.assertEqual(len(dv_from_list), 10)
     assert_array_almost_equal(dv_from_list.to_numpy(), dv_np)
     assert_array_almost_equal(dv_from_np.to_numpy(), dv_np)
     dv_from_np[5] = 8
     dv_from_np.append(11)
     dv_from_np.push_back(12)
     dv_np[5] = 8
     dv_np.resize(12)
     dv_np[10] = 11
     dv_np[11] = 12
     assert_array_almost_equal(dv_from_np.to_numpy(), dv_np)
Example #15
    def _downscaling(self, forecast, target_grid, ta_fixed_dt):
        # Using idw for time being
        prep_fcst = {}
        for src_type, fcst in forecast.items():
            if src_type == 'precipitation':
                # just setting some idw_params for the time being
                idw_params = api.IDWPrecipitationParameter()
                idw_params.max_distance = 15000
                idw_params.max_members = 4
                idw_params.scale_factor = 1.0
                prep_fcst[src_type] = api.idw_precipitation(
                    fcst, target_grid, ta_fixed_dt, idw_params)
            elif src_type == 'temperature':
                # just setting some idw_params for the time being
                idw_params = api.IDWTemperatureParameter()
                idw_params.max_distance = 15000
                idw_params.max_members = 4
                idw_params.gradient_by_equation = False
                prep_fcst[src_type] = api.idw_temperature(
                    fcst, target_grid, ta_fixed_dt, idw_params)
            elif src_type == 'radiation':
                # just setting some idw_params for the time being
                idw_params = api.IDWParameter()
                idw_params.max_distance = 15000
                idw_params.max_members = 4
                idw_params.distance_measure_factor = 1
                slope_factor = api.DoubleVector([0.9] * len(target_grid))
                prep_fcst[src_type] = api.idw_radiation(
                    fcst, target_grid, ta_fixed_dt, idw_params, slope_factor)
            elif src_type == 'wind_speed':
                # just setting some idw_params for the time being
                idw_params = api.IDWParameter()
                idw_params.max_distance = 15000
                idw_params.max_members = 4
                idw_params.distance_measure_factor = 1
                prep_fcst[src_type] = api.idw_wind_speed(
                    fcst, target_grid, ta_fixed_dt, idw_params)
            elif src_type == 'relative_humidity':
                # just setting some idw_params for the time being
                idw_params = api.IDWParameter()
                idw_params.max_distance = 15000
                idw_params.max_members = 4
                idw_params.distance_measure_factor = 1
                prep_fcst[src_type] = api.idw_relative_humidity(
                    fcst, target_grid, ta_fixed_dt, idw_params)

        return prep_fcst
    def test_idw_radiation_transform_from_set_to_grid(self):
        """
        Test IDW interpolation transforms wind_speed time-series according to time-axis and range.

        """
        idw_p = api.IDWParameter()
        self.assertEqual(idw_p.max_distance, 200000)
        self.assertEqual(idw_p.max_members, 10)
        fx = lambda z: [15 for _ in range(self.n)]
        arome_grid = self._create_geo_radiation_grid(self.nx, self.ny, self.dx_arome, fx)
        dest_grid_points = self._create_geo_point_grid(self.mnx, self.mny, self.dx_model)
        ta = api.TimeAxisFixedDeltaT(self.t, self.d * 3, int(self.n / 3))
        radiation_slope_factors = api.DoubleVector()
        radiation_slope_factors[:] = [0.9 for _ in range(len(dest_grid_points))]
        dest_grid = api.idw_radiation(arome_grid, dest_grid_points, ta, idw_p, radiation_slope_factors)
        self.assertIsNotNone(dest_grid)
        self.assertEqual(len(dest_grid), self.mnx * self.mny)
Example #17
 def test_merge_points(self):
     a=api.TimeSeries()  # an empty ts at the beginning; we allow that
     tb=api.TimeAxis(0, 1, 5)
     b=api.TimeSeries(tb, values=api.DoubleVector([1.0, -1.0, 2.0, 3.0, 4.0]), point_fx=api.POINT_AVERAGE_VALUE)
     a.merge_points(b)  # now a should equal b
     c=api.TimeSeries(api.TimeAxis(api.UtcTimeVector([3, 10, 11]), t_end=12), fill_value=9.0, point_fx=api.POINT_AVERAGE_VALUE)
     a.merge_points(c)  # now a should have a new value at t=3, plus new time-points 10 and 11
     self.assertEqual(len(a), 7)
     assert_array_almost_equal(a.values.to_numpy(), np.array([1.0, -1.0, 2.0, 9.0, 4.0, 9.0, 9.0]))
     assert_array_almost_equal(a.time_axis.time_points, np.array([0,1,2,3,4,10,11,12]))
     xa= api.TimeSeries("some_unbound_ts")
     xa.merge_points(a)  # now it should be bound, and its values are from a
     self.assertEqual(len(xa), 7)
     assert_array_almost_equal(xa.values.to_numpy(), np.array([1.0, -1.0, 2.0, 9.0, 4.0, 9.0, 9.0]))
     assert_array_almost_equal(xa.time_axis.time_points, np.array([0,1,2,3,4,10,11,12]))
     d=api.TimeSeries(api.TimeAxis(api.UtcTimeVector([3, 10, 11]), t_end=12), fill_value=10.0, point_fx=api.POINT_AVERAGE_VALUE)
     xa.merge_points(d)  # now that xa is bound, also check that it gets updated
     self.assertEqual(len(xa), 7)
     assert_array_almost_equal(xa.values.to_numpy(), np.array([1.0, -1.0, 2.0, 10.0, 4.0, 10.0, 10.0]))
     assert_array_almost_equal(xa.time_axis.time_points, np.array([0, 1, 2, 3, 4, 10, 11, 12]))
Example #18
# fragment: assumes radparamy = api.RadiationParameter(albedo, turbidity) was created earlier
radcaly24 = api.RadiationCalculator(radparamy)
radcaly3 = api.RadiationCalculator(radparamy)
radresy = api.RadiationResponse()
radresy1 = api.RadiationResponse()
radresy24 = api.RadiationResponse()
radresy3 = api.RadiationResponse()

# we now can simply run the routine step by step:
try:
    # remove results from any previous run; a NameError just means they never existed
    del net_rad
    del ra_rad
    del rah_rad
except NameError:
    pass

net_rad = api.DoubleVector()
swcalc_step1 = api.DoubleVector()
ra_rad = api.DoubleVector()
ra_rad1 = api.DoubleVector()
rah_rad = api.DoubleVector()
rat_rad = api.DoubleVector()
declin_arr = api.DoubleVector()
radtheorint_arr = api.DoubleVector()
swcalc_step24 = api.DoubleVector()
radcalc_step24 = api.DoubleVector()

swcalc_step3 = api.DoubleVector()
radcalc_step3 = api.DoubleVector()

i = 0
j = 1
Example #19
def run_radiation(latitude_deg, slope_deg, aspect_deg, elevation, albedo, turbidity, temperature, rhumidity, flag='instant', method='dingman'):

    """Module creates shyft radiation model with different timesteps and run it for a defined period of time (1 year with 24-hours averaging) """

    import numpy as np
    import math

    from shyft import api


    # single method test

    # here I will try to reproduce the Fig.1b from Allen2006 (reference)
    utc = api.Calendar()

    n = 365 # nr of time steps: 1 year, daily data
    t_start = utc.time(2002, 1, 1) # starting at the beginning of the year 2002

    # converting station data
    tempP1 = temperature  # [degC], real data should be used
    rhP1 = rhumidity  # [%], real data should be used
    rsm = 0.0

    radparam = api.RadiationParameter(albedo,turbidity)
    radcal_inst = api.RadiationCalculator(radparam)
    radcal_1h = api.RadiationCalculator(radparam)
    radcal_24h = api.RadiationCalculator(radparam)
    radcal_3h = api.RadiationCalculator(radparam)
    radres_inst = api.RadiationResponse()
    radres_1h = api.RadiationResponse()
    radres_24h = api.RadiationResponse()
    radres_3h = api.RadiationResponse()

    rv_rso = []  # clear-sky radiation, result vector
    rv_ra = []  # extraterrestrial radiation, result vector
    rv_net = []  # net radiation
    rv_net_sw = []  # net short-wave
    rv_net_lw = []  # net long-wave

    dayi = 0
    doy = api.DoubleVector()
    # running 24-h timestep
    step = api.deltahours(24)
    tadays = api.TimeAxis(t_start, step, n + 1)  # days
    k = 1
    while (k <= n):
        doy.append(dayi)
        k += 1
        dayi += 1

    if flag=='24-hour':
        dayi = 0
        doy = api.DoubleVector()

        # running 24-h timestep
        step = api.deltahours(24)
        tadays = api.TimeAxis(t_start, step, n+1)  # days
        k = 1
        while (k<=n):
            time1 = tadays.time(k-1)
            if method=='dingman':
                radcal_24h.net_radiation_step(radres_24h, latitude_deg, time1, step, slope_deg, aspect_deg,
                                                      tempP1, rhP1, elevation, rsm)
            else:
                radcal_24h.net_radiation_step_asce_st(radres_24h, latitude_deg, time1, step, slope_deg, aspect_deg, tempP1, rhP1, elevation, rsm)
            rv_rso.append(radres_24h.sw_t)
            rv_ra.append(radres_24h.ra)
            rv_net.append(radres_24h.net)
            rv_net_sw.append(radres_24h.net_sw)
            rv_net_lw.append(radres_24h.net_lw)
            # print(radres_24h.ra)
            doy.append(dayi)
            k+=1
            dayi += 1
        # doy.append(dayi)
    elif flag=='3-hour':

        # running 3h timestep
        step = api.deltahours(3)
        ta3 = api.TimeAxis(t_start, step, n * 8)  # 3h steps across the year
        rso_3h = [] #clear-sky radiation
        ra_3h = [] # extraterrestrial radiation
        net_sw_3h = []
        net_lw_3h = []
        net_3h = []
        k = 1
        while (k<n*8):
            time0 = ta3.time(k-1)
            if method=='dingman':
                radcal_3h.net_radiation_step(radres_3h, latitude_deg, time0, step, slope_deg, aspect_deg, tempP1,
                                                    rhP1, elevation, rsm)
            else:
                radcal_3h.net_radiation_step_asce_st(radres_3h, latitude_deg, time0, step, slope_deg, aspect_deg, tempP1, rhP1, elevation, rsm)
            rso_3h.append(radres_3h.sw_t)
            ra_3h.append(radres_3h.ra)
            net_sw_3h.append(radres_3h.net_sw)
            net_lw_3h.append(radres_3h.net_lw)
            net_3h.append(radres_3h.net)
            k+=1
        rv_rso = [sum(rso_3h[i:i + 8]) for i in range(0, len(rso_3h), 8)]
        rv_ra = [sum(ra_3h[i:i + 8]) for i in range(0, len(ra_3h), 8)]
        rv_net_sw = [sum(net_sw_3h[i:i + 8]) for i in range(0, len(net_sw_3h), 8)]
        rv_net_lw = [sum(net_lw_3h[i:i + 8])/8 for i in range(0, len(net_lw_3h), 8)]
        rv_net = [sum(net_3h[i:i + 8]) for i in range(0, len(net_3h), 8)]
    elif flag=='1-hour':
        # running 1h timestep
        step = api.deltahours(1)
        ta = api.TimeAxis(t_start, step, n * 24)  # hours, 1h timestep
        rso_1h = []
        ra_1h = []
        net_sw_1h = []
        net_lw_1h = []
        net_1h = []
        k = 1
        while (k<n*24):
            time1 = ta.time(k-1)
            if method=='dingman':
                radcal_1h.net_radiation_step(radres_1h, latitude_deg, time1, step, slope_deg, aspect_deg,
                                                     tempP1, rhP1, elevation, rsm)
            else:
                radcal_1h.net_radiation_step_asce_st(radres_1h, latitude_deg, time1, step, slope_deg, aspect_deg, tempP1, rhP1, elevation,rsm)
            rso_1h.append(radres_1h.sw_t)
            ra_1h.append(radres_1h.ra)
            net_sw_1h.append(radres_1h.net_sw)
            net_lw_1h.append(radres_1h.net_lw)
            net_1h.append(radres_1h.net)
            k += 1
        rv_rso = [sum(rso_1h[i:i + 24]) for i in range(0, len(rso_1h), 24)]
        rv_ra = [sum(ra_1h[i:i + 24]) for i in range(0, len(ra_1h), 24)]
        rv_net_sw = [sum(net_sw_1h[i:i + 24]) for i in range(0, len(net_sw_1h), 24)]
        rv_net_lw = [sum(net_lw_1h[i:i + 24])/24 for i in range(0, len(net_lw_1h), 24)]
        rv_net = [sum(net_1h[i:i + 24]) for i in range(0, len(net_1h), 24)]
    elif flag=='instant':
        # running instantaneous with dmin timestep
        minutes = 60
        dmin = 1
        step = api.deltaminutes(dmin)
        tamin = api.TimeAxis(t_start, step, n * 24 * minutes)
        rso_inst = []
        ra_inst = []
        net_sw_inst = []
        net_lw_inst = []
        net_inst = []
        doy1 = []
        k = 0
        while (k < n*24*minutes):
            timemin = tamin.time(k)
            radcal_inst.net_radiation(radres_inst, latitude_deg, timemin, slope_deg, aspect_deg, tempP1, rhP1,
                                      elevation, rsm)
            rso_inst.append(radres_inst.sw_t)
            ra_inst.append(radres_inst.ra)
            net_sw_inst.append(radres_inst.net_sw)
            net_lw_inst.append(radres_inst.net_lw)
            net_inst.append(radres_inst.net)
            doy1.append(k)
            k += 1
        rv_rso = [sum(rso_inst[i:i+24*minutes])/(24*minutes) for i in range(0,len(rso_inst),24*minutes)]
        rv_ra = [sum(ra_inst[i:i + 24 * minutes]) /(24 * minutes) for i in range(0, len(ra_inst), 24 * minutes)]
        rv_net_sw = [sum(net_sw_inst[i:i + 24*minutes])/(24*minutes) for i in range(0, len(net_sw_inst), 24*minutes)]
        rv_net_lw = [sum(net_lw_inst[i:i + 24*minutes])/(24*minutes) for i in range(0, len(net_lw_inst), 24*minutes)]
        rv_net = [sum(net_inst[i:i + 24*minutes])/(24*minutes) for i in range(0, len(net_inst), 24*minutes)]
    else:
        return 'Nothing to do. Please specify a timestep.'



    return doy, rv_ra, rv_rso, rv_net_sw, rv_net_lw, rv_net
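A hedged usage sketch of run_radiation as defined above (the station values are illustrative only, not from any dataset):

    doy, ra, rso, net_sw, net_lw, net = run_radiation(
        latitude_deg=60.0, slope_deg=10.0, aspect_deg=180.0, elevation=500.0,
        albedo=0.2, turbidity=1.0, temperature=5.0, rhumidity=70.0,
        flag='24-hour', method='dingman')
    # doy is an api.DoubleVector of day indices; the remaining lists hold daily aggregates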
Example #20
    def test_create_TargetSpecificationPts(self):
        t = api.TargetSpecificationPts()
        t.scale_factor = 1.0
        t.calc_mode = api.NASH_SUTCLIFFE
        t.calc_mode = api.KLING_GUPTA
        t.calc_mode = api.ABS_DIFF
        t.calc_mode = api.RMSE
        t.s_r = 1.0  # KGEs scale-factors
        t.s_a = 2.0
        t.s_b = 3.0
        self.assertIsNotNone(t.uid)
        t.uid = 'test'
        self.assertEqual(t.uid, 'test')
        self.assertAlmostEqual(t.scale_factor, 1.0)
        # create a ts with some points
        cal = api.Calendar()
        start = cal.time(2015, 1, 1, 0, 0, 0)
        dt = api.deltahours(1)
        tsf = api.TsFactory()
        times = api.UtcTimeVector()
        times.push_back(start + 1 * dt)
        times.push_back(start + 3 * dt)
        times.push_back(start + 4 * dt)

        values = api.DoubleVector()
        values.push_back(1.0)
        values.push_back(3.0)
        values.push_back(np.nan)
        tsp = tsf.create_time_point_ts(api.UtcPeriod(start, start + 24 * dt),
                                       times, values)
        # convert it from a time-point ts (as returned from the current smg repository) to a fixed-interval time-axis ts, as needed by calibration
        tst = api.TsTransform()
        tsa = tst.to_average(start, dt, 24, tsp)
        # tsa2 = tst.to_average(start,dt,24,tsp,False)
        # tsa_staircase = tst.to_average_staircase(start,dt,24,tsp,False) # nans infects the complete interval to nan
        # tsa_staircase2 = tst.to_average_staircase(start,dt,24,tsp,True) # skip nans, nans are 0
        # stuff it into the target spec.
        # also show how to specify snow-calibration
        cids = api.IntVector([0, 2, 3])
        t2 = api.TargetSpecificationPts(tsa, cids, 0.7, api.KLING_GUPTA, 1.0,
                                        1.0, 1.0, api.SNOW_COVERED_AREA,
                                        'test_uid')
        self.assertEqual(t2.uid, 'test_uid')
        t2.catchment_property = api.SNOW_WATER_EQUIVALENT
        self.assertEqual(t2.catchment_property, api.SNOW_WATER_EQUIVALENT)
        t2.catchment_property = api.CELL_CHARGE
        self.assertEqual(t2.catchment_property, api.CELL_CHARGE)
        self.assertIsNotNone(t2.catchment_indexes)
        for i in range(len(cids)):
            self.assertEqual(cids[i], t2.catchment_indexes[i])
        t.ts = api.TimeSeries(tsa)  # target spec is now a regular TimeSeries
        tv = api.TargetSpecificationVector()
        tv[:] = [t, t2]
        # now verify we got something ok
        self.assertEqual(2, tv.size())
        self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # average of the linear segment 1.0..2.0 over step 1
        self.assertAlmostEqual(tv[0].ts.value(2), 2.5)  # average of the linear segment 2.0..3.0 over step 2
        # self.assertAlmostEqual(tv[0].ts.value(3), 3.0)  # original flat out at end, but now:
        self.assertTrue(math.isnan(tv[0].ts.value(3)))  # strictly linear between points
        # and that the target vector now has its own copy of the ts
        tsa.set(1, 3.0)
        self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # the ts passed onto the target spec is a copy
        self.assertAlmostEqual(tsa.value(1), 3.0)  # and we really did change the source
        # Create a clone of target specification vector
        tv2 = api.TargetSpecificationVector(tv)
        self.assertEqual(2, tv2.size())
        self.assertAlmostEqual(tv2[0].ts.value(1), 1.5)  # same averages as tv[0]
        self.assertAlmostEqual(tv2[0].ts.value(2), 2.5)
        self.assertTrue(math.isnan(tv2[0].ts.value(3)))  # nan past the last point
        tv2[0].scale_factor = 10.0
        self.assertAlmostEqual(tv[0].scale_factor, 1.0)
        self.assertAlmostEqual(tv2[0].scale_factor, 10.0)
        # test we can create from breakpoint time-series
        ts_bp = api.TimeSeries(api.TimeAxis(api.UtcTimeVector([0, 25, 20]), 30),
                               fill_value=2.0, point_fx=api.POINT_AVERAGE_VALUE)

        tspec_bp = api.TargetSpecificationPts(ts_bp, cids, 0.7,
                                              api.KLING_GUPTA, 1.0, 1.0, 1.0,
                                              api.CELL_CHARGE, 'test_uid')
        self.assertIsNotNone(tspec_bp)
Example #21
 def p_max(self):
     return api.DoubleVector([
         self._config.calibration_parameters[name]['max']
         for name in self.calib_param_names
     ])
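The symmetric lower-bound accessor would read the 'min' entry; a hedged sketch (assuming the calibration_parameters config also carries a 'min' key per parameter, mirroring 'max' above):

 def p_min(self):
     return api.DoubleVector([
         self._config.calibration_parameters[name]['min']
         for name in self.calib_param_names
     ])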
Example #22
    def _call_qm_old(self, prep_fcst_lst, weights, geo_points, ta,
                     input_source_types, nb_prior_scenarios):

        # TODO: Extend handling to cover all cases and send out warnings if interpolation period is modified
        # Check ta against interpolation start and end times
        # Simple logic for time being, should be refined for the overlap cases
        ta_start = ta.time(0)
        ta_end = ta.time(ta.size() - 1)  # start of last time step
        interp_start = ta_start + api.deltahours(self.qm_interp_hours[0])
        interp_end = ta_start + api.deltahours(self.qm_interp_hours[1])
        if interp_start > ta_end:
            interp_start = api.no_utctime
            interp_end = api.no_utctime
        if interp_end > ta_end:
            interp_end = ta_end

        # Re-organize data before sending to api.quantile_map_forecast. For each source type and geo_point, group
        # forecasts as TsVectorSets, send to api.quantile_map_forecast and return results as ensemble of source-keyed
        # dictionaries of geo-ts
        # First re-organize weights - one weight per TVS.
        weight_sets = api.DoubleVector([w for ws in weights for w in ws])

        # New version: this logic now lives in _call_qm, shown above.

        # Old version (weight_sets is already computed above)
        result_dict = {}  # renamed from 'dict' to avoid shadowing the builtin
        for src in input_source_types:
            qm_scenarios = []
            for geo_pt_idx, geo_pt in enumerate(geo_points):
                forecast_sets = api.TsVectorSet()
                for i, fcst_group in enumerate(prep_fcst_lst):
                    for j, forecast in enumerate(fcst_group):
                        scenarios = api.TsVector()
                        for member in forecast:
                            scenarios.append(member[src][geo_pt_idx].ts)
                        forecast_sets.append(scenarios)

                        # TODO: handle prior similarly if repo_prior_idx is None
                        if i == self.repo_prior_idx and j == 0:
                            prior_data = scenarios

                qm_scenarios.append(
                    api.quantile_map_forecast(forecast_sets, weight_sets,
                                              prior_data, ta, interp_start,
                                              interp_end, True))
            result_dict[src] = np.array(qm_scenarios)

        # Now organize to desired output format: ensemble of source-keyed dictionaries of geo-ts
        # TODO: write function to extract info about prior like number of scenarios
        nb_prior_scenarios = result_dict[input_source_types[0]].shape[1]
        results = []
        for i in range(0, nb_prior_scenarios):
            source_dict = {}
            for src in input_source_types:
                ts_vct = dict[src][:, i]
                vct = self.source_vector_map[src]()
                for geo_pt, ts in zip(geo_points, ts_vct):
                    vct.append(self.source_type_map[src](geo_pt, ts))
                # Alternatives:
                # vct[:] = [self.source_type_map[src](geo_pt, ts) for geo_pt, ts in zip(geo_points, ts_vct)]
                # vct = self.source_vector_map[src]([self.source_type_map[src](geo_pt, ts) for geo_pt, ts in zip(geo_points, ts_vct)])
                source_dict[src] = vct
            results.append(source_dict)
        return results
Example #23
 def _create_shyft_ts(self):
     b = 946684800  # 2000.01.01 00:00:00
     h = 3600  # One hour in seconds
     v = np.array([1.0, 2.0, 3.0])
     return api.TsFactory().create_point_ts(len(v), b, h,
                                            api.DoubleVector(v))