Example #1
    def test_rating_curve_ts(self):
        t0=api.utctime_now()
        ta=api.TimeAxis(t0, api.deltaminutes(30), 48*2)
        data=np.linspace(0, 10, ta.size())
        ts=api.TimeSeries(ta, data, api.POINT_INSTANT_VALUE)

        rcf1=api.RatingCurveFunction()
        rcf1.add_segment(0, 1, 0, 1)
        rcf1.add_segment(api.RatingCurveSegment(5, 2, 0, 1))

        rcf2=api.RatingCurveFunction()
        rcf2.add_segment(0, 3, 0, 1)
        rcf2.add_segment(api.RatingCurveSegment(8, 4, 0, 1))

        rcp=api.RatingCurveParameters()
        rcp.add_curve(t0, rcf1)
        rcp.add_curve(t0 + api.deltahours(24), rcf2)

        sts=api.TimeSeries("a")
        rcsts=sts.rating_curve(rcp)

        rcsts_blob=rcsts.serialize()
        rcsts_2=api.TimeSeries.deserialize(rcsts_blob)

        self.assertTrue(rcsts_2.needs_bind())
        fbi=rcsts_2.find_ts_bind_info()
        self.assertEqual(len(fbi), 1)
        fbi[0].ts.bind(ts)
        rcsts_2.bind_done()
        self.assertFalse(rcsts_2.needs_bind())

        self.assertEqual(len(rcsts_2), len(ts))
        for i in range(rcsts_2.size()):
            v=ts.get(i).v
            if ts.get(i).t < t0 + api.deltahours(24):
                expected=1*v if v < 5 else 2*v  # rcf1: segments (0,1,0,1) and (5,2,0,1)
            else:
                expected=3*v if v < 8 else 4*v  # rcf2: segments (0,3,0,1) and (8,4,0,1)
            self.assertEqual(rcsts_2.get(i).t, ts.get(i).t)
            self.assertEqual(rcsts_2.get(i).v, expected)
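
For reference, each rating-curve segment in this test appears to evaluate flow = a*(h - b)**c for levels h at or above its lower bound, with the segment of the largest applicable lower bound winning. A minimal pure-Python sketch of the piecewise evaluation the assertions above encode (the formula is an assumption read off the parameters used here, not a documented contract):

def rating_curve_eval(segments, h):
    # segments: list of (lower, a, b, c), sorted by lower bound
    active = None
    for lower, a, b, c in segments:
        if h >= lower:
            active = (a, b, c)
    a, b, c = active
    return a*(h - b)**c

# mirrors rcf1 above: flow = 1*h below level 5, 2*h at or above
assert rating_curve_eval([(0, 1, 0, 1), (5, 2, 0, 1)], 2.0) == 2.0
assert rating_curve_eval([(0, 1, 0, 1), (5, 2, 0, 1)], 6.0) == 12.0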
Example #2
    def test_percentiles_with_min_max_extremes(self):
        """ the percentiles function now also supports picking out the min-max peak value
            within each interval.
            Setup test-data so that we have a well known percentile result,
            but also have peak-values within the interval that we can
            verify.
            We let hour ts 0..9 have values 0..9 constant 24*10 days
               then modify ts[1], every day first  value to a peak min value equal to - day_no*1
                                  every day second value to a peak max value equal to + day_no*1
                                  every day 3rd    value to a nan value
            ts[1] should then have same average value for each day (so same percentile)
                                            but min-max extreme should be equal to +- day_no*1
        """
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxis(t0, dt, n)
        timeseries=api.TsVector()
        p_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE
        for i in range(10):
            timeseries.append(api.TimeSeries(ta=ta, fill_value=i, point_fx=p_fx))

        ts=timeseries[1]  # pick this one to insert min/max extremes
        for i in range(0, 240, 24):
            ts.set(i + 0, 1.0 - 100*i/24.0)
            ts.set(i + 1, 1.0 + 100*i/24.0)  # notice that when i==0, this gives 1.0
            ts.set(i + 2, float('nan'))  # also put in a nan, just to verify it is ignored during average processing

        wanted_percentiles=api.IntVector([api.statistics_property.MIN_EXTREME,
                                          0, 10, 50,
                                          api.statistics_property.AVERAGE,
                                          70, 100,
                                          api.statistics_property.MAX_EXTREME])
        ta_day=api.TimeAxis(t0, dt*24, n//24)
        percentiles=api.percentiles(timeseries, ta_day, wanted_percentiles)
        for i in range(len(ta_day)):
            if i == 0:  # first timestep, the min/max extremes are picked from 0'th and 9'th ts.
                self.assertAlmostEqual(0.0, percentiles[0].value(i), 3, "min-extreme")
                self.assertAlmostEqual(9.0, percentiles[7].value(i), 3, "max-extreme")
            else:
                self.assertAlmostEqual(1.0 - 100.0*i*24.0/24.0, percentiles[0].value(i), 3, "min-extreme ")
                self.assertAlmostEqual(1.0 + 100.0*i*24.0/24.0, percentiles[7].value(i), 3, "max-extreme")
            self.assertAlmostEqual(0.0, percentiles[1].value(i), 3, "  0-percentile")
            self.assertAlmostEqual(0.9, percentiles[2].value(i), 3, " 10-percentile")
            self.assertAlmostEqual(4.5, percentiles[3].value(i), 3, " 50-percentile")
            self.assertAlmostEqual(4.5, percentiles[4].value(i), 3, "   -average")
            self.assertAlmostEqual(6.3, percentiles[5].value(i), 3, " 70-percentile")
            self.assertAlmostEqual(9.0, percentiles[6].value(i), 3, "100-percentile")
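
As a cross-check, the plain percentile levels asserted above are simply numpy's linear-interpolation percentiles of the constant levels 0..9; a quick sketch, independent of shyft:

import numpy as np

levels = np.arange(10.0)  # the constant values of ts 0..9
print(np.percentile(levels, [0, 10, 50, 70, 100]))
# -> [0.   0.9  4.5  6.3  9. ], matching the 0/10/50/70/100-percentile asserts above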
Example #3
    def test_accumulate(self):
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxis(t0, dt, n)
        ts0=api.TimeSeries(ta=ta, fill_value=1.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        tsa=1.0*api.TimeSeries('a') + 0.0  # an expression that needs bind

        ts1=tsa.accumulate(ta)  # ok, maybe we should add a method that makes the time-axis implicit?
        self.assertTrue(ts1.needs_bind())
        ts1_blob=ts1.serialize()
        ts1=api.TimeSeries.deserialize(ts1_blob)
        tsb=ts1.find_ts_bind_info()
        self.assertEqual(len(tsb), 1)
        tsb[0].ts.bind(ts0)
        ts1.bind_done()
        self.assertFalse(ts1.needs_bind())

        ts1_values=ts1.values
        for i in range(n):
            expected_value=i*dt*1.0
            self.assertAlmostEqual(expected_value, ts1.value(i), 3, "expect integral f(t)*dt")
            self.assertAlmostEqual(expected_value, ts1_values[i], 3, "expect value vector equal as well")
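
The expected values follow from accumulate being a running integral of f(t) dt from the start of the time-axis; a small numpy sketch of the same computation, assuming a constant series as above:

import numpy as np

dt = 3600.0       # one hour, in seconds
v = np.ones(240)  # ts0 above: constant 1.0
acc = np.concatenate(([0.0], np.cumsum(v*dt)[:-1]))  # acc[i] = integral of f(t)dt up to the start of step i
# acc[i] == i*dt*1.0, the expected_value asserted above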
Example #4
    def test_create_source_vector_does_not_leak(self):
        n = 365 * 24 * 1  # 1st checkpoint memory here,
        for i in range(10):
            v = api.TemperatureSourceVector([
                api.TemperatureSource(
                    api.GeoPoint(0.0, 1.0, 2.0),
                    api.TimeSeries(api.TimeAxis(api.time(0), api.time(3600), n),
                                   fill_value=float(x),
                                   point_fx=api.POINT_AVERAGE_VALUE))
                for x in range(n)
            ])
            self.assertIsNotNone(v)
            del v
        pass  # 2nd mem check here, should be approx same as first checkpoint
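
The two "checkpoint" comments hint at comparing memory use before and after the loop. A minimal sketch of wiring that up with the standard-library tracemalloc (the 10 MB threshold is an arbitrary assumption; tracemalloc only sees Python-level allocations, so a leak inside the C++ extension would need an external RSS check instead):

import tracemalloc

tracemalloc.start()
first, _ = tracemalloc.get_traced_memory()   # 1st checkpoint
# ... run the allocate/release loop from the test here ...
second, _ = tracemalloc.get_traced_memory()  # 2nd checkpoint
tracemalloc.stop()
assert second - first < 10*1024*1024, "loop appears to leak memory"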
Example #5
    def _create_target_specvect(self):
        print("Creating TargetSpecificationVector...")
        tv = api.TargetSpecificationVector()
        tst = api.TsTransform()
        cid_map = self.region_model.catchment_id_map
        for repo in self.target_repo:
            tsp = repo['repository'].read(
                [ts_info['uid'] for ts_info in repo['1D_timeseries']],
                self.time_axis.total_period())
            for ts_info in repo['1D_timeseries']:
                if np.count_nonzero(np.in1d(cid_map, ts_info['catch_id'])) != len(ts_info['catch_id']):
                    missing = [i for i in ts_info['catch_id'] if i not in cid_map]
                    raise ConfigSimulatorError(
                        "Catchment ID {} for target series {} not found.".format(
                            ','.join(str(i) for i in missing), ts_info['uid']))
                period = api.UtcPeriod(
                    ts_info['start_datetime'],
                    ts_info['start_datetime'] + ts_info['number_of_steps']*ts_info['run_time_step'])
                if not self.time_axis.total_period().contains(period):
                    raise ConfigSimulatorError(
                        "Period {} for target series {} is not within the full simulation period {}.".format(
                            period.to_string(), ts_info['uid'], self.time_axis.total_period().to_string()))
                # tsp = repo['repository'].read([ts_info['uid']], period)[ts_info['uid']]
                t = api.TargetSpecificationPts()
                t.uid = ts_info['uid']
                t.catchment_indexes = api.IntVector(ts_info['catch_id'])
                t.scale_factor = ts_info['weight']
                t.calc_mode = self.obj_funcs[ts_info['obj_func']['name']]
                for nm, k in zip(['s_r', 's_a', 's_b'], ['s_corr', 's_var', 's_bias']):
                    setattr(t, nm, ts_info['obj_func']['scaling_factors'][k])
                t.ts = api.TimeSeries(
                    tst.to_average(ts_info['start_datetime'],
                                   ts_info['run_time_step'],
                                   ts_info['number_of_steps'],
                                   tsp[ts_info['uid']]))
                tv.append(t)
        return tv
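
For orientation, each entry in repo['1D_timeseries'] is expected to carry roughly the following keys; this is a hypothetical illustration inferred from the lookups above, not a documented schema:

ts_info = {
    'uid': 'discharge/station-1',  # series id resolved by the repository
    'catch_id': [1, 2, 3],         # catchment ids the target covers
    'start_datetime': 0,           # utc start of the target period
    'run_time_step': 3600,         # step length in seconds
    'number_of_steps': 24,
    'weight': 1.0,                 # -> t.scale_factor
    'obj_func': {'name': 'NSE',
                 'scaling_factors': {'s_corr': 1.0, 's_var': 1.0, 's_bias': 1.0}},
}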
Example #6
    def test_compute_running_bias(self):
        """
        Verify that if we feed forecast[n] and observation into the bias-predictor
        it will create the estimated bias offsets
        """
        f = api.KalmanFilter()
        bp = api.KalmanBiasPredictor(f)
        self.assertIsNotNone(bp)
        self.assertEqual(bp.filter.parameter.n_daily_observations, 8)

        n_fc = 1
        utc = api.Calendar()
        t0 = utc.time(2016, 1, 1)
        dt = api.deltahours(1)
        n_fc_steps = 24 * 10  # 10 days history
        fc_dt = api.deltahours(6)
        fc_fx = lambda time_axis: self._create_fc_values(
            time_axis, 2.0)  # just return a constant 2.0 deg C for now

        n_obs = n_fc_steps
        obs_ta = api.TimeAxis(t0, dt, n_obs)
        obs_ts = api.TimeSeries(obs_ta,
                                fill_value=0.0,
                                point_fx=api.POINT_INSTANT_VALUE)
        kalman_dt = api.deltahours(
            3)  # suitable average for prediction temperature
        kalman_ta = api.TimeAxis(t0, kalman_dt, n_obs // 3)
        fc_ts = self._create_forecast_set(n_fc, t0, dt, n_fc_steps, fc_dt,
                                          fc_fx)[0]
        bias_ts = bp.compute_running_bias(
            fc_ts, obs_ts,
            kalman_ta)  # also verify we can feed in a pure TsVector
        bias_pattern = bp.state.x  # bp.state.x now holds the best estimates of the bias between fc and observation
        self.assertEqual(len(bias_pattern), 8)
        for i in range(len(bias_pattern)):
            self.assertLess(abs(bias_pattern[i] - 2.0),
                            0.2)  # bias should iterate to approx 2.0 degC now.
        # and...:
        for i in range(8):
            self.assertAlmostEqual(bias_ts.value(i),
                                   0.0)  # expect 0.0 for the first day

        for i in range(8):
            self.assertLess(abs(bias_ts.value(bias_ts.size() - i - 1) - 2.0),
                            0.2)  # last part should be 2.0 deg.C
Example #7
    def test_bias_predictor(self):
        """
        Verify that if we feed forecast[n] and observation into the bias-predictor
        it will create the estimated bias offsets
        """
        f = api.KalmanFilter()
        bp = api.KalmanBiasPredictor(f)
        self.assertIsNotNone(bp)
        self.assertEqual(bp.filter.parameter.n_daily_observations, 8)

        n_fc = 8
        utc = api.Calendar()
        t0 = utc.time(2016, 1, 1)
        dt = api.deltahours(1)
        n_fc_steps = 36  # e.g. like arome 36 hours
        fc_dt = api.deltahours(6)
        fc_fx = lambda time_axis: self._create_fc_values(
            time_axis, 2.0)  # just return a constant 2.0 deg C for now
        fc_set = self._create_geo_forecast_set(n_fc, t0, dt, n_fc_steps, fc_dt,
                                               fc_fx)
        n_obs = 24
        obs_ta = api.TimeAxis(t0, dt, n_obs)
        obs_ts = api.TimeSeries(obs_ta,
                                fill_value=0.0,
                                point_fx=api.POINT_INSTANT_VALUE)
        kalman_dt = api.deltahours(
            3)  # suitable average for prediction temperature
        kalman_ta = api.TimeAxis(t0, kalman_dt, 8)
        bp.update_with_forecast(
            fc_set, obs_ts, kalman_ta
        )  # here we feed in forecast-set and observation into kalman
        fc_setv = self._create_forecast_set(n_fc, t0, dt, n_fc_steps, fc_dt,
                                            fc_fx)
        bp.update_with_forecast(
            fc_setv, obs_ts,
            kalman_ta)  # also verify we can feed in a pure TsVector
        bias_pattern = bp.state.x  # bp.state.x now holds the best estimates of the bias between fc and observation
        self.assertEqual(len(bias_pattern), 8)
        for i in range(len(bias_pattern)):
            self.assertLess(abs(bias_pattern[i] - 2.0),
                            0.2)  # bias should iterate to approx 2.0 degC now.
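
Once the bias pattern has converged, a natural next step is to produce a bias time-series and subtract it from a fresh forecast. A rough sketch, assuming a single forecast TimeSeries fc_ts as in the previous example, and the sign convention the asserts above imply (bias is approximately forecast minus observation):

bias_ts = bp.compute_running_bias(fc_ts, obs_ts, kalman_ta)  # bias estimate over time
corrected_fc = fc_ts - bias_ts.average(fc_ts.time_axis)      # align resolution, then remove the bias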
Example #8
    def test_partition_by(self):
        """
        verify/demo exposure of the .partition_by function, which can
        be used to produce yearly percentile statistics for long historical
        time-series

        """
        c=api.Calendar()
        t0=c.time(1930, 9, 1)
        dt=api.deltahours(1)
        n=c.diff_units(t0, c.time(2016, 9, 1), dt)

        ta=api.TimeAxis(t0, dt, n)
        pattern_values=api.DoubleVector.from_numpy(np.arange(len(ta)))  # increasing values

        src_ts=api.TimeSeries(ta=ta, values=pattern_values,
                              point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)

        partition_t0=c.time(2016, 9, 1)
        n_partitions=80
        partition_interval=api.Calendar.YEAR
        # We get back a TsVector where, for each i, TsVector[i].index_of(partition_t0)
        # is the index ix such that TsVector[i].value(ix) is the start value of that
        # particular partition.
        ts_partitions=src_ts.partition_by(c, t0, partition_interval, n_partitions, partition_t0)
        self.assertEqual(len(ts_partitions), n_partitions)
        ty=t0
        for ts in ts_partitions:
            ix=ts.index_of(partition_t0)
            vix=ts.value(ix)
            expected_value=c.diff_units(t0, ty, dt)
            self.assertEqual(vix, expected_value)
            ty=c.add(ty, partition_interval, 1)

        # Now finally, try percentiles on the partitions
        wanted_percentiles=[0, 10, 25, -1, 50, 75, 90, 100]
        ta_percentiles=api.TimeAxis(partition_t0, api.deltahours(24), 365)
        percentiles=api.percentiles(ts_partitions, ta_percentiles, wanted_percentiles)
        self.assertEqual(len(percentiles), len(wanted_percentiles))
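
A typical follow-up is to pull each percentile curve out as a numpy array, e.g. for plotting the historical spread over the coming year (a small usage sketch):

curves = {p: percentiles[i].values.to_numpy()
          for i, p in enumerate(wanted_percentiles)}  # key -1 holds the average series
assert curves[0].shape == (365,)  # one value per day of ta_percentiles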
Example #9
tempmean_dv = api.DoubleVector.from_numpy(ws_Tmean)
rhmean_dv = api.DoubleVector.from_numpy(ws_rhmean)

# The TimeSeries class has some powerful functionality (however, that is not the subject matter here).
# For this reason, one needs to specify how the input data is to be interpreted:
# - as instant point values at the given time (e.g. most observed temperatures), or
# - as the average value over the period (e.g. most observed precipitation)
# This distinction is specified by passing the respective "point_interpretation_policy",
# provided by the API:
instant = api.point_interpretation_policy.POINT_INSTANT_VALUE
average = api.point_interpretation_policy.POINT_AVERAGE_VALUE

# Finally, we create shyft time-series as follows:
# (Note: This step is not necessarily required to run the single methods.
#  We could also just work with the double vector objects and the time axis)
tempmax_ts = api.TimeSeries(tadays, tempmax_dv, point_fx=instant)
tempmin_ts = api.TimeSeries(tadays, tempmin_dv, point_fx=instant)
ea_ts = api.TimeSeries(tadays, ea_dv, point_fx=instant)
rs_ts = api.TimeSeries(tadays, rs_dv, point_fx=instant)
windspeed_ts = api.TimeSeries(tadays, windspeed_dv, point_fx=instant)

# recalculated inputs:
tempmean_ts = api.TimeSeries(tadays, tempmean_dv, point_fx=instant)
rhmean_ts = api.TimeSeries(tadays, rhmean_dv, point_fx=instant)

radp = api.RadiationParameter(0.26, 1.0)
radc = api.RadiationCalculator(radp)
radr = api.RadiationResponse()
# pmp=api.PenmanMonteithParameter(lai,height_ws,height_t)

pmp = api.PenmanMonteithParameter(height_veg, height_ws, height_t, rl, 1)
Example #10
    def test_can_run_ordinary_kriging_from_observation_sites_to_1km_grid(self):
        """
        Somewhat more complex test, first do kriging of 1 timeseries out to grid (expect same values flat)
        then do kriging of 3 time-series out to the grid (expect different values, no real verification here since this is done elsewhere

        """
        # arrange the test with a btk_parameter, a source grid and a destination grid
        ok_parameter = api.OKParameter(
            c=1.0,
            a=10.0 * 1000.0,
            cov_type=api.OKCovarianceType.EXPONENTIAL,
            z_scale=1.0)
        fx = lambda z: api.DoubleVector.from_numpy(np.zeros(self.n))

        grid_1km_1 = self._create_geo_point_grid(self.mnx, self.mny,
                                                 self.dx_model)
        grid_1km_3 = self._create_geo_point_grid(self.mnx, self.mny,
                                                 self.dx_model)

        observation_sites = api.GeoPointSourceVector()
        ta_obs = api.TimeAxisFixedDeltaT(self.t, self.d * 3, int(self.n / 3))
        ta_grid = api.TimeAxisFixedDeltaT(self.t, self.d, self.n)
        point_fx = api.point_interpretation_policy.POINT_AVERAGE_VALUE
        ts_site_1 = api.TimeSeries(
            ta_obs,
            values=api.DoubleVector.from_numpy((1.0) + 0.1 * np.sin(
                np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi /
                8.0 - np.pi / 2.0)),
            point_fx=point_fx)
        ts_site_2 = api.TimeSeries(
            ta_obs,
            values=api.DoubleVector.from_numpy((0.8) + 0.2 * np.sin(
                np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi /
                8.0 - np.pi / 2.0)),
            point_fx=point_fx)
        ts_site_3 = api.TimeSeries(
            ta_obs,
            values=api.DoubleVector.from_numpy((1.2) + 0.1 * np.sin(
                np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi /
                8.0 - np.pi / 2.0)),
            point_fx=point_fx)

        observation_sites.append(
            api.GeoPointSource(api.GeoPoint(50.0, 50.0, 5.0), ts_site_1))

        # act 1: just one time-series put into the system; should give the same ts (true-averaged) in all the 1km grid ts (could be improved using std.gradient..)
        grid_1km_1ts = api.ordinary_kriging(observation_sites, grid_1km_1,
                                            ta_grid, ok_parameter)

        # assert 1:
        self.assertEqual(len(grid_1km_1ts), self.mnx * self.mny)
        expected_grid_1ts_values = ts_site_1.average(
            api.TimeAxis(ta_grid)).values.to_numpy()

        for gts in grid_1km_1ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertTrue(
                np.allclose(expected_grid_1ts_values,
                            gts.ts.values.to_numpy()))

        observation_sites.append(
            api.GeoPointSource(api.GeoPoint(9000.0, 500.0, 500), ts_site_2))
        observation_sites.append(
            api.GeoPointSource(api.GeoPoint(9000.0, 12000.0, 1050.0),
                               ts_site_3))
        ok_parameter.cov_type = api.OKCovarianceType.GAUSSIAN  # just to switch covariance formula
        grid_1km_3ts = api.ordinary_kriging(observation_sites, grid_1km_3,
                                            ta_grid, ok_parameter)

        self.assertEqual(len(grid_1km_3ts), self.mnx * self.mny)

        for gts in grid_1km_3ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertFalse(
                np.allclose(expected_grid_1ts_values,
                            gts.ts.values.to_numpy()))
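
To look at the kriged result as a grid, the per-cell series can be stacked into a (cells x steps) numpy array; a small usage sketch, where the reshape assumes row-major cell ordering from _create_geo_point_grid (an assumption):

grid_values = np.vstack([gts.ts.values.to_numpy() for gts in grid_1km_3ts])  # (mnx*mny, n_steps)
grid_maps = grid_values.reshape(self.mny, self.mnx, -1)  # one 2-D map per time step, ordering assumed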
Example #11
    def test_can_run_bayesian_kriging_from_observation_sites_to_1km_grid(self):
        """
        Somewhat more complex test, first do kriging of 1 timeseries out to grid (expect same values flat)
        then do kriging of 3 time-series out to the grid (expect different values, no real verification here since this is done elsewhere

        """
        # arrange the test with a btk_parameter, a source grid and a destination grid
        btk_parameter = api.BTKParameter(temperature_gradient=-0.6,
                                         temperature_gradient_sd=0.25,
                                         sill=25.0,
                                         nugget=0.5,
                                         range=20000.0,
                                         zscale=20.0)
        fx = lambda z: api.DoubleVector.from_numpy(np.zeros(self.n))

        grid_1km_1 = self._create_geo_point_grid(self.mnx, self.mny,
                                                 self.dx_model)
        grid_1km_3 = self._create_geo_point_grid(self.mnx, self.mny,
                                                 self.dx_model)

        observation_sites = api.TemperatureSourceVector()
        ta_obs = api.TimeAxisFixedDeltaT(self.t, self.d * 3, int(self.n / 3))
        ta_grid = api.TimeAxisFixedDeltaT(self.t, self.d, self.n)
        point_fx = api.point_interpretation_policy.POINT_AVERAGE_VALUE
        ts_site_1 = api.TimeSeries(
            ta_obs,
            values=api.DoubleVector.from_numpy(
                (20.0 - 0.6 * 5.0 / 100) + 3.0 * np.sin(
                    np.arange(start=0, stop=ta_obs.size(), step=1) * 2 *
                    np.pi / 8.0 - np.pi / 2.0)),
            point_fx=point_fx)
        ts_site_2 = api.TimeSeries(
            ta_obs,
            values=api.DoubleVector.from_numpy(
                (20.0 - 0.6 * 500.0 / 100) + 3.0 * np.sin(
                    np.arange(start=0, stop=ta_obs.size(), step=1) * 2 *
                    np.pi / 8.0 - np.pi / 2.0)),
            point_fx=point_fx)
        ts_site_3 = api.TimeSeries(
            ta_obs,
            values=api.DoubleVector.from_numpy(
                (20.0 - 0.6 * 1050.0 / 100) + 3.0 * np.sin(
                    np.arange(start=0, stop=ta_obs.size(), step=1) * 2 *
                    np.pi / 8.0 - np.pi / 2.0)),
            point_fx=point_fx)

        observation_sites.append(
            api.TemperatureSource(api.GeoPoint(50.0, 50.0, 5.0), ts_site_1))

        # act 1: just one time-series put into the system; should give the same ts (true-averaged) in all the 1km grid ts (could be improved using std.gradient..)
        grid_1km_1ts = api.bayesian_kriging_temperature(
            observation_sites, grid_1km_1, ta_grid, btk_parameter)

        # assert 1:
        self.assertEqual(len(grid_1km_1ts), self.mnx * self.mny)
        expected_grid_1ts_values = ts_site_1.average(
            api.TimeAxis(ta_grid)).values.to_numpy()

        for gts in grid_1km_1ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertTrue(
                np.allclose(expected_grid_1ts_values,
                            gts.ts.values.to_numpy()))

        observation_sites.append(
            api.TemperatureSource(api.GeoPoint(9000.0, 500.0, 500), ts_site_2))
        observation_sites.append(
            api.TemperatureSource(api.GeoPoint(9000.0, 12000.0, 1050.0),
                                  ts_site_3))

        grid_1km_3ts = api.bayesian_kriging_temperature(
            observation_sites, grid_1km_3, ta_grid, btk_parameter)

        self.assertEqual(len(grid_1km_3ts), self.mnx * self.mny)

        for gts in grid_1km_3ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertFalse(
                np.allclose(expected_grid_1ts_values,
                            gts.ts.values.to_numpy()))
Example #12
    def test_create_TargetSpecificationPts(self):
        t = api.TargetSpecificationPts()
        t.scale_factor = 1.0
        t.calc_mode = api.NASH_SUTCLIFFE
        t.calc_mode = api.KLING_GUPTA
        t.calc_mode = api.ABS_DIFF
        t.calc_mode = api.RMSE
        t.s_r = 1.0  # KGE scale-factors
        t.s_a = 2.0
        t.s_b = 3.0
        self.assertIsNotNone(t.uid)
        t.uid = 'test'
        self.assertEqual(t.uid, 'test')
        self.assertAlmostEqual(t.scale_factor, 1.0)
        # create a ts with some points
        cal = api.Calendar()
        start = cal.time(2015, 1, 1, 0, 0, 0)
        dt = api.deltahours(1)
        tsf = api.TsFactory()
        times = api.UtcTimeVector()
        times.push_back(start + 1 * dt)
        times.push_back(start + 3 * dt)
        times.push_back(start + 4 * dt)

        values = api.DoubleVector()
        values.push_back(1.0)
        values.push_back(3.0)
        values.push_back(np.nan)
        tsp = tsf.create_time_point_ts(api.UtcPeriod(start, start + 24 * dt),
                                       times, values)
        # convert it from a time-point ts (as returned from the current smg repository) to a fixed-interval ts with a time-axis, as needed by calibration
        tst = api.TsTransform()
        tsa = tst.to_average(start, dt, 24, tsp)
        # tsa2 = tst.to_average(start,dt,24,tsp,False)
        # tsa_staircase = tst.to_average_staircase(start,dt,24,tsp,False) # nans infects the complete interval to nan
        # tsa_staircase2 = tst.to_average_staircase(start,dt,24,tsp,True) # skip nans, nans are 0
        # stuff it into the target spec.
        # also show how to specify snow-calibration
        cids = api.IntVector([0, 2, 3])
        t2 = api.TargetSpecificationPts(tsa, cids, 0.7, api.KLING_GUPTA, 1.0,
                                        1.0, 1.0, api.SNOW_COVERED_AREA,
                                        'test_uid')
        self.assertEqual(t2.uid, 'test_uid')
        t2.catchment_property = api.SNOW_WATER_EQUIVALENT
        self.assertEqual(t2.catchment_property, api.SNOW_WATER_EQUIVALENT)
        t2.catchment_property = api.CELL_CHARGE
        self.assertEqual(t2.catchment_property, api.CELL_CHARGE)
        self.assertIsNotNone(t2.catchment_indexes)
        for i in range(len(cids)):
            self.assertEqual(cids[i], t2.catchment_indexes[i])
        t.ts = api.TimeSeries(tsa)  # target spec is now a regular TimeSeries
        tv = api.TargetSpecificationVector()
        tv[:] = [t, t2]
        # now verify we got something ok
        self.assertEqual(2, tv.size())
        self.assertAlmostEqual(tv[0].ts.value(1),
                               1.5)  # average of linear 1.0..2.0 over the interval
        self.assertAlmostEqual(tv[0].ts.value(2),
                               2.5)  # average of linear 2.0..3.0 over the interval
        # self.assertAlmostEqual(tv[0].ts.value(3), 3.0)  # original flat out at end, but now:
        self.assertTrue(math.isnan(
            tv[0].ts.value(3)))  # strictly linear between points.
        # and that the target vector now have its own copy of ts
        tsa.set(1, 3.0)
        self.assertAlmostEqual(
            tv[0].ts.value(1),
            1.5)  # make sure the ts passed onto target spec, is a copy
        self.assertAlmostEqual(tsa.value(1),
                               3.0)  # and that we really did change the source
        # Create a clone of target specification vector
        tv2 = api.TargetSpecificationVector(tv)
        self.assertEqual(2, tv2.size())
        self.assertAlmostEqual(tv2[0].ts.value(1),
                               1.5)  # average value 0..1 ->0.5
        self.assertAlmostEqual(tv2[0].ts.value(2),
                               2.5)  # average value 0..1 ->0.5
        self.assertTrue(math.isnan(
            tv2[0].ts.value(3)))  # the trailing nan is preserved in the clone
        tv2[0].scale_factor = 10.0
        self.assertAlmostEqual(tv[0].scale_factor, 1.0)
        self.assertAlmostEqual(tv2[0].scale_factor, 10.0)
        # test we can create from breakpoint time-series
        ts_bp = api.TimeSeries(api.TimeAxis(api.UtcTimeVector([0, 20, 25]),  # time points must be strictly increasing
                                            30),
                               fill_value=2.0,
                               point_fx=api.POINT_AVERAGE_VALUE)

        tspec_bp = api.TargetSpecificationPts(ts_bp, cids, 0.7,
                                              api.KLING_GUPTA, 1.0, 1.0, 1.0,
                                              api.CELL_CHARGE, 'test_uid')
        self.assertIsNotNone(tspec_bp)
Example #13
    def test_ts_extend(self):
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=512
        ta_a=api.TimeAxisFixedDeltaT(t0, dt, 2*n)
        ta_b=api.TimeAxisFixedDeltaT(t0 + n*dt, dt, 2*n)
        ta_c=api.TimeAxisFixedDeltaT(t0 + 2*n*dt, dt, 2*n)
        ta_d=api.TimeAxisFixedDeltaT(t0 + 3*n*dt, dt, 2*n)

        a=api.TimeSeries(ta=ta_a, fill_value=1.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(ta=ta_b, fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        c=api.TimeSeries(ta=ta_c, fill_value=4.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        d=api.TimeSeries(ta=ta_d, fill_value=8.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)

        # default behavior: extend from end of a
        ac=a.extend(c)

        for i in range(2*n):  # values from first ts
            self.assertEqual(ac(t0 + i*dt), 1.0)
        for i in range(2*n):  # values from extension ts
            self.assertEqual(ac(t0 + (i + 2*n)*dt), 4.0)

        # default behavior: extend from end of a, fill gap with nan
        ad=a.extend(d)

        for i in range(2*n):  # values from first
            self.assertEqual(ad(t0 + i*dt), 1.0)
        for i in range(n):  # gap
            self.assertTrue(math.isnan(ad(t0 + (i + 2*n)*dt)))
        for i in range(2*n):  # extension
            self.assertEqual(ad(t0 + (i + 3*n)*dt), 8.0)

        # split at the first value of d instead of last of c
        cd=c.extend(d, split_policy=api.extend_split_policy.RHS_FIRST)

        for i in range(n):  # first, only until the extension start
            self.assertEqual(cd(t0 + (2*n + i)*dt), 4.0)
        for i in range(2*n):  # extension
            self.assertEqual(cd(t0 + (3*n + i)*dt), 8.0)

        # split at a given time step, and extend the last value through the gap
        ac=a.extend(c, split_policy=api.extend_split_policy.AT_VALUE, split_at=(t0 + dt*n//2),
                    fill_policy=api.extend_fill_policy.USE_LAST)

        for i in range(n//2):  # first, only until the given split value
            self.assertEqual(ac(t0 + i*dt), 1.0)
        for i in range(3*n//2):  # gap, uses last value before gap
            self.assertEqual(ac(t0 + (n//2 + i)*dt), 1.0)
        for i in range(2*n):  # extension
            self.assertEqual(ac(t0 + (2*n + i)*dt), 4.0)

        # split at the beginning of the ts to extend when the extension starts before it
        cb=c.extend(b, split_policy=api.extend_split_policy.AT_VALUE, split_at=(t0 + 2*n*dt))

        for i in range(n):  # don't extend before
            self.assertTrue(math.isnan(cb(t0 + (n + i)*dt)))
        for i in range(n):  # we split at the beginning => only values from extension
            self.assertEqual(cb(t0 + (2*n + i)*dt), 2.0)
        for i in range(n):  # no values after extension
            self.assertTrue(math.isnan(cb(t0 + (3*n + i)*dt)))

        # extend with ts starting after the end, fill the gap with a given value
        ad=a.extend(d, fill_policy=api.extend_fill_policy.FILL_VALUE, fill_value=5.5)

        for i in range(2*n):  # first
            self.assertEqual(ad(t0 + i*dt), 1.0)
        for i in range(n):  # gap, filled with 5.5
            self.assertEqual(ad(t0 + (2*n + i)*dt), 5.5)
        for i in range(2*n):  # extension
            self.assertEqual(ad(t0 + (3*n + i)*dt), 8.0)

        # check extend with a more exotic combination of time-axes (we had an issue with this..)
        a=api.TimeSeries(api.TimeAxis(0, 1, 10), fill_value=1.0, point_fx=api.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(api.TimeAxis(api.Calendar(), 0, 1, 20), fill_value=2.0, point_fx=api.POINT_AVERAGE_VALUE)
        ab=a.extend(b)
        ba=b.extend(a, split_policy=api.extend_split_policy.AT_VALUE, split_at=a.time_axis.time(5))
        self.assertAlmostEqual(ab.value(0), 1.0)
        self.assertAlmostEqual(ab.value(11), 2.0)
        self.assertAlmostEqual(ba.value(0), 2.0)
        self.assertAlmostEqual(ab.value(7), 1.0)
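
Laid out on the time axis, the default a.extend(d) above amounts to concatenating the two series with a nan-filled gap in between; a small numpy illustration of the value layout:

import numpy as np

n = 512
a_vals = np.full(2*n, 1.0)  # a covers [t0, t0 + 2n*dt)
gap = np.full(n, np.nan)    # default fill policy: nan over [t0 + 2n*dt, t0 + 3n*dt)
d_vals = np.full(2*n, 8.0)  # d covers [t0 + 3n*dt, t0 + 5n*dt)
ad_vals = np.concatenate([a_vals, gap, d_vals])  # matches the three assert loops for ad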
Example #14
    def test_a_time_series_vector(self):
        c=api.Calendar()
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)

        a=api.TimeSeries(ta=ta, fill_value=3.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(ta=ta, fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        c=api.TimeSeries(ta=ta, fill_value=10.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)  # note: rebinds c; the Calendar assigned above is unused
        v=api.TsVector()
        v.append(a)
        v.append(b)

        self.assertEqual(len(v), 2)
        self.assertAlmostEqual(v[0].value(0), 3.0, msg="expect first ts to be 3.0")
        aa=api.TimeSeries(ta=a.time_axis, values=a.values,
                          point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)  # copy construct (really copy the values!)
        a.fill(1.0)
        self.assertAlmostEqual(v[0].value(0), 1.0, msg="expect first ts to be 1.0, because the vector keeps a reference")
        self.assertAlmostEqual(aa.value(0), 3.0)

        vt=v.values_at(t0).to_numpy()
        self.assertEqual(len(vt), len(v))
        v1=v[0:1]
        self.assertEqual(len(v1), 1)
        self.assertAlmostEqual(v1[0].value(0), 1.0)
        v_clone=api.TsVector(v)
        self.assertEqual(len(v_clone), len(v))
        del v_clone[-1]
        self.assertEqual(len(v_clone), 1)
        self.assertEqual(len(v), 2)
        v_slice_all=v.slice(api.IntVector())
        v_slice_1=v.slice(api.IntVector([1]))
        v_slice_12=v.slice(api.IntVector([0, 1]))
        self.assertEqual(len(v_slice_all), 2)
        self.assertEqual(len(v_slice_1), 1)
        self.assertAlmostEqual(v_slice_1[0].value(0), 2.0)
        self.assertEqual(len(v_slice_12), 2)
        self.assertAlmostEqual(v_slice_12[0].value(0), 1.0)

        # multiplication by scalar
        v_x_2a=v*2.0
        v_x_2b=2.0*v
        for i in range(len(v)):
            self.assertAlmostEqual(v_x_2a[i].value(0), 2*v[i].value(0))
            self.assertAlmostEqual(v_x_2b[i].value(0), 2*v[i].value(0))

        # division by scalar
        v_d_a=v/3.0
        v_d_b=3.0/v
        for i in range(len(v)):
            self.assertAlmostEqual(v_d_a[i].value(0), v[i].value(0)/3.0)
            self.assertAlmostEqual(v_d_b[i].value(0), 3.0/v[i].value(0))

        # addition by scalar
        v_a_a=v + 3.0
        v_a_b=3.0 + v
        for i in range(len(v)):
            self.assertAlmostEqual(v_a_a[i].value(0), v[i].value(0) + 3.0)
            self.assertAlmostEqual(v_a_b[i].value(0), 3.0 + v[i].value(0))

        # sub by scalar
        v_s_a=v - 3.0
        v_s_b=3.0 - v
        for i in range(len(v)):
            self.assertAlmostEqual(v_s_a[i].value(0), v[i].value(0) - 3.0)
            self.assertAlmostEqual(v_s_b[i].value(0), 3.0 - v[i].value(0))

        # multiplication vector by ts
        v_x_ts=v*c
        ts_x_v=c*v
        for i in range(len(v)):
            self.assertAlmostEqual(v_x_ts[i].value(0), v[i].value(0)*c.value(0))
            self.assertAlmostEqual(ts_x_v[i].value(0), c.value(0)*v[i].value(0))

        # division vector by ts
        v_d_ts=v/c
        ts_d_v=c/v
        for i in range(len(v)):
            self.assertAlmostEqual(v_d_ts[i].value(0), v[i].value(0)/c.value(0))
            self.assertAlmostEqual(ts_d_v[i].value(0), c.value(0)/v[i].value(0))

        # add vector by ts
        v_a_ts=v + c
        ts_a_v=c + v
        for i in range(len(v)):
            self.assertAlmostEqual(v_a_ts[i].value(0), v[i].value(0) + c.value(0))
            self.assertAlmostEqual(ts_a_v[i].value(0), c.value(0) + v[i].value(0))

        # sub vector by ts
        v_s_ts=v - c
        ts_s_v=c - v
        for i in range(len(v)):
            self.assertAlmostEqual(v_s_ts[i].value(0), v[i].value(0) - c.value(0))
            self.assertAlmostEqual(ts_s_v[i].value(0), c.value(0) - v[i].value(0))

        # vector mult vector
        va=v
        vb=2.0*v

        v_m_v=va*vb
        self.assertEqual(len(v_m_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_m_v[i].value(0), va[i].value(0)*vb[i].value(0))

        # vector div vector
        v_d_v=va/vb
        self.assertEqual(len(v_d_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_d_v[i].value(0), va[i].value(0)/vb[i].value(0))

        # vector add vector
        v_a_v=va + vb
        self.assertEqual(len(v_a_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_a_v[i].value(0), va[i].value(0) + vb[i].value(0))

        # vector sub vector
        v_s_v=va - vb
        self.assertEqual(len(v_s_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_s_v[i].value(0), va[i].value(0) - vb[i].value(0))

        # vector unary minus
        v_u=- va
        self.assertEqual(len(v_u), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_u[i].value(0), -va[i].value(0))

        # integral functions, just to verify exposure works, and one value is according to spec.
        ta2=api.TimeAxis(t0, dt*24, n//24)
        v_avg=v.average(ta2)
        v_int=v.integral(ta2)
        v_acc=v.accumulate(ta2)
        v_sft=v.time_shift(dt*24)
        self.assertIsNotNone(v_avg)
        self.assertIsNotNone(v_int)
        self.assertIsNotNone(v_acc)
        self.assertIsNotNone(v_sft)
        self.assertAlmostEqual(v_avg[0].value(0), 1.0)
        self.assertAlmostEqual(v_int[0].value(0), 86400.0)
        self.assertAlmostEqual(v_acc[0].value(0), 0.0)
        self.assertAlmostEqual(v_sft[0].time(0), t0 + dt*24)

        # min/max functions
        min_v_double=va.min(-1000.0)
        max_v_double=va.max(1000.0)
        self.assertAlmostEqual(min_v_double[0].value(0), -1000.0)
        self.assertAlmostEqual(max_v_double[0].value(0), +1000.0)
        min_v_double=api.min(va, -1000.0)
        max_v_double=api.max(va, +1000.0)
        self.assertAlmostEqual(min_v_double[0].value(0), -1000.0)
        self.assertAlmostEqual(max_v_double[0].value(0), +1000.0)
        # c = 10.0
        c1000=100.0*c
        min_v_double=va.min(-c1000)
        max_v_double=va.max(c1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -c1000.value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), c1000.value(0))
        min_v_double=api.min(va, -c1000)
        max_v_double=api.max(va, c1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -c1000.value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), c1000.value(0))

        v1000=va*1000.0
        min_v_double=va.min(-v1000)
        max_v_double=va.max(v1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -v1000[0].value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), v1000[0].value(0))
        min_v_double=api.min(va, -v1000)
        max_v_double=api.max(va, v1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -v1000[0].value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), v1000[0].value(0))

        # finally, test that exception is raised if we try to multiply two unequal sized vectors

        try:
            x=v_clone*va
            self.fail('We expected an exception for an unequal-sized ts-vector op')
        except RuntimeError:
            pass

        # also test that empty vector + vector -> vector etc.
        va_2=va + api.TsVector()
        va_3=api.TsVector() + va
        va_4=va - api.TsVector()
        va_5=api.TsVector() - va
        va_x=api.TsVector() + api.TsVector()
        self.assertEqual(len(va_2), len(va))
        self.assertEqual(len(va_3), len(va))
        self.assertEqual(len(va_4), len(va))
        self.assertEqual(len(va_5), len(va))
        self.assertEqual(not va_x, True)
        self.assertEqual(not va_2, False)
        va_2_ok=False
        va_x_ok=True
        if va_2:
            va_2_ok=True
        if va_x:
            va_x_ok=False
        self.assertTrue(va_2_ok)
        self.assertTrue(va_x_ok)
Example #15
def run_pm(ws_Th, ws_eah, ws_Rsh, ws_windspeedh, ws_rhh, rnet, height_veg=0.12, dt=1, n=30, rl=144.0, height_ws=3, height_t=1.68, elevation=1462.4, method='asce-ewri'):
    """Run Penman-Monteith evapotranspiration model from SHyFT"""

    import numpy as np
    import math
    import shyft
    from shyft import api

    utc = api.Calendar()

    c_MJm2d2Wm2 = 0.086400
    c_MJm2h2Wm2 = 0.0036

    # for the radiation model
    latitude = 40.4
    slope_deg = 0.0
    aspect_deg = 0.0




    # n = 30  # nr of time steps (hourly data by default)
    t_starth = utc.time(2000, 6, 1, 16, 0, 0, 0)  # start 2000-06-01 16:00 UTC
    step = api.deltahours(dt)


    # Let's now create Shyft time series from the supplied arrays of temperature, vapour
    # pressure, radiation, wind speed and relative humidity.
    # First, we need a time axis, which is defined by a starting time, a time step and the number of time steps.
    ta = api.TimeAxis(t_starth, step, n)  # hourly steps

    # First, we convert the lists to shyft internal vectors of double values:
    temph_dv = api.DoubleVector.from_numpy(ws_Th)
    eah_dv = api.DoubleVector.from_numpy(ws_eah)
    rsh_dv = api.DoubleVector.from_numpy(ws_Rsh)
    windspeedh_dv = api.DoubleVector.from_numpy(ws_windspeedh)

    rhh_dv = api.DoubleVector.from_numpy(ws_rhh)

    # Finally, we create shyft time-series as follows:
    # (Note: This step is not necessarily required to run the single methods.
    #  We could also just work with the double vector objects and the time axis)
    instant = api.point_interpretation_policy.POINT_INSTANT_VALUE
    average = api.point_interpretation_policy.POINT_AVERAGE_VALUE

    temph_ts = api.TimeSeries(ta, temph_dv, point_fx=instant)
    eah_ts = api.TimeSeries(ta, eah_dv, point_fx=instant)
    rsh_ts = api.TimeSeries(ta, rsh_dv, point_fx=instant)
    windspeedh_ts = api.TimeSeries(ta, windspeedh_dv, point_fx=instant)


    # recalculated inputs:
    rhh_ts = api.TimeSeries(ta, rhh_dv, point_fx=instant)


    radph = api.RadiationParameter(0.26, 1.0)
    radch = api.RadiationCalculator(radph)
    radrh = api.RadiationResponse()
    if method=='asce-ewri':
        full_model = False
    else:
        full_model = True
    pmph = api.PenmanMonteithParameter(height_veg, height_ws, height_t, rl, full_model)
    pmch = api.PenmanMonteithCalculator(pmph)
    pmrh = api.PenmanMonteithResponse()

    # PriestleyTaylor
    ptp = api.PriestleyTaylorParameter(0.2, 1.26)
    ptc = api.PriestleyTaylorCalculator(0.2, 1.26)
    ptr = api.PriestleyTaylorResponse()

    ET_ref_sim_h = []

    for i in range(n - 1):
        pmch.reference_evapotranspiration(pmrh, step, rnet[i], temph_ts.v[i], temph_ts.v[i],
                                          rhh_ts.v[i], elevation, windspeedh_ts.v[i])
        ET_ref_sim_h.append(pmrh.et_ref)




    return ET_ref_sim_h
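
A minimal usage sketch with synthetic forcing (all input values below are made up for illustration, not physically calibrated):

import numpy as np

n = 30
t = np.full(n, 20.0)      # air temperature [deg C]
ea = np.full(n, 1.5)      # actual vapour pressure
rs = np.full(n, 500.0)    # shortwave radiation
ws = np.full(n, 2.0)      # wind speed [m/s]
rh = np.full(n, 60.0)     # relative humidity [%]
rnet = np.full(n, 400.0)  # net radiation
et_ref = run_pm(t, ea, rs, ws, rh, rnet, n=n)  # returns n-1 hourly reference-ET values
print(et_ref[:3])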
Example #16
    def construct(d):
        # closes over ta, key and self.series_type from the enclosing method
        if ta.size() != d.size:
            raise EcDataRepositoryError("Time axis size {} not equal to the number of "
                                        "data points ({}) for {}".format(ta.size(), d.size, key))
        return api.TimeSeries(ta, api.DoubleVector_FromNdArray(d.flatten()), self.series_type[key])