def test_create_target_spec_from_std_time_series(self):
        """
        Verify we can create target-spec giving ordinary ts,
        and that passing a non-fixed time-axis raises exception

        """
        cal = api.Calendar()
        ta = api.TimeAxis(cal.time(2017, 1, 1), api.deltahours(1), 24)
        ts = api.TimeSeries(
            ta,
            fill_value=3.0,
            point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        cids = api.IntVector([0, 2, 3])
        t0 = api.TargetSpecificationPts(ts, cids, 0.7, api.KLING_GUPTA, 1.0,
                                        1.0, 1.0, api.SNOW_COVERED_AREA,
                                        'test_uid')
        self.assertAlmostEqual(t0.ts.value(0), ts.value(0))
        rid = 0
        t1 = api.TargetSpecificationPts(ts, rid, 0.7, api.KLING_GUPTA, 1.0,
                                        1.0, 1.0, 'test_uid')
        self.assertAlmostEqual(t1.ts.value(0), ts.value(0))
        tax = api.TimeAxis(api.UtcTimeVector.from_numpy(ta.time_points[:-1]),
                           ta.total_period().end)
        tsx = api.TimeSeries(
            tax,
            fill_value=2.0,
            point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        tx = api.TargetSpecificationPts(tsx, rid, 0.7, api.KLING_GUPTA, 1.0,
                                        1.0, 1.0, 'test_uid')
        self.assertIsNotNone(tx)
Example 2
 def dacc_time(t):
     # build a time axis from accumulation time points: the last point only
     # marks the end of the final interval, so the axis gets len(t) - 1 steps
     dt_last = int(t[-1] - t[-2])
     if np.all(t[1:] - t[:-1] == dt_last):  # fixed_dt time axis
         return api.TimeAxis(int(t[0]), dt_last, len(t) - 1)
     else:  # point_type time axis
         return api.TimeAxis(
             api.UtcTimeVector.from_numpy(t[:-1].astype(int)),
             int(t[-1]))
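A minimal usage sketch for dacc_time, assuming t is a numpy array of strictly increasing epoch seconds (the values here are illustrative):

 import numpy as np
 t_fixed = np.array([0, 3600, 7200, 10800])  # equidistant -> fixed-dt axis with 3 intervals
 t_point = np.array([0, 3600, 5400, 10800])  # irregular -> point-type axis
 ta_fixed = dacc_time(t_fixed)  # api.TimeAxis(0, 3600, 3)
 ta_point = dacc_time(t_point)  # api.TimeAxis(api.UtcTimeVector([0, 3600, 5400]), 10800)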
Example 3
 def noop_time(t):
     # if issubset:
     #     t = t[:-1]
     dt_last = int(t[-1] - t[-2])
     if np.all(t[1:] - t[:-1] == dt_last):  # fixed_dt time axis
         return api.TimeAxis(int(t[0]), dt_last, len(t))
     else:  # point_type time axis
         return api.TimeAxis(api.UtcTimeVector.from_numpy(t.astype(int)), int(t[-1] + dt_last))
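In contrast to dacc_time, noop_time keeps all n points and synthesizes an end point at t[-1] + dt_last; a small sketch under the same assumptions:

 t = np.array([0, 3600, 7200])
 ta = noop_time(t)  # fixed-dt: api.TimeAxis(0, 3600, 3), covering [0, 10800)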
Example 4
 def test_timeaxis_time_points(self):
     c = api.Calendar('Europe/Oslo')
     dt = api.deltahours(1)
     n = 240
     t0 = c.time(2016, 4, 10)
     ta = api.TimeAxis(c, t0, dt, n)
     tp = ta.time_points
     self.assertIsNotNone(tp)
     self.assertEqual(len(tp), n + 1)
     self.assertEqual(len(api.TimeAxis(c, t0, dt, 0).time_points), 0)
Example 5
 def test_timeaxis_time_points_double(self):
     dt = 1.5
     n = 240
     t0 = 0
     ta = api.TimeAxis(t0, dt, n)
     tp = ta.time_points_double
     self.assertIsNotNone(tp)
     self.assertEqual(len(tp), n + 1)
     self.assertEqual(len(api.TimeAxis(t0, dt, 0).time_points_double), 0)
     self.assertEqual(ta.time_points_double[1], 1.5)
Example 6
 def test_average_outside_give_nan(self):
     ta1=api.TimeAxis(0, 10, 10)
     ta2=api.TimeAxis(-10, 10, 21)
     tsa=api.TimeSeries(ta1, fill_value=1.0, point_fx=api.POINT_AVERAGE_VALUE)
     tsb=tsa.average(ta2)
     self.assertTrue(math.isnan(tsb.value(11)))  # nan when a ends
     self.assertTrue(math.isnan(tsb.value(0)))  # nan before first a
     tsa=api.TimeSeries(ta1, fill_value=1.0, point_fx=api.POINT_INSTANT_VALUE)
     tsb=tsa.average(ta2)
     self.assertTrue(math.isnan(tsb.value(10)))  # notice we get one less value due to linear-between; it ends at the last point in tsa
     self.assertTrue(math.isnan(tsb.value(0)))
Example 7
 def test_integral_fine_resolution(self):
     """ Case study for last-interval bug from python"""
     utc=api.Calendar()
     ta=api.TimeAxis(utc.time(2017, 10, 16), api.deltahours(24*7), 219)
     tf=api.TimeAxis(utc.time(2017, 10, 16), api.deltahours(3), 12264)
     src=api.TimeSeries(ta, fill_value=1.0, point_fx=api.POINT_AVERAGE_VALUE)
     ts=src.integral(tf)
     self.assertIsNotNone(ts)
     for i in range(len(tf)):
         if not math.isclose(ts.value(i), 1.0*api.deltahours(3)):  # only run the (slow) assert when the value deviates
             self.assertAlmostEqual(ts.value(i), 1.0*api.deltahours(3))
Example 8
def _clip_ensemble_of_geo_timeseries(ensemble, utc_period, err, allow_shorter_period=False):
    """
    Clip ensemble og source-keyed dictionaries of geo-ts according to utc_period

    Parameters
    ----------
    ensemble: list
        List of dictionaries keyed by time series type, where values are
        api vectors of geo located time series over the same time axis
    utc_period: api.UtcPeriod
        The utc time period that should (as a minimum) be covered.
    allow_shorter_period: bool, optional
        may return ts for shorter period if time_axis does not cover utc_period
    """
    if utc_period is None:
        return ensemble

    # Check time axis of first ensemble member/geo_point and if required create new time axis to use for clipping
    member = ensemble[0]
    time_axis = {}
    is_optimal = {}
    for key, geo_ts in member.items():
        is_optimal[key] = False
        point_type = geo_ts[0].ts.point_interpretation() == api.POINT_INSTANT_VALUE
        ta = geo_ts[0].ts.time_axis
        if ta.total_period().start > utc_period.start or ta.time_points[-1] - point_type < utc_period.end:
            if not allow_shorter_period:
                raise err("Found time axis that does not cover utc_period.")
            else:
                period_start = max(ta.time_points[0], int(utc_period.start))
                period_end = min(ta.time_points[-1] - point_type, int(utc_period.end))
        else:
            period_start = utc_period.start
            period_end = utc_period.end
        idx_start = np.argmax(ta.time_points > period_start) - 1
        idx_end = np.argmin(ta.time_points < period_end + point_type)
        if idx_start > 0 or idx_end < len(ta.time_points) - 1:
            if ta.timeaxis_type == api.TimeAxisType.FIXED:
                dt = ta.time(1) - ta.time(0)
                n = int(idx_end - idx_start)
                time_axis[key] = api.TimeAxis(int(ta.time_points[idx_start]), int(dt), n)
            else:
                time_points = api.UtcTimeVector(ta.time_points[idx_start:idx_end].tolist())
                t_end = ta.time_points[idx_end]
                time_axis[key] = api.TimeAxis(time_points, int(t_end))
        else:
            is_optimal[key] = True
            time_axis[key] = ta

    if all(list(is_optimal.values())):  # No need to clip if all are optimal
        return ensemble

    return [{key: source_vector_map[key]([source_type_map[key](s.mid_point(), s.ts.average(time_axis[key]))
                                              for s in geo_ts]) for key, geo_ts in f.items()} for f in ensemble]
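A hedged call sketch (illustrative only; the function also relies on module-level source_type_map/source_vector_map lookups not shown here):

    utc = api.Calendar()
    period = api.UtcPeriod(utc.time(2017, 1, 1), utc.time(2017, 1, 2))
    clipped = _clip_ensemble_of_geo_timeseries(ensemble, period, RuntimeError,
                                               allow_shorter_period=True)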
Example 9
    def test_generic_timeaxis(self):
        c = api.Calendar('Europe/Oslo')
        dt = api.deltahours(1)
        n = 240
        t0 = c.time(2016, 4, 10)

        tag1 = api.TimeAxis(t0, dt, n)
        self.assertEqual(len(tag1), n)
        self.assertEqual(tag1.time(0), t0)

        tag2 = api.TimeAxis(c, t0, dt, n)
        self.assertEqual(len(tag2), n)
        self.assertEqual(tag2.time(0), t0)
        self.assertIsNotNone(tag2.calendar_dt.calendar)
Example 10
    def test_percentiles_with_min_max_extremes(self):
        """ the percentiles function now also supports picking out the min-max peak value
            within each interval.
            Setup test-data so that we have a well known percentile result,
            but also have peak-values within the interval that we can
            verify.
            We let hour ts 0..9 have values 0..9 constant 24*10 days
               then modify ts[1], every day first  value to a peak min value equal to - day_no*1
                                  every day second value to a peak max value equal to + day_no*1
                                  every day 3rd    value to a nan value
            ts[1] should then have same average value for each day (so same percentile)
                                            but min-max extreme should be equal to +- day_no*1
        """
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxis(t0, dt, n)
        timeseries=api.TsVector()
        p_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE
        for i in range(10):
            timeseries.append(api.TimeSeries(ta=ta, fill_value=i, point_fx=p_fx))

        ts=timeseries[1]  # pick this one to insert min/max extremes
        for i in range(0, 240, 24):
            ts.set(i + 0, 1.0 - 100*i/24.0)
            ts.set(i + 1, 1.0 + 100*i/24.0)  # notice that when i==0, this gives 1.0
            ts.set(i + 2, float('nan'))  # also put in a nan, just to verify it is ignored during average processing

        wanted_percentiles=api.IntVector([api.statistics_property.MIN_EXTREME,
                                          0, 10, 50,
                                          api.statistics_property.AVERAGE,
                                          70, 100,
                                          api.statistics_property.MAX_EXTREME])
        ta_day=api.TimeAxis(t0, dt*24, n//24)
        percentiles=api.percentiles(timeseries, ta_day, wanted_percentiles)
        for i in range(len(ta_day)):
            if i == 0:  # first timestep, the min/max extremes are picked from the 0'th and 9'th ts.
                self.assertAlmostEqual(0.0, percentiles[0].value(i), 3, "min-extreme")
                self.assertAlmostEqual(9.0, percentiles[7].value(i), 3, "max-extreme")
            else:
                self.assertAlmostEqual(1.0 - 100.0*i*24.0/24.0, percentiles[0].value(i), 3, "min-extreme ")
                self.assertAlmostEqual(1.0 + 100.0*i*24.0/24.0, percentiles[7].value(i), 3, "max-extreme")
            self.assertAlmostEqual(0.0, percentiles[1].value(i), 3, "  0-percentile")
            self.assertAlmostEqual(0.9, percentiles[2].value(i), 3, " 10-percentile")
            self.assertAlmostEqual(4.5, percentiles[3].value(i), 3, " 50-percentile")
            self.assertAlmostEqual(4.5, percentiles[4].value(i), 3, "   -average")
            self.assertAlmostEqual(6.3, percentiles[5].value(i), 3, " 70-percentile")
            self.assertAlmostEqual(9.0, percentiles[6].value(i), 3, "100-percentile")
Example 11
 def setUp(self):
     self.c = api.Calendar()
     self.d = api.deltahours(1)
     self.n = 24
     # self.t= self.c.trim(api.utctime_now(),self.d)
     self.t = self.c.trim(self.c.time(1969, 12, 31, 0, 0, 0), self.d)
     self.ta = api.TimeAxis(self.t, self.d, self.n)
Example 12
    def _create_forecast_set(self, n_fc, t0, dt, n_steps, dt_fc, fx):
        """

        Parameters
        ----------
        n_fc : int number of forecasts, e.g. 8
        t0 : utctime start of first forecast
        dt : utctimespan delta t for forecast-ts
        n_steps : number of steps in one forecast-ts
        dt_fc : utctimespan delta t between each forecast, like deltahours(6)
        fx : lambda time_axis:  a function returning a DoubleVector with values for the supplied time-axis

        Returns
        -------
        api.TsVector()

        """
        fc_set = api.TsVector()
        for i in range(n_fc):
            ta = api.TimeAxis(t0 + i * dt_fc, dt, n_steps)
            ts = api.TimeSeries(
                ta=ta,
                values=fx(ta),
                point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
            fc_set.append(ts)
        return fc_set
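A hedged usage sketch (utc and the constant-value callback are illustrative, not part of the test fixture):

        utc = api.Calendar()
        fx = lambda ta: api.DoubleVector.from_numpy(np.full(len(ta), 2.0))
        fc_set = self._create_forecast_set(n_fc=4, t0=utc.time(2016, 1, 1),
                                           dt=api.deltahours(1), n_steps=36,
                                           dt_fc=api.deltahours(6), fx=fx)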
Example 13
    def test_ts_get_krls_predictor(self):
        t0=api.utctime_now()
        ta=api.TimeAxis(t0, api.deltahours(1), 30*24)
        data=np.sin(np.linspace(0, 2*np.pi, ta.size()))
        ts_data=api.TimeSeries(ta, data, api.POINT_INSTANT_VALUE)

        ts=api.TimeSeries("a")

        # an unbound ts cannot provide a predictor
        with self.assertRaises(Exception):
            ts.get_krls_predictor()

        fbi=ts.find_ts_bind_info()
        fbi[0].ts.bind(ts_data)
        ts.bind_done()

        pred=ts.get_krls_predictor(api.deltahours(3))

        ts_krls=pred.predict(ta)
        self.assertEqual(len(ts_krls), len(ts_data))
        ts_mse=pred.mse_ts(ts_data)
        self.assertEqual(len(ts_mse), len(ts_data))
        for i in range(len(ts_krls)):
            self.assertAlmostEqual(ts_krls.values[i], ts_data.values[i], places=1)
            self.assertAlmostEqual(ts_mse.values[i], 0, places=2)
        self.assertAlmostEqual(pred.predictor_mse(ts_data), 0, places=2)
Example 14
        def pad(v, t):
            # optionally extend each row's time axis by nb_pads steps,
            # repeating the trailing spacing/values (only when not concatenating)
            if not concat:
                if self.nb_pads > 0:
                    nb_pads = self.nb_pads
                    t_padded = np.zeros((t.shape[0], t.shape[1] + nb_pads),
                                        dtype=t.dtype)
                    t_padded[:, :-nb_pads] = t[:, :]
                    t_add = t[0, -1] - t[0, -nb_pads - 1]
                    # print('t_add:',t_add)
                    t_padded[:, -nb_pads:] = t[:, -nb_pads:] + t_add

                    v_padded = np.zeros(
                        (v.shape[0], t.shape[1] + nb_pads, v.shape[2]),
                        dtype=v.dtype)
                    v_padded[:, :-nb_pads, :] = v[:, :, :]
                    v_padded[:, -nb_pads:, :] = v[:, -nb_pads:, :]

                else:
                    t_padded = t
                    v_padded = v
                dt_last = t_padded[0, -1] - t_padded[0, -2]
                return (v_padded, [
                    api.TimeAxis(api.UtcTimeVector.from_numpy(t_one),
                                 int(t_one[-1] + dt_last))
                    for t_one in t_padded
                ])
            else:
                return (v, t)
Example 15
    def test_kling_gupta_and_nash_sutcliffe(self):
        """
        Test/verify exposure of the kling_gupta and nash_sutcliffe correlation functions

        """

        def np_nash_sutcliffe(o, p):
            return 1 - (np.sum((o - p)**2))/(np.sum((o - np.mean(o))**2))

        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxis(t0, dt, n)
        from math import sin, pi
        rad_max=10*2*pi
        obs_values=api.DoubleVector.from_numpy(np.array([sin(i*rad_max/n) for i in range(n)]))
        mod_values=api.DoubleVector.from_numpy(np.array([0.1 + sin(pi/10.0 + i*rad_max/n) for i in range(n)]))
        obs_ts=api.TimeSeries(ta=ta, values=obs_values, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        mod_ts=api.TimeSeries(ta=ta, values=mod_values, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)

        self.assertAlmostEqual(api.kling_gupta(obs_ts, obs_ts, ta, 1.0, 1.0, 1.0), 1.0, None, "1.0 for perfect match")
        self.assertAlmostEqual(api.nash_sutcliffe(obs_ts, obs_ts, ta), 1.0, None, "1.0 for perfect match")
        # verify some non trivial cases, and compare to numpy version of ns
        mod_inv=obs_ts*-1.0
        kge_inv=obs_ts.kling_gupta(mod_inv)  # also show how to use time-series.method itself to ease use
        ns_inv=obs_ts.nash_sutcliffe(mod_inv)  # similar for nash_sutcliffe, you can reach it directly from a ts
        ns_inv2=np_nash_sutcliffe(obs_ts.values.to_numpy(), mod_inv.values.to_numpy())
        self.assertLessEqual(kge_inv, 1.0, "should be less than 1")
        self.assertLessEqual(ns_inv, 1.0, "should be less than 1")
        self.assertAlmostEqual(ns_inv, ns_inv2, 4, "should equal numpy calculated value")
        kge_obs_mod=api.kling_gupta(obs_ts, mod_ts, ta, 1.0, 1.0, 1.0)
        self.assertLessEqual(kge_obs_mod, 1.0)
        self.assertAlmostEqual(obs_ts.nash_sutcliffe(mod_ts),
                               np_nash_sutcliffe(obs_ts.values.to_numpy(), mod_ts.values.to_numpy()))
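For reference, a hedged numpy version of the standard (2009) Kling-Gupta efficiency, assuming the three 1.0 arguments passed above act as the s_r, s_alpha, s_beta weights:

        def np_kling_gupta(o, p, s_r=1.0, s_a=1.0, s_b=1.0):
            r = np.corrcoef(o, p)[0, 1]  # correlation term
            a = np.std(p)/np.std(o)      # variability ratio
            b = np.mean(p)/np.mean(o)    # bias ratio
            return 1.0 - np.sqrt(s_r*(r - 1)**2 + s_a*(a - 1)**2 + s_b*(b - 1)**2)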
Example 16
 def test_calibration_ts_case(self):
     times=[0, 3600, 3600 + 2*3600]
     ta=api.TimeAxis(api.UtcTimeVector(times[0:-1]), times[-1])
     values=api.DoubleVector([0.0]*(len(times) - 1))
     ts=api.TimeSeries(ta, values, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
     target=api.TargetSpecificationPts(ts, api.IntVector([0]), 1.0, api.ABS_DIFF, 1.0, 1.0, 1.0, api.CELL_CHARGE, 'water_balance')
     self.assertIsNotNone(target)
Example 17
    def test_integral(self):
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxis(t0, dt, n)
        fill_value=1.0
        ts=api.TimeSeries(ta=ta, fill_value=fill_value, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        tsa=api.TimeSeries('a')*1.0 + 0.0  # expression, needing bind
        tsb=api.TimeSeries('b')*1.0 + 0.0  # another expression, needing bind for different ts
        ts_i1=tsa.integral(ta)
        ts_i2=api.integral(tsb, ta)
        # circulate through serialization
        ts_i1_blob=ts_i1.serialize()
        ts_i2_blob=ts_i2.serialize()
        ts_i1=api.TimeSeries.deserialize(ts_i1_blob)
        ts_i2=api.TimeSeries.deserialize(ts_i2_blob)

        for ts_i in [ts_i1, ts_i2]:
            self.assertTrue(ts_i.needs_bind())
            tsb=ts_i.find_ts_bind_info()
            self.assertEqual(len(tsb), 1)
            tsb[0].ts.bind(ts)
            ts_i.bind_done()
            self.assertFalse(ts_i.needs_bind())

        ts_i1_values=ts_i1.values
        for i in range(n):
            expected_value=dt*fill_value
            self.assertAlmostEqual(expected_value, ts_i1.value(i), 4, "expect integral of each interval")
            self.assertAlmostEqual(expected_value, ts_i2.value(i), 4, "expect integral of each interval")
            self.assertAlmostEqual(expected_value, ts_i1_values[i], 4, "expect integral of each interval")
Example 18
    def test_percentiles(self):
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)
        timeseries=api.TsVector()

        for i in range(10):
            timeseries.append(
                api.TimeSeries(ta=ta, fill_value=i, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))

        wanted_percentiles=api.IntVector([api.statistics_property.MIN_EXTREME,
                                          0, 10, 50,
                                          api.statistics_property.AVERAGE,
                                          70, 100,
                                          api.statistics_property.MAX_EXTREME])
        ta_day=api.TimeAxisFixedDeltaT(t0, dt*24, n//24)
        ta_day2=api.TimeAxis(t0, dt*24, n//24)
        percentiles=api.percentiles(timeseries, ta_day, wanted_percentiles)
        percentiles2=timeseries.percentiles(ta_day2, wanted_percentiles)  # just to verify it works with alt. syntax

        self.assertEqual(len(percentiles2), len(percentiles))

        for i in range(len(ta_day)):
            self.assertAlmostEqual(0.0, percentiles[0].value(i), 3, "min-extreme ")
            self.assertAlmostEqual(0.0, percentiles[1].value(i), 3, "  0-percentile")
            self.assertAlmostEqual(0.9, percentiles[2].value(i), 3, " 10-percentile")
            self.assertAlmostEqual(4.5, percentiles[3].value(i), 3, " 50-percentile")
            self.assertAlmostEqual(4.5, percentiles[4].value(i), 3, "   -average")
            self.assertAlmostEqual(6.3, percentiles[5].value(i), 3, " 70-percentile")
            self.assertAlmostEqual(9.0, percentiles[6].value(i), 3, "100-percentile")
            self.assertAlmostEqual(9.0, percentiles[7].value(i), 3, "max-extreme")
Example 19
    def test_compute_running_bias(self):
        """
        Verify that if we feed forecast[n] and observation into the bias-predictor
        it will create the estimated bias offsets
        """
        f = api.KalmanFilter()
        bp = api.KalmanBiasPredictor(f)
        self.assertIsNotNone(bp)
        self.assertEqual(bp.filter.parameter.n_daily_observations, 8)

        n_fc = 1
        utc = api.Calendar()
        t0 = utc.time(2016, 1, 1)
        dt = api.deltahours(1)
        n_fc_steps = 24 * 10  # 10 days history
        fc_dt = api.deltahours(6)
        fc_fx = lambda time_axis: self._create_fc_values(
            time_axis, 2.0)  # just return a constant 2.0 deg C for now

        n_obs = n_fc_steps
        obs_ta = api.TimeAxis(t0, dt, n_obs)
        obs_ts = api.TimeSeries(obs_ta,
                                fill_value=0.0,
                                point_fx=api.POINT_INSTANT_VALUE)
        kalman_dt = api.deltahours(
            3)  # suitable average for prediction temperature
        kalman_ta = api.TimeAxis(t0, kalman_dt, n_obs // 3)
        fc_ts = self._create_forecast_set(n_fc, t0, dt, n_fc_steps, fc_dt,
                                          fc_fx)[0]
        bias_ts = bp.compute_running_bias(
            fc_ts, obs_ts,
            kalman_ta)  # also verify we can feed in a pure TsVector
        bias_pattern = bp.state.x  # bp.state.x now holds the best estimate of the bias between fc and observation
        self.assertEqual(len(bias_pattern), 8)
        for i in range(len(bias_pattern)):
            self.assertLess(abs(bias_pattern[i] - 2.0),
                            0.2)  # bias should iterate to approx 2.0 degC now.
        # and...:
        for i in range(8):
            self.assertAlmostEqual(bias_ts.value(i),
                                   0.0)  # expect 0.0 for the first day

        for i in range(8):
            self.assertLess(abs(bias_ts.value(bias_ts.size() - i - 1) - 2.0),
                            0.2)  # last part should be 2.0 deg.C
Example 20
 def test_min_max_check_ts_fill(self):
     ta=api.TimeAxis(0, 1, 5)
     ts_src=api.TimeSeries(ta, values=api.DoubleVector([1.0, -1.0, 2.0, float('nan'), 4.0]), point_fx=api.POINT_AVERAGE_VALUE)
     cts=api.TimeSeries(ta, values=api.DoubleVector([1.0, 1.8, 2.0, 2.0, 4.0]), point_fx=api.POINT_AVERAGE_VALUE)
     ts_qac=ts_src.min_max_check_ts_fill(v_max=10.0, v_min=-10.0, dt_max=300, cts=cts)
     self.assertAlmostEqual(ts_qac.value(3), 2.0)
     ts_qac=ts_src.min_max_check_ts_fill(v_max=10.0, v_min=0.0, dt_max=300, cts=cts)
     self.assertAlmostEqual(ts_qac.value(1), 1.8)  # -1 out, replaced with linear between
     self.assertAlmostEqual(ts_qac.value(3), 2.0)
Example 21
 def _reduce_fcst_group_horizon(self, fcst_group, nb_hours):
     # for each fcst in group; create time axis for clipping
     clipped_fcst_group = []
     for fcst in fcst_group:
         # get the time axis from the first source type in the first member
         ta = fcst[0][list(fcst[0].keys())[0]][0].ts.time_axis
         clip_end = ta.time(0) + nb_hours * api.deltahours(1)
         if ta.time(0) < clip_end < ta.total_period().end:
             if ta.timeaxis_type == api.TimeAxisType.FIXED:
                 dt = ta.time(1) - ta.time(0)
                 n = nb_hours * api.deltahours(1) // dt
                 ta = api.TimeAxis(ta.time(0), dt, n)
             else:
                 idx = ta.time_points < clip_end
                 t_end = ta.time(int(idx.nonzero()[0][-1] + 1))
                 ta = api.TimeAxis(
                     api.UtcTimeVector(ta.time_points[idx].tolist()), t_end)
         clipped_fcst_group.append(self._clip_forecast(fcst, ta))
     return clipped_fcst_group
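A minimal sketch of the fixed-dt branch above, with illustrative values (an hourly 36-step axis clipped to a 24-hour horizon):

     ta = api.TimeAxis(api.time(0), api.deltahours(1), 36)
     clip_end = ta.time(0) + 24*api.deltahours(1)
     dt = ta.time(1) - ta.time(0)
     n = 24*api.deltahours(1)//dt  # -> 24 steps
     ta_clipped = api.TimeAxis(ta.time(0), dt, int(n))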
Example 22
 def test_merge_points(self):
     a=api.TimeSeries()  # a is empty at the beginning, we allow that.
     tb=api.TimeAxis(0, 1, 5)
     b=api.TimeSeries(tb, values=api.DoubleVector([1.0, -1.0, 2.0, 3.0, 4.0]), point_fx=api.POINT_AVERAGE_VALUE)
     a.merge_points(b)  # now a should equal b
     c=api.TimeSeries(api.TimeAxis(api.UtcTimeVector([3, 10, 11]), t_end=12), fill_value=9.0, point_fx=api.POINT_AVERAGE_VALUE)
     a.merge_points(c)  # now a should have a new value at t=3, plus new time-points 10 and 11
     self.assertEqual(len(a), 7)
     assert_array_almost_equal(a.values.to_numpy(), np.array([1.0, -1.0, 2.0, 9.0, 4.0, 9.0, 9.0]))
     assert_array_almost_equal(a.time_axis.time_points, np.array([0,1,2,3,4,10,11,12]))
     xa= api.TimeSeries("some_unbound_ts")
     xa.merge_points(a)  # now it should be bound, and its values are from a
     self.assertEqual(len(xa), 7)
     assert_array_almost_equal(xa.values.to_numpy(), np.array([1.0, -1.0, 2.0, 9.0, 4.0, 9.0, 9.0]))
     assert_array_almost_equal(xa.time_axis.time_points, np.array([0,1,2,3,4,10,11,12]))
     d=api.TimeSeries(api.TimeAxis(api.UtcTimeVector([3, 10, 11]), t_end=12), fill_value=10.0, point_fx=api.POINT_AVERAGE_VALUE)
     xa.merge_points(d)  # now that xa is bound, also check that we get updated values
     self.assertEqual(len(xa), 7)
     assert_array_almost_equal(xa.values.to_numpy(), np.array([1.0, -1.0, 2.0, 10.0, 4.0, 10.0, 10.0]))
     assert_array_almost_equal(xa.time_axis.time_points, np.array([0, 1, 2, 3, 4, 10, 11, 12]))
Example 23
 def test_min_max_check_linear_fill(self):
     ta=api.TimeAxis(0, 1, 5)
     ts_src=api.TimeSeries(ta, values=api.DoubleVector([1.0, -1.0, 2.0, float('nan'), 4.0]), point_fx=api.POINT_AVERAGE_VALUE)
     ts_qac=ts_src.min_max_check_linear_fill(v_max=10.0, v_min=-10.0, dt_max=300)
     self.assertAlmostEqual(ts_qac.value(3), 3.0)
     ts_qac=ts_src.min_max_check_linear_fill(v_max=10.0, v_min=0.0, dt_max=300)
     self.assertAlmostEqual(ts_qac.value(1), 1.5)  # -1 out, replaced with linear between
     self.assertAlmostEqual(ts_qac.value(3), 3.0)
     ts_qac=ts_src.min_max_check_linear_fill(v_max=10.0, v_min=0.0, dt_max=0)
     self.assertTrue(not math.isfinite(ts_qac.value(3)))  # should give nan, not allowed to fill in
     self.assertTrue(not math.isfinite(ts_qac.value(1)))  # should give nan, not allowed to fill in
Example 24
    def test_bias_predictor(self):
        """
        Verify that if we feed forecast[n] and observation into the bias-predictor
        it will create the estimated bias offsets
        """
        f = api.KalmanFilter()
        bp = api.KalmanBiasPredictor(f)
        self.assertIsNotNone(bp)
        self.assertEqual(bp.filter.parameter.n_daily_observations, 8)

        n_fc = 8
        utc = api.Calendar()
        t0 = utc.time(2016, 1, 1)
        dt = api.deltahours(1)
        n_fc_steps = 36  # e.g. like arome 36 hours
        fc_dt = api.deltahours(6)
        fc_fx = lambda time_axis: self._create_fc_values(
            time_axis, 2.0)  # just return a constant 2.0 deg C for now
        fc_set = self._create_geo_forecast_set(n_fc, t0, dt, n_fc_steps, fc_dt,
                                               fc_fx)
        n_obs = 24
        obs_ta = api.TimeAxis(t0, dt, n_obs)
        obs_ts = api.TimeSeries(obs_ta,
                                fill_value=0.0,
                                point_fx=api.POINT_INSTANT_VALUE)
        kalman_dt = api.deltahours(
            3)  # suitable average for prediction temperature
        kalman_ta = api.TimeAxis(t0, kalman_dt, 8)
        bp.update_with_forecast(
            fc_set, obs_ts, kalman_ta
        )  # here we feed in forecast-set and observation into kalman
        fc_setv = self._create_forecast_set(n_fc, t0, dt, n_fc_steps, fc_dt,
                                            fc_fx)
        bp.update_with_forecast(
            fc_setv, obs_ts,
            kalman_ta)  # also verify we can feed in a pure TsVector
        bias_pattern = bp.state.x  # bp.state.x now holds the best estimate of the bias between fc and observation
        self.assertEqual(len(bias_pattern), 8)
        for i in range(len(bias_pattern)):
            self.assertLess(abs(bias_pattern[i] - 2.0),
                            0.2)  # bias should iterate to approx 2.0 degC now.
Example 25
    def setUp(self):
        self.cal = api.Calendar()
        self.dt = api.deltahours(1)
        self.nt = 24 * 10
        self.t0 = self.cal.time(2016, 1, 1)
        self.ta = api.TimeAxis(self.t0, self.dt, self.nt)
        self.ta1 = api.TimeAxisFixedDeltaT(self.t0, self.dt, self.nt)

        self.geo_points = api.GeoPointVector()
        self.geo_points.append(api.GeoPoint(100, 100, 1000))
        self.geo_points.append(api.GeoPoint(5100, 100, 1150))
        self.geo_points.append(api.GeoPoint(100, 5100, 850))
Example 26
    def test_can_run_bayesian_kriging_from_observation_sites_to_1km_grid(self):
        """
        Somewhat more complex test, first do kriging of 1 timeseries out to grid (expect same values flat)
        then do kriging of 3 time-series out to the grid (expect different values, no real verification here since this is done elsewhere

        """
        # arrange the test with a btk_parameter, a source grid and a destination grid
        btk_parameter = api.BTKParameter(temperature_gradient=-0.6, temperature_gradient_sd=0.25, sill=25.0, nugget=0.5, range=20000.0, zscale=20.0)
        fx = lambda z: api.DoubleVector.from_numpy(np.zeros(self.n))

        grid_1km_1 = self._create_geo_point_grid(self.mnx, self.mny, self.dx_model)
        grid_1km_3 = self._create_geo_point_grid(self.mnx, self.mny, self.dx_model)

        observation_sites = api.TemperatureSourceVector()
        ta_obs = api.TimeAxisFixedDeltaT(self.t, self.d * 3, int(self.n / 3))
        ta_grid = api.TimeAxisFixedDeltaT(self.t, self.d, self.n)
        point_fx = api.point_interpretation_policy.POINT_AVERAGE_VALUE
        ts_site_1 = api.TimeSeries(ta_obs,
                                   values=api.DoubleVector.from_numpy(
            (20.0 - 0.6 * 5.0 / 100) + 3.0 * np.sin(np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi / 8.0 - np.pi / 2.0)
                                   ),
                                   point_fx=point_fx)
        ts_site_2 = api.TimeSeries(ta_obs, values=api.DoubleVector.from_numpy(
            (20.0 - 0.6 * 500.0 / 100) + 3.0 * np.sin(np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi / 8.0 - np.pi / 2.0)),
                                   point_fx=point_fx)
        ts_site_3 = api.TimeSeries(ta_obs, values=api.DoubleVector.from_numpy(
            (20.0 - 0.6 * 1050.0 / 100) + 3.0 * np.sin(np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi / 8.0 - np.pi / 2.0)),
                                   point_fx=point_fx)

        observation_sites.append(api.TemperatureSource(api.GeoPoint(50.0, 50.0, 5.0), ts_site_1))

        # act 1: just one time-series put into the system, should give same ts (true-averaged) in all the grid-1km_ts (which can be improved using std.gradient..)
        grid_1km_1ts = api.bayesian_kriging_temperature(observation_sites, grid_1km_1, ta_grid, btk_parameter)

        # assert 1:
        self.assertEqual(len(grid_1km_1ts), self.mnx * self.mny)
        expected_grid_1ts_values = ts_site_1.average(api.TimeAxis(ta_grid)).values.to_numpy()

        for gts in grid_1km_1ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertTrue(np.allclose(expected_grid_1ts_values, gts.ts.values.to_numpy()))

        observation_sites.append(api.TemperatureSource(api.GeoPoint(9000.0, 500.0, 500), ts_site_2))
        observation_sites.append(api.TemperatureSource(api.GeoPoint(9000.0, 12000.0, 1050.0), ts_site_3))

        grid_1km_3ts = api.bayesian_kriging_temperature(observation_sites, grid_1km_3, ta_grid, btk_parameter)

        self.assertEqual(len(grid_1km_3ts), self.mnx * self.mny)

        for gts in grid_1km_3ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertFalse(np.allclose(expected_grid_1ts_values, gts.ts.values.to_numpy()))
Example 27
 def setUp(self):
     self.c = api.Calendar()
     self.d = api.deltahours(1)
     self.n = 24
     self.t = self.c.trim(self.c.time(2016, 9, 1), self.d)
     self.ta = api.TimeAxis(self.t, self.d, self.n)
     self.dx_arome = 2500
     self.dx_model = 1000
     self.nx = 2
     self.ny = 2
     self.mnx = 5
     self.mny = 5
     self.max_elevation = 1000
Example 28
    def test_partition_by(self):
        """
        verify/demo exposure of the .partition_by function that can
        be used to produce yearly percentiles statistics for long historical
        time-series

        """
        c=api.Calendar()
        t0=c.time(1930, 9, 1)
        dt=api.deltahours(1)
        n=c.diff_units(t0, c.time(2016, 9, 1), dt)

        ta=api.TimeAxis(t0, dt, n)
        pattern_values=api.DoubleVector.from_numpy(np.arange(len(ta)))  # increasing values

        src_ts=api.TimeSeries(ta=ta, values=pattern_values,
                              point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)

        partition_t0=c.time(2016, 9, 1)
        n_partitions=80
        partition_interval=api.Calendar.YEAR
        # get back TsVector,
        # where all TsVector[i].index_of(partition_t0)
        # is equal to the index ix for which the TsVector[i].value(ix) corresponds to the start value of that particular partition.
        ts_partitions=src_ts.partition_by(c, t0, partition_interval, n_partitions, partition_t0)
        self.assertEqual(len(ts_partitions), n_partitions)
        ty=t0
        for ts in ts_partitions:
            ix=ts.index_of(partition_t0)
            vix=ts.value(ix)
            expected_value=c.diff_units(t0, ty, dt)
            self.assertEqual(vix, expected_value)
            ty=c.add(ty, partition_interval, 1)

        # Now finally, try percentiles on the partitions
        wanted_percentiles=[0, 10, 25, -1, 50, 75, 90, 100]
        ta_percentiles=api.TimeAxis(partition_t0, api.deltahours(24), 365)
        percentiles=api.percentiles(ts_partitions, ta_percentiles, wanted_percentiles)
        self.assertEqual(len(percentiles), len(wanted_percentiles))
Example 29
 def _predict_bias(self, obs_set, fc_set):
     # Return a set of bias_ts per observation geo_point
     bias_set = api.TemperatureSourceVector()
     kf = api.KalmanFilter()
     kbp = api.KalmanBiasPredictor(kf)
     kta = api.TimeAxis(self.t0, api.deltahours(3), 8)
     for obs in obs_set:
         kbp.update_with_forecast(fc_set, obs.ts, kta)
         pattern = api.KalmanState.get_x(kbp.state)
         # a_ts = api.TimeSeries(pattern, api.deltahours(3), self.ta)  # can do using ct of TimeSeries, or:
         b_ts = api.create_periodic_pattern_ts(pattern, api.deltahours(3),
                                               self.ta.time(0),
                                               self.ta)  # function
         bias_set.append(api.TemperatureSource(obs.mid_point(), b_ts))
     return bias_set
 def test_create_source_vector_does_not_leak(self):
     n = 365 * 24 * 1  # take the 1st memory checkpoint here
     for i in range(10):
         v = api.TemperatureSourceVector([
             api.TemperatureSource(
                 api.GeoPoint(0.0, 1.0, 2.0),
                 api.TimeSeries(api.TimeAxis(api.time(0), api.time(3600),
                                             n),
                                fill_value=float(x),
                                point_fx=api.POINT_AVERAGE_VALUE))
             for x in range(n)
         ])
         self.assertIsNotNone(v)
         del v
     pass  # take the 2nd memory checkpoint here; it should be approx. the same as the 1st
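One hedged way to turn those checkpoint comments into actual measurements (resource is POSIX-only, and ru_maxrss is a high-water mark, so it only detects growth):

 import resource

 def rss_peak():
     # peak resident set size; KiB on Linux, bytes on macOS
     return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

 first_checkpoint = rss_peak()
 # ... run the allocation loop above ...
 second_checkpoint = rss_peak()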