Example #1
    def test_percentiles(self):
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)
        timeseries=api.TsVector()

        for i in range(10):
            timeseries.append(
                api.TimeSeries(ta=ta, fill_value=i, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))

        wanted_percentiles=api.IntVector([api.statistics_property.MIN_EXTREME,
                                          0, 10, 50,
                                          api.statistics_property.AVERAGE,
                                          70, 100,
                                          api.statistics_property.MAX_EXTREME])
        ta_day=api.TimeAxisFixedDeltaT(t0, dt*24, n//24)
        ta_day2=api.TimeAxis(t0, dt*24, n//24)
        percentiles=api.percentiles(timeseries, ta_day, wanted_percentiles)
        percentiles2=timeseries.percentiles(ta_day2, wanted_percentiles)  # just to verify it works with alt. syntax

        self.assertEqual(len(percentiles2), len(percentiles))

        for i in range(len(ta_day)):
            self.assertAlmostEqual(0.0, percentiles[0].value(i), 3, "min-extreme ")
            self.assertAlmostEqual(0.0, percentiles[1].value(i), 3, "  0-percentile")
            self.assertAlmostEqual(0.9, percentiles[2].value(i), 3, " 10-percentile")
            self.assertAlmostEqual(4.5, percentiles[3].value(i), 3, " 50-percentile")
            self.assertAlmostEqual(4.5, percentiles[4].value(i), 3, "   -average")
            self.assertAlmostEqual(6.3, percentiles[5].value(i), 3, " 70-percentile")
            self.assertAlmostEqual(9.0, percentiles[6].value(i), 3, "100-percentile")
            self.assertAlmostEqual(9.0, percentiles[7].value(i), 3, "max-extreme")
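The asserted numbers follow from linear interpolation between order statistics: each daily bucket aggregates the ten constant series valued 0..9, so the p-percentile sits at rank p/100 * (10 - 1). A minimal numpy sketch (numpy assumed available, as elsewhere in these tests) reproduces the expected values:

import numpy as np

samples = np.arange(10)  # the ten constant series contribute the sample {0, ..., 9}
for p in (0, 10, 50, 70, 100):
    print(p, np.percentile(samples, p))  # -> 0.0, 0.9, 4.5, 6.3, 9.0 with linear interpolation
print("average:", samples.mean())  # -> 4.5, matching the AVERAGE entry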
Example #2
    def test_extend_vector_of_timeseries(self):
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=512

        tsvector=api.TsVector()

        ta=api.TimeAxisFixedDeltaT(t0 + 3*n*dt, dt, 2*n)

        tsvector.push_back(api.TimeSeries(
            ta=api.TimeAxisFixedDeltaT(t0, dt, 2*n),
            fill_value=1.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))
        tsvector.push_back(api.TimeSeries(
            ta=api.TimeAxisFixedDeltaT(t0 + 2*n*dt, dt, 2*n),
            fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))

        extension=api.TimeSeries(ta=ta, fill_value=8.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)

        # extend after all time-series in the vector
        extended_tsvector=tsvector.extend_ts(extension)

        # assert first element
        for i in range(2*n):
            self.assertEqual(extended_tsvector[0](t0 + i*dt), 1.0)
        for i in range(n):
            self.assertTrue(math.isnan(extended_tsvector[0](t0 + (2*n + i)*dt)))
        for i in range(2*n):
            self.assertEqual(extended_tsvector[0](t0 + (3*n + i)*dt), 8.0)

        # assert second element
        for i in range(2*n):
            self.assertEqual(extended_tsvector[1](t0 + (2*n + i)*dt), 2.0)
        for i in range(n):
            self.assertEqual(extended_tsvector[1](t0 + (4*n + i)*dt), 8.0)

        tsvector_2=api.TsVector()
        tsvector_2.push_back(api.TimeSeries(
            ta=api.TimeAxisFixedDeltaT(t0 + 2*n*dt, dt, 4*n),
            fill_value=10.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))
        tsvector_2.push_back(api.TimeSeries(
            ta=api.TimeAxisFixedDeltaT(t0 + 4*n*dt, dt, 4*n),
            fill_value=20.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))

        # extend each element in tsvector by the corresponding element in tsvector_2
        extended_tsvector=tsvector.extend_ts(tsvector_2)

        # assert first element
        for i in range(2*n):
            self.assertEqual(extended_tsvector[0](t0 + i*dt), 1.0)
        for i in range(4*n):
            self.assertEqual(extended_tsvector[0](t0 + (2*n + i)*dt), 10.0)

        # assert second element
        for i in range(2*n):
            self.assertEqual(extended_tsvector[1](t0 + (2*n + i)*dt), 2.0)
        for i in range(4*n):
            self.assertEqual(extended_tsvector[1](t0 + (4*n + i)*dt), 20.0)
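For orientation, the time layout behind the assertions above, in comment form (illustration only):

# first element after extend_ts(extension):
#   [t0,         t0 + 2n*dt)  -> 1.0   original series
#   [t0 + 2n*dt, t0 + 3n*dt)  -> nan   gap: neither series covers it
#   [t0 + 3n*dt, t0 + 5n*dt)  -> 8.0   extension
# second element (starts at t0 + 2n*dt and overlaps the extension, so no gap):
#   [t0 + 2n*dt, t0 + 4n*dt)  -> 2.0
#   [t0 + 4n*dt, t0 + 5n*dt)  -> 8.0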
Example #3
    def test_can_run_bayesian_kriging_from_observation_sites_to_1km_grid(self):
        """
        Somewhat more complex test, first do kriging of 1 timeseries out to grid (expect same values flat)
        then do kriging of 3 time-series out to the grid (expect different values, no real verification here since this is done elsewhere

        """
        # arrange the test with a btk_parameter, a source grid and a destination grid
        btk_parameter = api.BTKParameter(temperature_gradient=-0.6, temperature_gradient_sd=0.25, sill=25.0, nugget=0.5, range=20000.0, zscale=20.0)
        fx = lambda z: api.DoubleVector.from_numpy(np.zeros(self.n))  # note: defined but not used in this test

        grid_1km_1 = self._create_geo_point_grid(self.mnx, self.mny, self.dx_model)
        grid_1km_3 = self._create_geo_point_grid(self.mnx, self.mny, self.dx_model)

        observation_sites = api.TemperatureSourceVector()
        ta_obs = api.TimeAxisFixedDeltaT(self.t, self.d * 3, int(self.n / 3))
        ta_grid = api.TimeAxisFixedDeltaT(self.t, self.d, self.n)
        point_fx = api.point_interpretation_policy.POINT_AVERAGE_VALUE
        ts_site_1 = api.TimeSeries(ta_obs, values=api.DoubleVector.from_numpy(
            (20.0 - 0.6 * 5.0 / 100) + 3.0 * np.sin(np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi / 8.0 - np.pi / 2.0)),
                                   point_fx=point_fx)
        ts_site_2 = api.TimeSeries(ta_obs, values=api.DoubleVector.from_numpy(
            (20.0 - 0.6 * 500.0 / 100) + 3.0 * np.sin(np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi / 8.0 - np.pi / 2.0)),
                                   point_fx=point_fx)
        ts_site_3 = api.TimeSeries(ta_obs, values=api.DoubleVector.from_numpy(
            (20.0 - 0.6 * 1050.0 / 100) + 3.0 * np.sin(np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi / 8.0 - np.pi / 2.0)),
                                   point_fx=point_fx)

        observation_sites.append(api.TemperatureSource(api.GeoPoint(50.0, 50.0, 5.0), ts_site_1))

        # act 1: just one time-series put into the system should give the same (true-averaged) ts in all grid-1km cells (which could be improved using the std. gradient..)
        grid_1km_1ts = api.bayesian_kriging_temperature(observation_sites, grid_1km_1, ta_grid, btk_parameter)

        # assert 1:
        self.assertEqual(len(grid_1km_1ts), self.mnx * self.mny)
        expected_grid_1ts_values = ts_site_1.average(api.TimeAxis(ta_grid)).values.to_numpy()

        for gts in grid_1km_1ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertTrue(np.allclose(expected_grid_1ts_values, gts.ts.values.to_numpy()))

        observation_sites.append(api.TemperatureSource(api.GeoPoint(9000.0, 500.0, 500.0), ts_site_2))
        observation_sites.append(api.TemperatureSource(api.GeoPoint(9000.0, 12000.0, 1050.0), ts_site_3))

        grid_1km_3ts = api.bayesian_kriging_temperature(observation_sites, grid_1km_3, ta_grid, btk_parameter)

        self.assertEqual(len(grid_1km_3ts), self.mnx * self.mny)

        for gts in grid_1km_3ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertFalse(np.allclose(expected_grid_1ts_values, gts.ts.values.to_numpy()))
Example #4
def create_mock_station_data(t0, dt, n_steps, **kwargs):
    time_axis = api.TimeAxisFixedDeltaT(t0, dt, n_steps)
    return {"temperature": create_mock_time_series_data("temperature", time_axis, **kwargs),
            "precipitation": create_mock_time_series_data("precipitation", time_axis, **kwargs),
            "relative_humidity": create_mock_time_series_data("relative_humidity", time_axis, **kwargs),
            "wind_speed": create_mock_time_series_data("wind_speed", time_axis, **kwargs),
            "radiation": create_mock_time_series_data("radiation", time_axis, **kwargs)}
Example #5
    def _transform_raw(self, data, time):

        #def noop_time(t):
        #    return api.TimeAxisFixedDeltaT(api.utctime(t[0]), api.timespan(t[1] - t[0]), len(t))

        def noop_space(x):
            return x

        def air_temp_conv(x):
            return x - 273.15

        def prec_conv(x):
            return x*3600

        convert_map = {"wind_speed": lambda x, ta: (noop_space(x), ta),
                       "radiation": lambda x, ta: (noop_space(x), ta),
                       "temperature": lambda x, ta: (air_temp_conv(x), ta),
                       "precipitation": lambda x, ta: (prec_conv(x), ta),
                       "relative_humidity": lambda x, ta: (noop_space(x), ta)}

        ta = api.TimeAxisFixedDeltaT(int(time[0]), int(time[1] - time[0]), len(time))
        res = {}
        for k, v in data.items():
            res[k] = convert_map[k](v, ta)
        return res
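The two non-trivial conversions above are plain unit changes. A self-contained sketch, with the units as assumptions (source temperature in Kelvin, precipitation as a per-second rate):

def air_temp_conv(x):
    return x - 273.15  # Kelvin -> Celsius

def prec_conv(x):
    return x*3600  # per-second rate -> per-hour, e.g. mm/s -> mm/h

assert abs(air_temp_conv(293.15) - 20.0) < 1e-9
assert abs(prec_conv(0.001) - 3.6) < 1e-9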
Example #6
    def test_time_shift(self):
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        t1=c.time(2017, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)
        ts0=api.TimeSeries(ta=ta, fill_value=3.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        tsa=api.TimeSeries('a')

        ts1=api.time_shift(tsa, t1 - t0)
        self.assertTrue(ts1.needs_bind())
        ts1_blob=ts1.serialize()
        ts1=api.TimeSeries.deserialize(ts1_blob)
        tsb=ts1.find_ts_bind_info()
        self.assertEqual(len(tsb), 1)
        tsb[0].ts.bind(ts0)
        ts1.bind_done()
        self.assertFalse(ts1.needs_bind())

        ts2=2.0*ts1.time_shift(t0 - t1)  # just to verify it still can take part in an expression

        for i in range(ts0.size()):
            self.assertAlmostEqual(ts0.value(i), ts1.value(i), 3, "expect values to be equal")
            self.assertAlmostEqual(ts0.value(i)*2.0, ts2.value(i), 3, "expect values to be doubled")
            self.assertEqual(ts0.time(i) + (t1 - t0), ts1.time(i), "expect time to be offset by (t1 - t0)")
            self.assertEqual(ts0.time(i), ts2.time(i), "expect time to be equal")
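The serialize/bind round-trip above follows a fixed sequence; a comment recap of the steps as observed in this test:

# 1. api.TimeSeries('a') creates an unbound, symbolic reference
# 2. expressions built on it (time_shift here) report needs_bind() == True
# 3. the expression survives a serialize()/deserialize() round-trip
# 4. find_ts_bind_info() lists the unresolved references
# 5. bind(...) each of them, then call bind_done()
# 6. the expression now evaluates, and needs_bind() == False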
Example #7
    def test_abs(self):
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=4
        v=api.DoubleVector([1.0, -1.5, float("nan"), 3.0])
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)
        ts0=api.TimeSeries(ta=ta, values=v, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        tsa=api.TimeSeries('a')
        ts1=tsa.abs()
        ts1_blob=ts1.serialize()
        ts1=api.TimeSeries.deserialize(ts1_blob)
        self.assertTrue(ts1.needs_bind())
        bts=ts1.find_ts_bind_info()
        self.assertEqual(len(bts), 1)
        bts[0].ts.bind(ts0)
        ts1.bind_done()
        self.assertFalse(ts1.needs_bind())
        self.assertAlmostEqual(ts0.value(0), ts1.value(0), 6)
        self.assertAlmostEqual(abs(ts0.value(1)), ts1.value(1), 6)
        self.assertTrue(math.isnan(ts1.value(2)))
        self.assertAlmostEqual(ts0.value(3), ts1.value(3), 6)
        tsv0=api.TsVector()
        tsv0.append(ts0)
        tsv1=tsv0.abs()
        self.assertAlmostEqual(tsv0[0].value(0), tsv1[0].value(0), 6)
        self.assertAlmostEqual(abs(tsv0[0].value(1)), tsv1[0].value(1), 6)
        self.assertTrue(math.isnan(tsv1[0].value(2)))
        self.assertAlmostEqual(tsv0[0].value(3), tsv1[0].value(3), 6)
Example #8
    def test_can_run_bayesian_kriging_from_arome25_to_1km(self):
        """
        Verify that if we run btk interpolation, we do get updated time-series according to time-axis and range
        specified.

        """
        # arrange the test with a btk_parameter, a source grid and a destination grid
        btk_parameter = api.BTKParameter(temperature_gradient=-0.6,
                                         temperature_gradient_sd=0.25,
                                         sill=25.0,
                                         nugget=0.5,
                                         range=20000.0,
                                         zscale=20.0)
        fx = lambda z: api.DoubleVector.from_numpy(
            (20.0 - 0.6 * z / 100) + 3.0 * np.sin(
                np.arange(start=0, stop=self.n, step=1) * 2 * np.pi / 24.0 - np.pi / 2.0))
        arome_grid = self._create_geo_temperature_grid(self.nx, self.ny,
                                                       self.dx_arome, fx)
        destination_grid = self._create_geo_point_grid(self.mnx, self.mny,
                                                       self.dx_model)
        ta = api.TimeAxisFixedDeltaT(self.t, self.d * 3, int(self.n / 3))
        # act: run the bayesian_kriging_temperature algorithm.
        r = api.bayesian_kriging_temperature(arome_grid, destination_grid, ta,
                                             btk_parameter)
        # assert
        self.assertIsNotNone(r)
        self.assertEqual(len(r), self.mnx * self.mny)
        for gts in r:  # do some sanity checks for the btk. Note that full-range checking is already done elsewhere
            self.assertEqual(gts.ts.size(), ta.size())
            self.assertLess(np.max(gts.ts.values.to_numpy()), 23.0)  # all values less than ~max
            self.assertGreater(np.min(gts.ts.values.to_numpy()), 7.0)  # all values greater than ~min
Example #9
    def test_can_run_ordinary_kriging_from_observation_sites_to_1km_grid(self):
        """
        Somewhat more complex test, first do kriging of 1 timeseries out to grid (expect same values flat)
        then do kriging of 3 time-series out to the grid (expect different values, no real verification here since this is done elsewhere

        """
        # arrange the test with a btk_parameter, a source grid and a destination grid
        ok_parameter = api.OKParameter(c=1.0, a=10.0*1000.0, cov_type=api.OKCovarianceType.EXPONENTIAL, z_scale=1.0)
        fx = lambda z: api.DoubleVector.from_numpy(np.zeros(self.n))  # note: defined but not used in this test

        grid_1km_1 = self._create_geo_point_grid(self.mnx, self.mny, self.dx_model)
        grid_1km_3 = self._create_geo_point_grid(self.mnx, self.mny, self.dx_model)

        observation_sites = api.GeoPointSourceVector()
        ta_obs = api.TimeAxisFixedDeltaT(self.t, self.d * 3, int(self.n / 3))
        ta_grid = api.TimeAxisFixedDeltaT(self.t, self.d, self.n)
        point_fx = api.point_interpretation_policy.POINT_AVERAGE_VALUE
        ts_site_1 = api.TimeSeries(ta_obs, values=api.DoubleVector.from_numpy(
            (1.0) + 0.1 * np.sin(np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi / 8.0 - np.pi / 2.0)), point_fx=point_fx)
        ts_site_2 = api.TimeSeries(ta_obs, values=api.DoubleVector.from_numpy(
            (0.8) + 0.2 * np.sin(np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi / 8.0 - np.pi / 2.0)), point_fx=point_fx)
        ts_site_3 = api.TimeSeries(ta_obs, values=api.DoubleVector.from_numpy(
            (1.2) + 0.1 * np.sin(np.arange(start=0, stop=ta_obs.size(), step=1) * 2 * np.pi / 8.0 - np.pi / 2.0)), point_fx=point_fx)

        observation_sites.append(api.GeoPointSource(api.GeoPoint(50.0, 50.0, 5.0), ts_site_1))

        # act 1: just one time-series put into the system should give the same (true-averaged) ts in all grid-1km cells (which could be improved using the std. gradient..)
        grid_1km_1ts = api.ordinary_kriging(observation_sites, grid_1km_1, ta_grid, ok_parameter)

        # assert 1:
        self.assertEqual(len(grid_1km_1ts), self.mnx * self.mny)
        expected_grid_1ts_values = ts_site_1.average(api.TimeAxis(ta_grid)).values.to_numpy()

        for gts in grid_1km_1ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertTrue(np.allclose(expected_grid_1ts_values, gts.ts.values.to_numpy()))

        observation_sites.append(api.GeoPointSource(api.GeoPoint(9000.0, 500.0, 500.0), ts_site_2))
        observation_sites.append(api.GeoPointSource(api.GeoPoint(9000.0, 12000.0, 1050.0), ts_site_3))
        ok_parameter.cov_type = api.OKCovarianceType.GAUSSIAN  # just to switch covariance formula
        grid_1km_3ts = api.ordinary_kriging(observation_sites, grid_1km_3, ta_grid, ok_parameter)

        self.assertEqual(len(grid_1km_3ts), self.mnx * self.mny)

        for gts in grid_1km_3ts:
            self.assertEqual(gts.ts.size(), ta_grid.size())
            self.assertFalse(np.allclose(expected_grid_1ts_values, gts.ts.values.to_numpy()))
Example #10
    def _convert_to_timeseries(self, data, t, ts_id):
        ta = api.TimeAxisFixedDeltaT(int(t[0]), int(t[1]) - int(t[0]), len(t))
        tsc = api.TsFactory().create_point_ts

        def construct(d):
            return tsc(ta.size(), ta.start, ta.delta_t,
                       api.DoubleVector.FromNdArray(d))

        ts = [construct(data[:, j]) for j in range(data.shape[-1])]
        return {k: v for k, v in zip(ts_id, ts)}
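A hypothetical call sketch (values and ids invented for illustration): data is shaped (len(t), len(ts_id)), one column per series, and the result maps each id to the point time-series built from its column:

import numpy as np

data = np.array([[1.0, 10.0],
                 [2.0, 20.0],
                 [3.0, 30.0]])  # shape (n_steps, n_series): one column per series
t = np.array([0, 3600, 7200])  # epoch seconds; t[1] - t[0] defines delta_t
# result = self._convert_to_timeseries(data, t, ["temperature", "precipitation"])
# -> {"temperature": <ts over column 0>, "precipitation": <ts over column 1>}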
Example #11
    def test_source_uid(self):
        cal = api.Calendar()
        time_axis = api.TimeAxisFixedDeltaT(cal.time(api.YMDhms(2015, 1, 1, 0, 0, 0)), api.deltahours(1), 240)
        mid_point = api.GeoPoint(1000, 1000, 100)
        precip_source = self._create_constant_geo_ts(api.PrecipitationSource, mid_point, time_axis.total_period(), 5.0)
        self.assertIsNotNone(precip_source.uid)
        precip_source.uid = 'abc'
        self.assertEqual(precip_source.uid, 'abc')
Example #12
    def concat_t(t):
        # note: fragment of a closure; self.fc_len_to_concat and lead_time come from the enclosing scope
        t_stretch = np.ravel(
            np.repeat(t, self.fc_len_to_concat).reshape(
                len(t), self.fc_len_to_concat) +
            lead_time[0:self.fc_len_to_concat])
        return api.TimeAxisFixedDeltaT(
            int(t_stretch[0]),
            int(t_stretch[1]) - int(t_stretch[0]), len(t_stretch))
Example #13
    def test_create_region_environment(self):
        cal = api.Calendar()
        time_axis = api.TimeAxisFixedDeltaT(cal.time(api.YMDhms(2015, 1, 1, 0, 0, 0)), api.deltahours(1), 240)
        re = self.create_dummy_region_environment(time_axis, api.GeoPoint(1000, 1000, 100))
        self.assertIsNotNone(re)
        self.assertEqual(len(re.radiation), 1)
        self.assertAlmostEqual(re.radiation[0].ts.value(0), 300.0)
        vv = re.radiation.values_at_time(time_axis.time(0))  # verify .values_at_time(t)
        self.assertEqual(len(vv), len(re.radiation))
        self.assertAlmostEqual(vv[0], 300.0)
Example #14
    def test_create_timeaxis(self):
        self.assertEqual(self.ta.size(), self.n)
        self.assertEqual(len(self.ta), self.n)
        self.assertEqual(self.ta(0).start, self.t)
        self.assertEqual(self.ta(0).end, self.t + self.d)
        self.assertEqual(self.ta(1).start, self.t + self.d)
        self.assertEqual(self.ta.total_period().start, self.t)
        va = np.array([86400, 3600, 3], dtype=np.int64)
        xta = api.TimeAxisFixedDeltaT(int(va[0]), int(va[1]), int(va[2]))
        self.assertEqual(xta.size(), 3)
Example #15
def time_axis_from_dict(t_dict):
    utc = api.Calendar()

    sim_start = dt.datetime.strptime(t_dict['start_datetime'], "%Y-%m-%dT%H:%M:%S")
    utc_start = utc.time(
        api.YMDhms(sim_start.year, sim_start.month, sim_start.day, sim_start.hour, sim_start.minute, sim_start.second))
    tstep = t_dict['run_time_step']
    nstep = t_dict['number_of_steps']
    time_axis = api.TimeAxisFixedDeltaT(utc_start, tstep, nstep)

    return time_axis
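A hypothetical input sketch for the helper above (field names taken from the lookups in the function; values invented):

t_dict = {
    'start_datetime': '2015-01-01T00:00:00',  # parsed with "%Y-%m-%dT%H:%M:%S"
    'run_time_step': 3600,                    # seconds per step
    'number_of_steps': 240,
}
# time_axis = time_axis_from_dict(t_dict)  # -> hourly axis, 240 steps from 2015-01-01T00:00:00 UTC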
Example #16
    def test_region_environment_variable_list(self):
        """Just to verify that the ARegionEnvironment.variables container is exposed, to ease scripting."""
        cal = api.Calendar()
        time_axis = api.TimeAxisFixedDeltaT(cal.time(2015, 1, 1, 0, 0, 0), api.deltahours(1), 240)
        e = self.create_dummy_region_environment(time_axis, api.GeoPoint(0.0, 1.0, 2.0))
        self.assertIsNotNone(e)
        self.assertEqual(len(e.variables), 5)
        for v in e.variables:
            self.assertIsNotNone(v[1])
            self.assertEqual(getattr(e, v[0])[0].mid_point_, v[1][0].mid_point_)  # equivalent, just check first midpoint
            self.assertEqual(getattr(e, v[0])[0].ts.value(0), v[1][0].ts.value(0))  # and value
Example #17
    def setUp(self):
        self.cal = api.Calendar()
        self.dt = api.deltahours(1)
        self.nt = 24 * 10
        self.t0 = self.cal.time(2016, 1, 1)
        self.ta = api.TimeAxis(self.t0, self.dt, self.nt)
        self.ta1 = api.TimeAxisFixedDeltaT(self.t0, self.dt, self.nt)

        self.geo_points = api.GeoPointVector()
        self.geo_points.append(api.GeoPoint(100, 100, 1000))
        self.geo_points.append(api.GeoPoint(5100, 100, 1150))
        self.geo_points.append(api.GeoPoint(100, 5100, 850))
Example #18
    def test_unit_conversion(self):
        utc = api.Calendar()
        # we use hours both before and after the epoch to ensure the sign is ok
        t_num = np.arange(-24, 24, 1, dtype=np.float64)
        t_converted = convert_netcdf_time('hours since 1970-01-01 00:00:00', t_num)
        t_axis = api.TimeAxisFixedDeltaT(utc.time(1969, 12, 31, 0, 0, 0), api.deltahours(1), 2 * 24)
        for i in range(t_axis.size()):
            self.assertEqual(t_converted[i], t_axis(i).start)
Example #19
    def test_compute_lwc_percentiles(self):
        # Simulation time axis
        year, month, day, hour = 2010, 9, 1, 0
        dt = api.deltahours(24)
        n_steps = 400
        utc = api.Calendar()  # No offset gives Utc
        t0 = utc.time(year, month, day, hour)
        time_axis = api.TimeAxisFixedDeltaT(t0, dt, n_steps)

        # Some fake ids
        region_id = 0
        interpolation_id = 0

        # Simulation coordinate system
        epsg = "32633"

        # Model
        model_t = pt_gs_k.PTGSKModel

        # Configs and repositories
        dataset_config_file = path.join(path.dirname(__file__), "netcdf",
                                        "atnsjoen_datasets.yaml")
        region_config_file = path.join(path.dirname(__file__), "netcdf",
                                       "atnsjoen_calibration_region.yaml")
        region_config = RegionConfig(region_config_file)
        model_config = ModelConfig(self.model_config_file)
        dataset_config = YamlContent(dataset_config_file)
        region_model_repository = RegionModelRepository(
            region_config, model_config, model_t, epsg)
        interp_repos = InterpolationParameterRepository(model_config)
        netcdf_geo_ts_repos = []
        for source in dataset_config.sources:
            station_file = source["params"]["stations_met"]
            netcdf_geo_ts_repos.append(
                GeoTsRepository(source["params"], station_file, ""))
        geo_ts_repository = GeoTsRepositoryCollection(netcdf_geo_ts_repos)

        # Construct target discharge series
        simulator = DefaultSimulator(region_id, interpolation_id,
                                     region_model_repository,
                                     geo_ts_repository, interp_repos, None)
        n_cells = simulator.region_model.size()
        state_repos = DefaultStateRepository(model_t, n_cells)
        cid = 1
        simulator.region_model.set_state_collection(cid, True)
        simulator.run(time_axis, state_repos.get_state(0))
        self.assertAlmostEqual(
            simulator.region_model.cells[0].rc.pe_output.values[0],
            0.039768354, 5)  # just to verify pot.evap by regression, mm/h

        percentile_list = [10, 25, 50, 75, 90]
Example #20
    def test_idw_rel_hum_from_set_to_grid(self):
        """
        Test IDW interpolation transforms wind_speed time-series according to time-axis and range.

        """
        idw_p = api.IDWParameter()
        self.assertEqual(idw_p.max_distance, 200000)
        self.assertEqual(idw_p.max_members, 10)
        fx = lambda z: [15 for x in range(self.n)]
        arome_grid = self._create_geo_rel_hum_grid(self.nx, self.ny, self.dx_arome, fx)
        dest_grid_points = self._create_geo_point_grid(self.mnx, self.mny, self.dx_model)
        ta = api.TimeAxisFixedDeltaT(self.t, self.d * 3, int(self.n / 3))
        dest_grid = api.idw_relative_humidity(arome_grid, dest_grid_points, ta, idw_p)
        self.assertIsNotNone(dest_grid)
        self.assertEqual(len(dest_grid), self.mnx * self.mny)
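Since fx yields a constant field of 15 and inverse-distance weights sum to one, every destination cell should read 15; a natural extra check, sketched here as an assumption rather than as part of the original test:

# hypothetical extra assertion, assuming the result elements expose .ts as in the kriging tests above:
# for gts in dest_grid:
#     self.assertTrue(np.allclose(gts.ts.values.to_numpy(), 15.0))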
Example #21
    def test_average_accessor(self):
        dv=np.arange(self.ta.size())
        v=api.DoubleVector.from_numpy(dv)
        t=api.UtcTimeVector()
        for i in range(self.ta.size()):
            t.push_back(self.ta(i).start)
        t.push_back(self.ta(self.ta.size() - 1).end)  # important! needs n+1 points to determine n periods in the timeaxis
        tsf=api.TsFactory()
        ts1=tsf.create_point_ts(self.ta.size(), self.t, self.d, v)
        ts2=tsf.create_time_point_ts(self.ta.total_period(), t, v)
        tax=api.TimeAxisFixedDeltaT(self.ta.total_period().start + api.deltaminutes(30), api.deltahours(1),
                                    self.ta.size())
        avg1=api.AverageAccessorTs(ts1, tax)
        self.assertEqual(avg1.size(), tax.size())
        self.assertIsNotNone(ts2)
Example #22
    def run_simulator(self, model_t):
        # Simulation time axis
        dt0 = api.YMDhms(2015, 8, 24, 6)
        n_hours = 30
        dt = api.deltahours(1)
        utc = api.Calendar()  # No offset gives Utc
        t0 = utc.time(dt0)
        time_axis = api.TimeAxisFixedDeltaT(t0, dt, n_hours)

        # Some dummy ids not needed for the netcdf based repositories
        region_id = 0
        interpolation_id = 0

        # Simulation coordinate system
        epsg = "32633"

        # Configs and repositories
        region_config = RegionConfig(self.region_config_file)
        model_config = ModelConfig(self.model_config_file)
        region_model_repository = RegionModelRepository(
            region_config, model_config, model_t, epsg)
        interp_repos = InterpolationParameterRepository(model_config)
        date_str = "{}{:02}{:02}_{:02}".format(dt0.year, dt0.month, dt0.day,
                                               dt0.hour)
        base_dir = path.join(shyftdata_dir, "repository",
                             "arome_data_repository")
        f1 = "arome_metcoop_red_default2_5km_{}.nc".format(date_str)
        f2 = "arome_metcoop_red_test2_5km_{}.nc".format(date_str)

        ar1 = AromeDataRepository(epsg,
                                  base_dir,
                                  filename=f1,
                                  allow_subset=True)
        ar2 = AromeDataRepository(epsg,
                                  base_dir,
                                  filename=f2,
                                  elevation_file=f1,
                                  allow_subset=True)

        geo_ts_repository = GeoTsRepositoryCollection([ar1, ar2])

        simulator = DefaultSimulator(region_id, interpolation_id,
                                     region_model_repository,
                                     geo_ts_repository, interp_repos, None)
        n_cells = simulator.region_model.size()
        state_repos = DefaultStateRepository(model_t, n_cells)
        simulator.run(time_axis, state_repos.get_state(0))
Example #23
    def test_run_arome_ensemble(self):
        # Simulation time axis
        utc = api.Calendar()  # No offset gives Utc
        t0 = utc.time(2015, 7, 26, 0)
        n_hours = 30
        dt = api.deltahours(1)
        time_axis = api.TimeAxisFixedDeltaT(t0, dt, n_hours)

        # Some dummy ids not needed for the netcdf based repositories
        region_id = 0
        interpolation_id = 0

        # Simulation coordinate system
        epsg = "32633"

        # Model
        model_t = pt_gs_k.PTGSKOptModel

        # Configs and repositories
        region_config = RegionConfig(self.region_config_file)
        model_config = ModelConfig(self.model_config_file)
        region_model_repository = RegionModelRepository(
            region_config, model_config, model_t, epsg)
        interp_repos = InterpolationParameterRepository(model_config)
        base_dir = path.join(shyftdata_dir, "netcdf", "arome")
        pattern = "fc*.nc"
        try:
            geo_ts_repository = AromeDataRepository(epsg,
                                                    base_dir,
                                                    filename=pattern,
                                                    allow_subset=True)
        except Exception as e:
            print("**** test_run_arome_ensemble: Arome data missing or"
                  " wrong, test inconclusive ****")
            print("****{}****".format(e))
            self.skipTest(
                "**** test_run_arome_ensemble: Arome data missing or wrong, test "
                "inconclusive ****\n\t exception:{}".format(e))
        simulator = DefaultSimulator(region_id, interpolation_id,
                                     region_model_repository,
                                     geo_ts_repository, interp_repos, None)
        n_cells = simulator.region_model.size()
        state_repos = DefaultStateRepository(model_t, n_cells)
        simulators = simulator.create_ensembles(time_axis, t0,
                                                state_repos.get_state(0))
        for s in simulators:
            s.simulate()
Example #24
    def test_idw_radiation_transform_from_set_to_grid(self):
        """
        Test IDW interpolation transforms wind_speed time-series according to time-axis and range.

        """
        idw_p = api.IDWParameter()
        self.assertEqual(idw_p.max_distance, 200000)
        self.assertEqual(idw_p.max_members, 10)
        fx = lambda z: [15 for x in range(self.n)]
        arome_grid = self._create_geo_radiation_grid(self.nx, self.ny, self.dx_arome, fx)
        dest_grid_points = self._create_geo_point_grid(self.mnx, self.mny, self.dx_model)
        ta = api.TimeAxisFixedDeltaT(self.t, self.d * 3, int(self.n / 3))
        radiation_slope_factors = api.DoubleVector()
        radiation_slope_factors[:] = [0.9 for i in range(len(dest_grid_points))]
        dest_grid = api.idw_radiation(arome_grid, dest_grid_points, ta, idw_p, radiation_slope_factors)
        self.assertIsNotNone(dest_grid)
        self.assertEqual(len(dest_grid), self.mnx * self.mny)
Example #25
    def get_destination_repo(self):
        if not hasattr(self.datasets_config, 'destinations'):
            return []
        dst_repo = [{'repository': repo['repository'](**repo['params']),
                     '1D_timeseries': list(repo['1D_timeseries'])}
                    for repo in self.datasets_config.destinations]
        for repo in dst_repo:
            for dst in repo['1D_timeseries']:
                if dst['time_axis'] is None:
                    dst['time_axis'] = self.time_axis
                else:
                    dst['time_axis'] = api.TimeAxisFixedDeltaT(
                        utctime_from_datetime(dst['time_axis']['start_datetime']),
                        dst['time_axis']['time_step_length'],
                        dst['time_axis']['number_of_steps'])
        return dst_repo
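A hypothetical shape for one destinations entry, reverse-engineered from the lookups above (all names and values illustrative):

import datetime

class SomeDestinationRepository:  # hypothetical stand-in for a real repository class
    def __init__(self, **params):
        self.params = params

destinations = [{
    'repository': SomeDestinationRepository,  # a class; constructed as repo['repository'](**repo['params'])
    'params': {'file_path': 'destinations.nc'},
    '1D_timeseries': [{
        'time_axis': {  # or None, to fall back to self.time_axis
            'start_datetime': datetime.datetime(2015, 1, 1),  # fed to utctime_from_datetime
            'time_step_length': 3600,
            'number_of_steps': 240,
        },
    }],
}]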
Example #26
    def test_timeseries_vector(self):
        c=api.Calendar()
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)

        a=api.TimeSeries(ta=ta, fill_value=3.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(ta=ta, fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)

        v=api.TsVector()
        v.append(a)
        v.append(b)

        self.assertEqual(len(v), 2)
        self.assertAlmostEqual(v[0].value(0), 3.0, msg="expect first ts to be 3.0")
        aa=api.TimeSeries(ta=a.time_axis, values=a.values,
                          point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)  # copy construct (really copy the values!)
        a.fill(1.0)
        self.assertAlmostEqual(v[0].value(0), 1.0, msg="expect first ts to be 1.0, because the vector keeps a reference")
        self.assertAlmostEqual(aa.value(0), 3.0)
Example #27
    def test_ts_transform(self):
        dv=np.arange(self.ta.size())
        v=api.DoubleVector.from_numpy(dv)
        t=api.UtcTimeVector()
        for i in range(self.ta.size()):
            t.push_back(self.ta(i).start)
        # t.push_back(self.ta(self.ta.size()-1).end) #important! needs n+1 points to determine n periods in the timeaxis
        t_start=self.ta.total_period().start
        dt=api.deltahours(1)
        tax=api.TimeAxisFixedDeltaT(t_start + api.deltaminutes(30), dt, self.ta.size())
        tsf=api.TsFactory()
        ts1=tsf.create_point_ts(self.ta.size(), self.t, self.d, v)
        ts2=tsf.create_time_point_ts(self.ta.total_period(), t, v)
        ts3=api.TsFixed(tax, v, api.POINT_INSTANT_VALUE)

        tst=api.TsTransform()
        tt1=tst.to_average(t_start, dt, tax.size(), ts1)
        tt2=tst.to_average(t_start, dt, tax.size(), ts2)
        tt3=tst.to_average(t_start, dt, tax.size(), ts3)
        self.assertEqual(tt1.size(), tax.size())
        self.assertEqual(tt2.size(), tax.size())
        self.assertEqual(tt3.size(), tax.size())
Example #28
    def test_compute_lwc_percentiles(self):
        # Simulation time axis
        year, month, day, hour = 2013, 9, 1, 0
        dt = api.deltahours(24)
        n_steps = 364
        utc = api.Calendar()  # No offset gives Utc
        t0 = utc.time(year, month, day, hour)
        time_axis = api.TimeAxisFixedDeltaT(t0, dt, n_steps)

        # Some fake ids
        region_id = 0
        interpolation_id = 0

        # Model
        region_model_repository = CFRegionModelRepository(
            self.region_config, self.model_config)
        interp_repos = InterpolationParameterRepository(
            self.interpolation_config)
        netcdf_geo_ts_repos = [
            CFDataRepository(32633,
                             source["params"]["filename"],
                             padding=source["params"]['padding'])
            for source in self.dataset_config.sources
        ]
        geo_ts_repository = GeoTsRepositoryCollection(netcdf_geo_ts_repos)

        # Construct target discharge series
        simulator = DefaultSimulator(region_id, interpolation_id,
                                     region_model_repository,
                                     geo_ts_repository, interp_repos, None)
        state_repos = DefaultStateRepository(simulator.region_model)
        cid = 1228
        simulator.region_model.set_state_collection(cid, True)
        simulator.run(time_axis=time_axis, state=state_repos.get_state(0))
        # TODO: Update the regression test below with correct result
        # self.assertAlmostEqual(simulator.region_model.cells[0].rc.pe_output.values[0], 0.039768354, 5)  # just to verify pot.evap by regression, mm/h

        percentile_list = [10, 25, 50, 75, 90]
Example #29
    def test_ts_extend(self):
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=512
        ta_a=api.TimeAxisFixedDeltaT(t0, dt, 2*n)
        ta_b=api.TimeAxisFixedDeltaT(t0 + n*dt, dt, 2*n)
        ta_c=api.TimeAxisFixedDeltaT(t0 + 2*n*dt, dt, 2*n)
        ta_d=api.TimeAxisFixedDeltaT(t0 + 3*n*dt, dt, 2*n)

        a=api.TimeSeries(ta=ta_a, fill_value=1.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(ta=ta_b, fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        c=api.TimeSeries(ta=ta_c, fill_value=4.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        d=api.TimeSeries(ta=ta_d, fill_value=8.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)

        # default behavior: extend from end of a
        ac=a.extend(c)

        for i in range(2*n):  # values from first ts
            self.assertEqual(ac(t0 + i*dt), 1.0)
        for i in range(2*n):  # values from extension ts
            self.assertEqual(ac(t0 + (i + 2*n)*dt), 4.0)

        # default behavior: extend from end of a, fill gap with nan
        ad=a.extend(d)

        for i in range(2*n):  # values from first
            self.assertEqual(ad(t0 + i*dt), 1.0)
        for i in range(n):  # gap
            self.assertTrue(math.isnan(ad(t0 + (i + 2*n)*dt)))
        for i in range(2*n):  # extension
            self.assertEqual(ad(t0 + (i + 3*n)*dt), 8.0)

        # split at the first value of d instead of last of c
        cd=c.extend(d, split_policy=api.extend_split_policy.RHS_FIRST)

        for i in range(n):  # first, only until the extension start
            self.assertEqual(cd(t0 + (2*n + i)*dt), 4.0)
        for i in range(2*n):  # extension
            self.assertEqual(cd(t0 + (3*n + i)*dt), 8.0)

        # split at a given time step, and extend the last value through the gap
        ac=a.extend(c, split_policy=api.extend_split_policy.AT_VALUE, split_at=(t0 + dt*n//2),
                    fill_policy=api.extend_fill_policy.USE_LAST)

        for i in range(n//2):  # first, only until the given split value
            self.assertEqual(ac(t0 + i*dt), 1.0)
        for i in range(3*n//2):  # gap, uses last value before gap
            self.assertEqual(ac(t0 + (n//2 + i)*dt), 1.0)
        for i in range(2*n):  # extension
            self.assertEqual(ac(t0 + (2*n + i)*dt), 4.0)

        # split at the beginning of the ts to extend when the extension starts before it
        cb=c.extend(b, split_policy=api.extend_split_policy.AT_VALUE, split_at=(t0 + 2*n*dt))

        for i in range(n):  # don't extend before
            self.assertTrue(math.isnan(cb(t0 + (n + i)*dt)))
        for i in range(n):  # we split at the beginning => only values from extension
            self.assertEqual(cb(t0 + (2*n + i)*dt), 2.0)
        for i in range(n):  # no values after extension
            self.assertTrue(math.isnan(cb(t0 + (3*n + i)*dt)))

        # extend with ts starting after the end, fill the gap with a given value
        ad=a.extend(d, fill_policy=api.extend_fill_policy.FILL_VALUE, fill_value=5.5)

        for i in range(2*n):  # first
            self.assertEqual(ad(t0 + i*dt), 1.0)
        for i in range(n):  # gap, filled with 5.5
            self.assertEqual(ad(t0 + (2*n + i)*dt), 5.5)
        for i in range(2*n):  # extension
            self.assertEqual(ad(t0 + (3*n + i)*dt), 8.0)

        # check extend with a more exotic combination of time-axes (we had an issue with this..)
        a=api.TimeSeries(api.TimeAxis(0, 1, 10), fill_value=1.0, point_fx=api.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(api.TimeAxis(api.Calendar(), 0, 1, 20), fill_value=2.0, point_fx=api.POINT_AVERAGE_VALUE)
        ab=a.extend(b)
        ba=b.extend(a, split_policy=api.extend_split_policy.AT_VALUE, split_at=a.time_axis.time(5))
        self.assertAlmostEqual(ab.value(0), 1.0)
        self.assertAlmostEqual(ab.value(11), 2.0)
        self.assertAlmostEqual(ba.value(0), 2.0)
        self.assertAlmostEqual(ab.value(7), 1.0)
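A compact recap of the extend policies exercised above, in comment form (behavior as observed in the assertions, not additional API documentation):

# a.extend(b, split_policy=..., fill_policy=...):
#   split policy:
#     default    keep all of a, append b after a's end
#     RHS_FIRST  keep a only up to where b starts
#     AT_VALUE   keep a up to the explicit split_at time
#   fill policy, for any gap between the two:
#     default     nan
#     USE_LAST    repeat a's last value through the gap
#     FILL_VALUE  use the supplied fill_value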
Example #30
    def test_a_time_series_vector(self):
        c=api.Calendar()
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)

        a=api.TimeSeries(ta=ta, fill_value=3.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(ta=ta, fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        c=api.TimeSeries(ta=ta, fill_value=10.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)  # note: c shadows the Calendar above
        v=api.TsVector()
        v.append(a)
        v.append(b)

        self.assertEqual(len(v), 2)
        self.assertAlmostEqual(v[0].value(0), 3.0, msg="expect first ts to be 3.0")
        aa=api.TimeSeries(ta=a.time_axis, values=a.values,
                          point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)  # copy construct (really copy the values!)
        a.fill(1.0)
        self.assertAlmostEqual(v[0].value(0), 1.0, msg="expect first ts to be 1.0, because the vector keeps a reference")
        self.assertAlmostEqual(aa.value(0), 3.0)

        vt=v.values_at(t0).to_numpy()
        self.assertEqual(len(vt), len(v))
        v1=v[0:1]
        self.assertEqual(len(v1), 1)
        self.assertAlmostEqual(v1[0].value(0), 1.0)
        v_clone=api.TsVector(v)
        self.assertEqual(len(v_clone), len(v))
        del v_clone[-1]
        self.assertEqual(len(v_clone), 1)
        self.assertEqual(len(v), 2)
        v_slice_all=v.slice(api.IntVector())
        v_slice_1=v.slice(api.IntVector([1]))
        v_slice_12=v.slice(api.IntVector([0, 1]))
        self.assertEqual(len(v_slice_all), 2)
        self.assertEqual(len(v_slice_1), 1)
        self.assertAlmostEqual(v_slice_1[0].value(0), 2.0)
        self.assertEqual(len(v_slice_12), 2)
        self.assertAlmostEqual(v_slice_12[0].value(0), 1.0)

        # multiplication by scalar
        v_x_2a=v*2.0
        v_x_2b=2.0*v
        for i in range(len(v)):
            self.assertAlmostEqual(v_x_2a[i].value(0), 2*v[i].value(0))
            self.assertAlmostEqual(v_x_2b[i].value(0), 2*v[i].value(0))

        # division by scalar
        v_d_a=v/3.0
        v_d_b=3.0/v
        for i in range(len(v)):
            self.assertAlmostEqual(v_d_a[i].value(0), v[i].value(0)/3.0)
            self.assertAlmostEqual(v_d_b[i].value(0), 3.0/v[i].value(0))

        # addition by scalar
        v_a_a=v + 3.0
        v_a_b=3.0 + v
        for i in range(len(v)):
            self.assertAlmostEqual(v_a_a[i].value(0), v[i].value(0) + 3.0)
            self.assertAlmostEqual(v_a_b[i].value(0), 3.0 + v[i].value(0))

        # sub by scalar
        v_s_a=v - 3.0
        v_s_b=3.0 - v
        for i in range(len(v)):
            self.assertAlmostEqual(v_s_a[i].value(0), v[i].value(0) - 3.0)
            self.assertAlmostEqual(v_s_b[i].value(0), 3.0 - v[i].value(0))

        # multiplication vector by ts
        v_x_ts=v*c
        ts_x_v=c*v
        for i in range(len(v)):
            self.assertAlmostEqual(v_x_ts[i].value(0), v[i].value(0)*c.value(0))
            self.assertAlmostEqual(ts_x_v[i].value(0), c.value(0)*v[i].value(0))

        # division vector by ts
        v_d_ts=v/c
        ts_d_v=c/v
        for i in range(len(v)):
            self.assertAlmostEqual(v_d_ts[i].value(0), v[i].value(0)/c.value(0))
            self.assertAlmostEqual(ts_d_v[i].value(0), c.value(0)/v[i].value(0))

        # add vector by ts
        v_a_ts=v + c
        ts_a_v=c + v
        for i in range(len(v)):
            self.assertAlmostEqual(v_a_ts[i].value(0), v[i].value(0) + c.value(0))
            self.assertAlmostEqual(ts_a_v[i].value(0), c.value(0) + v[i].value(0))

        # sub vector by ts
        v_s_ts=v - c
        ts_s_v=c - v
        for i in range(len(v)):
            self.assertAlmostEqual(v_s_ts[i].value(0), v[i].value(0) - c.value(0))
            self.assertAlmostEqual(ts_s_v[i].value(0), c.value(0) - v[i].value(0))

        # vector mult vector
        va=v
        vb=2.0*v

        v_m_v=va*vb
        self.assertEqual(len(v_m_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_m_v[i].value(0), va[i].value(0)*vb[i].value(0))

        # vector div vector
        v_d_v=va/vb
        self.assertEqual(len(v_d_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_d_v[i].value(0), va[i].value(0)/vb[i].value(0))

        # vector add vector
        v_a_v=va + vb
        self.assertEqual(len(v_a_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_a_v[i].value(0), va[i].value(0) + vb[i].value(0))

        # vector sub vector
        v_s_v=va - vb
        self.assertEqual(len(v_s_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_s_v[i].value(0), va[i].value(0) - vb[i].value(0))

        # vector unary minus
        v_u=- va
        self.assertEqual(len(v_u), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_u[i].value(0), -va[i].value(0))

        # integral functions, just to verify exposure works, and one value is according to spec.
        ta2=api.TimeAxis(t0, dt*24, n//24)
        v_avg=v.average(ta2)
        v_int=v.integral(ta2)
        v_acc=v.accumulate(ta2)
        v_sft=v.time_shift(dt*24)
        self.assertIsNotNone(v_avg)
        self.assertIsNotNone(v_int)
        self.assertIsNotNone(v_acc)
        self.assertIsNotNone(v_sft)
        self.assertAlmostEqual(v_avg[0].value(0), 1.0)
        self.assertAlmostEqual(v_int[0].value(0), 86400.0)
        self.assertAlmostEqual(v_acc[0].value(0), 0.0)
        self.assertAlmostEqual(v_sft[0].time(0), t0 + dt*24)

        # min/max functions
        min_v_double=va.min(-1000.0)
        max_v_double=va.max(1000.0)
        self.assertAlmostEqual(min_v_double[0].value(0), -1000.0)
        self.assertAlmostEqual(max_v_double[0].value(0), +1000.0)
        min_v_double=api.min(va, -1000.0)
        max_v_double=api.max(va, +1000.0)
        self.assertAlmostEqual(min_v_double[0].value(0), -1000.0)
        self.assertAlmostEqual(max_v_double[0].value(0), +1000.0)
        # c = 10.0
        c1000=100.0*c
        min_v_double=va.min(-c1000)
        max_v_double=va.max(c1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -c1000.value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), c1000.value(0))
        min_v_double=api.min(va, -c1000)
        max_v_double=api.max(va, c1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -c1000.value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), c1000.value(0))

        v1000=va*1000.0
        min_v_double=va.min(-v1000)
        max_v_double=va.max(v1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -v1000[0].value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), v1000[0].value(0))
        min_v_double=api.min(va, -v1000)
        max_v_double=api.max(va, v1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -v1000[0].value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), v1000[0].value(0))

        # finally, test that exception is raised if we try to multiply two unequal sized vectors

        with self.assertRaises(RuntimeError):
            _ = v_clone*va  # unequal-sized ts-vector op should raise

        # also test that empty vector + vector -> vector etc.
        va_2=va + api.TsVector()
        va_3=api.TsVector() + va
        va_4=va - api.TsVector()
        va_5=api.TsVector() - va
        va_x=api.TsVector() + api.TsVector()
        self.assertEqual(len(va_2), len(va))
        self.assertEqual(len(va_3), len(va))
        self.assertEqual(len(va_4), len(va))
        self.assertEqual(len(va_5), len(va))
        self.assertEqual(not va_x, True)
        self.assertEqual(not va_2, False)
        va_2_ok=False
        va_x_ok=True
        if va_2:
            va_2_ok=True
        if va_x:
            va_x_ok=False
        self.assertTrue(va_2_ok)
        self.assertTrue(va_x_ok)