Example #1
    def test_extend_vector_of_timeseries(self):
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=512

        tsvector=api.TsVector()

        ta=api.TimeAxisFixedDeltaT(t0 + 3*n*dt, dt, 2*n)

        tsvector.push_back(api.TimeSeries(
            ta=api.TimeAxisFixedDeltaT(t0, dt, 2*n),
            fill_value=1.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))
        tsvector.push_back(api.TimeSeries(
            ta=api.TimeAxisFixedDeltaT(t0 + 2*n*dt, dt, 2*n),
            fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))

        extension=api.TimeSeries(ta=ta, fill_value=8.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)

        # extend after all time-series in the vector
        extended_tsvector=tsvector.extend_ts(extension)

        # assert first element
        for i in range(2*n):
            self.assertEqual(extended_tsvector[0](t0 + i*dt), 1.0)
        for i in range(n):
            self.assertTrue(math.isnan(extended_tsvector[0](t0 + (2*n + i)*dt)))
        for i in range(2*n):
            self.assertEqual(extended_tsvector[0](t0 + (3*n + i)*dt), 8.0)

        # assert second element
        for i in range(2*n):
            self.assertEqual(extended_tsvector[1](t0 + (2*n + i)*dt), 2.0)
        for i in range(n):
            self.assertEqual(extended_tsvector[1](t0 + (4*n + i)*dt), 8.0)

        tsvector_2=api.TsVector()
        tsvector_2.push_back(api.TimeSeries(
            ta=api.TimeAxisFixedDeltaT(t0 + 2*n*dt, dt, 4*n),
            fill_value=10.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))
        tsvector_2.push_back(api.TimeSeries(
            ta=api.TimeAxisFixedDeltaT(t0 + 4*n*dt, dt, 4*n),
            fill_value=20.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))

        # extend each element in tsvector by the corresponding element in tsvector_2
        extended_tsvector=tsvector.extend_ts(tsvector_2)

        # assert first element
        for i in range(2*n):
            self.assertEqual(extended_tsvector[0](t0 + i*dt), 1.0)
        for i in range(4*n):
            self.assertEqual(extended_tsvector[0](t0 + (2*n + i)*dt), 10.0)

        # assert second element
        for i in range(2*n):
            self.assertEqual(extended_tsvector[1](t0 + (2*n + i)*dt), 2.0)
        for i in range(4*n):
            self.assertEqual(extended_tsvector[1](t0 + (4*n + i)*dt), 20.0)
Example #2
    def test_abs(self):
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=4
        v=api.DoubleVector([1.0, -1.5, float("nan"), 3.0])
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)
        ts0=api.TimeSeries(ta=ta, values=v, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        tsa=api.TimeSeries('a')
        ts1=tsa.abs()
        ts1_blob=ts1.serialize()
        ts1=api.TimeSeries.deserialize(ts1_blob)
        self.assertTrue(ts1.needs_bind())
        bts=ts1.find_ts_bind_info()
        self.assertEqual(len(bts), 1)
        bts[0].ts.bind(ts0)
        ts1.bind_done()
        self.assertFalse(ts1.needs_bind())
        self.assertAlmostEqual(ts0.value(0), ts1.value(0), 6)
        self.assertAlmostEqual(abs(ts0.value(1)), ts1.value(1), 6)
        self.assertTrue(math.isnan(ts1.value(2)))
        self.assertAlmostEqual(ts0.value(3), ts1.value(3), 6)
        tsv0=api.TsVector()
        tsv0.append(ts0)
        tsv1=tsv0.abs()
        self.assertAlmostEqual(tsv0[0].value(0), tsv1[0].value(0), 6)
        self.assertAlmostEqual(abs(tsv0[0].value(1)), tsv1[0].value(1), 6)
        self.assertTrue(math.isnan(tsv1[0].value(2)))
        self.assertAlmostEqual(tsv0[0].value(3), tsv1[0].value(3), 6)
Example #3
    def test_percentiles(self):
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)
        timeseries=api.TsVector()

        for i in range(10):
            timeseries.append(
                api.TimeSeries(ta=ta, fill_value=i, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))

        wanted_percentiles=api.IntVector([api.statistics_property.MIN_EXTREME,
                                          0, 10, 50,
                                          api.statistics_property.AVERAGE,
                                          70, 100,
                                          api.statistics_property.MAX_EXTREME])
        ta_day=api.TimeAxisFixedDeltaT(t0, dt*24, n//24)
        ta_day2=api.TimeAxis(t0, dt*24, n//24)
        percentiles=api.percentiles(timeseries, ta_day, wanted_percentiles)
        percentiles2=timeseries.percentiles(ta_day2, wanted_percentiles)  # just to verify it works with alt. syntax

        self.assertEqual(len(percentiles2), len(percentiles))

        for i in range(len(ta_day)):
            self.assertAlmostEqual(0.0, percentiles[0].value(i), 3, "min-extreme ")
            self.assertAlmostEqual(0.0, percentiles[1].value(i), 3, "  0-percentile")
            self.assertAlmostEqual(0.9, percentiles[2].value(i), 3, " 10-percentile")
            self.assertAlmostEqual(4.5, percentiles[3].value(i), 3, " 50-percentile")
            self.assertAlmostEqual(4.5, percentiles[4].value(i), 3, "   -average")
            self.assertAlmostEqual(6.3, percentiles[5].value(i), 3, " 70-percentile")
            self.assertAlmostEqual(9.0, percentiles[6].value(i), 3, "100-percentile")
            self.assertAlmostEqual(9.0, percentiles[7].value(i), 3, "max-extreme")
Example #4
    def _create_forecast_set(self, n_fc, t0, dt, n_steps, dt_fc, fx):
        """

        Parameters
        ----------
        n_fc : int number of forecasts, e.g. 8
        t0 : utctime start of first forecast
        dt : utctimespan delta t for forecast-ts
        n_steps : number of steps in one forecast-ts
        dt_fc : utctimespan delta t between each forecast, like deltahours(6)
        fx : lambda time_axis:  a function returning a DoubleVector with values for the supplied time-axis

        Returns
        -------
        api.TsVector()

        """
        fc_set = api.TsVector()
        for i in range(n_fc):
            ta = api.Timeaxis2(t0 + i * dt_fc, dt, n_steps)
            ts = api.Timeseries(
                ta=ta,
                values=fx(ta),
                point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
            fc_set.append(ts)
        return fc_set
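
    # Usage sketch (illustrative values only; assumes the api.Timeaxis2/api.Timeseries
    # names used above are available). It shows how fx receives each forecast's
    # time-axis and returns matching values:
    #
    #   fc_set = self._create_forecast_set(
    #       n_fc=8, t0=api.utctime_now(), dt=api.deltahours(1), n_steps=66,
    #       dt_fc=api.deltahours(6),
    #       fx=lambda ta: api.DoubleVector([float(i) for i in range(ta.size())]))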
Example #5
    def _call_qm(self, prep_fcst_lst, weights, geo_points, ta,
                 input_source_types, nb_prior_scenarios):

        # Check that the interpolation period is within the time axis (ta)
        ta_start = ta.time(0)
        ta_end = ta.time(ta.size() - 1)  # start of last time step
        interp_start = ta_start + api.deltahours(self.qm_interp_hours[0])
        interp_end = ta_start + api.deltahours(self.qm_interp_hours[1])
        if interp_start > ta_end:
            interp_start = api.no_utctime
            interp_end = api.no_utctime
        if interp_end > ta_end:
            interp_end = ta_end

        # Re-organize data before sending to api.quantile_map_forecast. For each source type and
        # geo-point, group the forecasts as TsVectorSets, send them to api.quantile_map_forecast
        # and return the results as an ensemble of source-keyed dictionaries of geo-ts.
        # First re-organize the weights - one weight per TsVector set.
        weight_sets = api.DoubleVector([w for ws in weights for w in ws])

        # New version
        results = [{} for _ in range(nb_prior_scenarios)]
        for src in input_source_types:
            qm_scenarios = []
            for geo_pt_idx, geo_pt in enumerate(geo_points):
                forecast_sets = api.TsVectorSet()
                for i, fcst_group in enumerate(prep_fcst_lst):
                    for j, forecast in enumerate(fcst_group):
                        scenarios = api.TsVector()
                        for member in forecast:
                            scenarios.append(member[src][geo_pt_idx].ts)
                        forecast_sets.append(scenarios)
                        if i == self.repo_prior_idx and j == 0:
                            prior_data = scenarios
                            # TODO: read prior if repo_prior_idx is None

                qm_scenarios.append(
                    api.quantile_map_forecast(forecast_sets, weight_sets,
                                              prior_data, ta, interp_start,
                                              interp_end, True))

            # Alternative: convert to array to enable slicing
            # arr = np.array(qm_scenarios)

            # Now organize into the desired output format: an ensemble of source-keyed dictionaries of geo-ts
            for i in range(0, nb_prior_scenarios):
                # source_dict = {}
                # ts_vct = arr[:, i]
                ts_vct = [x[i] for x in qm_scenarios]
                vct = self.source_vector_map[src]()
                for geo_pt, ts in zip(geo_points, ts_vct):
                    vct.append(self.source_type_map[src](geo_pt, ts))
                # Alternatives:
                # vct[:] = [self.source_type_map[src](geo_pt, ts) for geo_pt, ts in zip(geo_points, ts_vct)]
                # vct = self.source_vector_map[src]([self.source_type_map[src](geo_pt, ts) for geo_pt, ts in zip(geo_points, ts_vct)])
                results[i][src] = vct
        return results
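
    # A sketch (inferred from the loops above, names illustrative only) of the nesting
    # _call_qm expects for prep_fcst_lst:
    #   prep_fcst_lst[fcst_group][forecast][member] -> dict keyed by source type
    #     -> indexable by geo-point index -> an object exposing a `.ts` TimeSeries
    # e.g. member_ts = prep_fcst_lst[0][0][0]['temperature'][0].ts  # 'temperature' is hypothetical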
Example #6
    def test_vector_of_timeseries(self):
        dv = np.arange(self.ta.size())
        v = api.DoubleVector.from_numpy(dv)
        tsf = api.TsFactory()
        tsa = tsf.create_point_ts(self.n, self.t, self.d, v)
        tsvector = api.TsVector()
        self.assertEqual(len(tsvector), 0)
        tsvector.push_back(tsa)
        self.assertEqual(len(tsvector), 1)
Example #7
    def test_ts_factory(self):
        dv=np.arange(self.ta.size())
        v=api.DoubleVector.from_numpy(dv)
        t=api.UtcTimeVector()
        for i in range(self.ta.size()):
            t.push_back(self.ta(i).start)
        t.push_back(self.ta(self.ta.size() - 1).end)
        tsf=api.TsFactory()
        ts1=tsf.create_point_ts(self.ta.size(), self.t, self.d, v)
        ts2=tsf.create_time_point_ts(self.ta.total_period(), t, v)
        tslist=api.TsVector()
        tslist.push_back(ts1)
        tslist.push_back(ts2)
        self.assertEqual(tslist.size(), 2)
Example #8
    def test_percentiles_with_min_max_extremes(self):
        """ the percentiles function now also supports picking out the min-max peak value
            within each interval.
            Setup test-data so that we have a well known percentile result,
            but also have peak-values within the interval that we can
            verify.
            We let hour ts 0..9 have values 0..9 constant 24*10 days
               then modify ts[1], every day first  value to a peak min value equal to - day_no*1
                                  every day second value to a peak max value equal to + day_no*1
                                  every day 3rd    value to a nan value
            ts[1] should then have same average value for each day (so same percentile)
                                            but min-max extreme should be equal to +- day_no*1
        """
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxis(t0, dt, n)
        timeseries=api.TsVector()
        p_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE
        for i in range(10):
            timeseries.append(api.TimeSeries(ta=ta, fill_value=i, point_fx=p_fx))

        ts=timeseries[1]  # pick this one to insert min/max extremes
        for i in range(0, 240, 24):
            ts.set(i + 0, 1.0 - 100*i/24.0)
            ts.set(i + 1, 1.0 + 100*i/24.0)  # notice that when i==0, this gives 1.0
            ts.set(i + 2, float('nan'))  # also put in a nan, just to verify it is ignored during average processing

        wanted_percentiles=api.IntVector([api.statistics_property.MIN_EXTREME,
                                          0, 10, 50,
                                          api.statistics_property.AVERAGE,
                                          70, 100,
                                          api.statistics_property.MAX_EXTREME])
        ta_day=api.TimeAxis(t0, dt*24, n//24)
        percentiles=api.percentiles(timeseries, ta_day, wanted_percentiles)
        for i in range(len(ta_day)):
            if i == 0:  # first timestep: the min/max extremes are picked from the 0th and 9th ts.
                self.assertAlmostEqual(0.0, percentiles[0].value(i), 3, "min-extreme ")
                self.assertAlmostEqual(9.0, percentiles[7].value(i), 3, "max-extreme")
            else:
                self.assertAlmostEqual(1.0 - 100.0*i, percentiles[0].value(i), 3, "min-extreme ")
                self.assertAlmostEqual(1.0 + 100.0*i, percentiles[7].value(i), 3, "max-extreme")
            self.assertAlmostEqual(0.0, percentiles[1].value(i), 3, "  0-percentile")
            self.assertAlmostEqual(0.9, percentiles[2].value(i), 3, " 10-percentile")
            self.assertAlmostEqual(4.5, percentiles[3].value(i), 3, " 50-percentile")
            self.assertAlmostEqual(4.5, percentiles[4].value(i), 3, "   -average")
            self.assertAlmostEqual(6.3, percentiles[5].value(i), 3, " 70-percentile")
            self.assertAlmostEqual(9.0, percentiles[6].value(i), 3, "100-percentile")
Example #9
    def test_vector_of_timeseries(self):
        dv=np.arange(self.ta.size())
        v=api.DoubleVector.from_numpy(dv)
        tsf=api.TsFactory()
        tsa=tsf.create_point_ts(self.n, self.t, self.d, v)
        tsvector=api.TsVector()
        self.assertEqual(len(tsvector), 0)
        tsvector.push_back(tsa)
        self.assertEqual(len(tsvector), 1)
        tsvector.push_back(tsa)
        vv=tsvector.values_at_time(self.ta.time(3))  # verify it's easy to get vectorized results out at time t
        self.assertEqual(len(vv), len(tsvector))
        self.assertAlmostEqual(vv[0], 3.0)
        self.assertAlmostEqual(vv[1], 3.0)
        ts_list=[tsa, tsa]
        vv=api.ts_vector_values_at_time(ts_list, self.ta.time(4))  # also check that it works with list(TimeSeries)
        self.assertEqual(len(vv), len(tsvector))
        self.assertAlmostEqual(vv[0], 4.0)
        self.assertAlmostEqual(vv[1], 4.0)
Example #10
    def test_timeseries_vector(self):
        c=api.Calendar()
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)

        a=api.TimeSeries(ta=ta, fill_value=3.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(ta=ta, fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)

        v=api.TsVector()
        v.append(a)
        v.append(b)

        self.assertEqual(len(v), 2)
        self.assertAlmostEqual(v[0].value(0), 3.0, msg="expect first ts to be 3.0")
        aa=api.TimeSeries(ta=a.time_axis, values=a.values,
                          point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)  # copy construct (really copy the values!)
        a.fill(1.0)
        self.assertAlmostEqual(v[0].value(0), 1.0, msg="expect first ts to be 1.0, because the vector keeps a reference")
        self.assertAlmostEqual(aa.value(0), 3.0)
Example #11
from statkraft.ltm.io.run_repository import RunRepository
import shyft.api as sa
from statkraft.ltm.state import quantity
from statkraft.ltm.scripting import plot_ts
import matplotlib.pyplot as plt

rr = RunRepository()
t0 = sa.utctime_now()
run_id = rr.find_closest_operational_run(t0)

run = rr.recreate(run_id=run_id)
calendar = sa.Calendar("Europe/Oslo")
dt = 3 * calendar.HOUR
n = calendar.diff_units(run.time_axis.total_period().start,
                        run.time_axis.total_period().end, dt)
time_axis = sa.TimeAxis(run.start_utc, dt, n)
norway = run.model.countries['Norway']
tsv = quantity(sa.TsVector(), "EUR")

legend_list = []

#val_in_eur = quantity(sa.TsVector(), "EUR")
values = None

for m_area in norway.aggregation_list:
    price = m_area.power_price(time_axis=time_axis, unit="EUR/MWh")
    production = m_area.production(time_axis=time_axis, unit="MWh")
    val_in_eur = price * production
    if values is None:
        values = val_in_eur
    else:
        values += val_in_eur
Example #12
    def _call_qm_old(self, prep_fcst_lst, weights, geo_points, ta,
                     input_source_types, nb_prior_scenarios):

        # TODO: Extend handling to cover all cases and send out warnings if the interpolation period is modified
        # Check ta against the interpolation start and end times
        # Simple logic for the time being; should be refined for the overlap cases
        ta_start = ta.time(0)
        ta_end = ta.time(ta.size() - 1)  # start of last time step
        interp_start = ta_start + api.deltahours(self.qm_interp_hours[0])
        interp_end = ta_start + api.deltahours(self.qm_interp_hours[1])
        if interp_start > ta_end:
            interp_start = api.no_utctime
            interp_end = api.no_utctime
        if interp_end > ta_end:
            interp_end = ta_end

        # Re-organize data before sending to api.quantile_map_forecast. For each source type and
        # geo-point, group the forecasts as TsVectorSets, send them to api.quantile_map_forecast
        # and return the results as an ensemble of source-keyed dictionaries of geo-ts.
        # First re-organize the weights - one weight per TsVector set.
        weight_sets = api.DoubleVector([w for ws in weights for w in ws])

        # New version: see _call_qm above, which contains this logic in live form.

        # Old version
        scenario_arrays = {}
        for src in input_source_types:
            qm_scenarios = []
            for geo_pt_idx, geo_pt in enumerate(geo_points):
                forecast_sets = api.TsVectorSet()
                for i, fcst_group in enumerate(prep_fcst_lst):
                    for j, forecast in enumerate(fcst_group):
                        scenarios = api.TsVector()
                        for member in forecast:
                            scenarios.append(member[src][geo_pt_idx].ts)
                        forecast_sets.append(scenarios)

                        # TODO: handle prior similarly if repo_prior_idx is None
                        if i == self.repo_prior_idx and j == 0:
                            prior_data = scenarios

                qm_scenarios.append(
                    api.quantile_map_forecast(forecast_sets, weight_sets,
                                              prior_data, ta, interp_start,
                                              interp_end, True))
            scenario_arrays[src] = np.array(qm_scenarios)

        # Now organize into the desired output format: an ensemble of source-keyed dictionaries of geo-ts
        # TODO: write a function to extract info about the prior, like the number of scenarios
        nb_prior_scenarios = scenario_arrays[input_source_types[0]].shape[1]
        results = []
        for i in range(0, nb_prior_scenarios):
            source_dict = {}
            for src in input_source_types:
                ts_vct = scenario_arrays[src][:, i]
                vct = self.source_vector_map[src]()
                for geo_pt, ts in zip(geo_points, ts_vct):
                    vct.append(self.source_type_map[src](geo_pt, ts))
                # Alternatives:
                # vct[:] = [self.source_type_map[src](geo_pt, ts) for geo_pt, ts in zip(geo_points, ts_vct)]
                # vct = self.source_vector_map[src]([self.source_type_map[src](geo_pt, ts) for geo_pt, ts in zip(geo_points, ts_vct)])
                source_dict[src] = vct
            results.append(source_dict)
        return results
Example #13
from statkraft.ltm.io.run_repository import RunRepository
import shyft.api as sa
from statkraft.ltm.state import quantity
from statkraft.ltm.io.converter import to_pandas

rr = RunRepository()
t0 = sa.utctime_now() - sa.Calendar.DAY

run_id = rr.find_closest_operational_run(t0)
run = rr.recreate(run_id=run_id)
areas = ["SE1", "SE2", "SE3", "SE4"]
energy_inflow = quantity(sa.TsVector(), "GWh")
time_axis = sa.TimeAxis(sa.utctime_now(), sa.Calendar.MONTH, 24)

years = run.run_info.scenario_years

for key in run.model.areas.keys():
    area = run.model.areas[key]
    if area.market_price_area is not None and area.market_price_area.name in areas:
        storable_inflow = area.aggregated_hydro.storable_inflow(
            unit="GWh", time_axis=time_axis)
        non_storable_inflow = area.aggregated_hydro.nonstorable_inflow(
            unit="GWh", time_axis=time_axis)
        bypass = area.aggregated_hydro.bypass(unit="GWh", time_axis=time_axis)
        spill = area.aggregated_hydro.spillage(unit="GWh", time_axis=time_axis)
        energy_inflow += storable_inflow + non_storable_inflow - bypass - spill

df, pip = to_pandas(energy_inflow)
df = df.magnitude
df.columns = years + [df.columns[-1]]
df.to_csv("inflow_sweden.csv")
Example #14
import matplotlib.pyplot as plt
import shyft.api as sa
from statkraft.ltm.io.run_repository import RunRepository
from statkraft.ltm.scripting import plot_ts
from statkraft.ltm.state import quantity

rr = RunRepository()
rid = rr.find_closest_operational_run(sa.utctime_now())
run = rr.recreate(run_id=rid)

time_axis = sa.TimeAxis(run.start_utc, sa.Calendar.DAY, 52 * 7)

tsv = quantity(sa.TsVector(), "EUR/MWh")

legend_list = []
for area_name, area in run.model.areas.items():
    legend_list.append(area_name)
    tsv.extend(area.power_price.mean(time_axis=time_axis).magnitude)

plot_ts(tsv)
plt.legend(legend_list)
plt.show()
Example #15
from statkraft.ltm.io.run_repository import RunRepository
import shyft.api as sa
from statkraft.ltm.state import quantity
from statkraft.ltm.scripting import plot_ts
import matplotlib.pyplot as plt
from statkraft.ltm.io.converter import to_pandas

rr = RunRepository()
t0 = sa.utctime_now() - sa.Calendar.DAY * 2
res = rr.search(labels=["operational", "norway"], created_from=t0)
areas = ["NO1", "NO2", "NO5"]
tsv = quantity(sa.TsVector(), "GWh")

tot_cons_list = []
legend_list = []
for key in res.keys():
    run_info = res[key]
    run = rr.recreate(run_id=key)
    legend_list.append(key)
    time_axis = sa.TimeAxis(sa.utctime_now(), sa.Calendar.DAY, 365)
    tot_cons = quantity(sa.TsVector(), "GWh")
    for key1 in run.model.market.areas.keys():
        this_area = run.model.market.areas[key1]
        if key1 in areas:
            cons = this_area.consumption.mean(unit="GWh", time_axis=time_axis)
            tot_cons += cons
    tot_cons_list.append(tot_cons)

diff_cons = tot_cons_list[0] - tot_cons_list[1]
tsv.extend(diff_cons.magnitude)
Example #16
    def test_a_time_series_vector(self):
        c=api.Calendar()
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)

        a=api.TimeSeries(ta=ta, fill_value=3.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(ta=ta, fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        c=api.TimeSeries(ta=ta, fill_value=10.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        v=api.TsVector()
        v.append(a)
        v.append(b)

        self.assertEqual(len(v), 2)
        self.assertAlmostEqual(v[0].value(0), 3.0, msg="expect first ts to be 3.0")
        aa=api.TimeSeries(ta=a.time_axis, values=a.values,
                          point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)  # copy construct (really copy the values!)
        a.fill(1.0)
        self.assertAlmostEqual(v[0].value(0), 1.0, msg="expect first ts to be 1.0, because the vector keeps a reference")
        self.assertAlmostEqual(aa.value(0), 3.0)

        vt=v.values_at(t0).to_numpy()
        self.assertEqual(len(vt), len(v))
        v1=v[0:1]
        self.assertEqual(len(v1), 1)
        self.assertAlmostEqual(v1[0].value(0), 1.0)
        v_clone=api.TsVector(v)
        self.assertEqual(len(v_clone), len(v))
        del v_clone[-1]
        self.assertEqual(len(v_clone), 1)
        self.assertEqual(len(v), 2)
        v_slice_all=v.slice(api.IntVector())
        v_slice_1=v.slice(api.IntVector([1]))
        v_slice_12=v.slice(api.IntVector([0, 1]))
        self.assertEqual(len(v_slice_all), 2)
        self.assertEqual(len(v_slice_1), 1)
        self.assertAlmostEqual(v_slice_1[0].value(0), 2.0)
        self.assertEqual(len(v_slice_12), 2)
        self.assertAlmostEqual(v_slice_12[0].value(0), 1.0)

        # multiplication by scalar
        v_x_2a=v*2.0
        v_x_2b=2.0*v
        for i in range(len(v)):
            self.assertAlmostEqual(v_x_2a[i].value(0), 2*v[i].value(0))
            self.assertAlmostEqual(v_x_2b[i].value(0), 2*v[i].value(0))

        # division by scalar
        v_d_a=v/3.0
        v_d_b=3.0/v
        for i in range(len(v)):
            self.assertAlmostEqual(v_d_a[i].value(0), v[i].value(0)/3.0)
            self.assertAlmostEqual(v_d_b[i].value(0), 3.0/v[i].value(0))

        # addition by scalar
        v_a_a=v + 3.0
        v_a_b=3.0 + v
        for i in range(len(v)):
            self.assertAlmostEqual(v_a_a[i].value(0), v[i].value(0) + 3.0)
            self.assertAlmostEqual(v_a_b[i].value(0), 3.0 + v[i].value(0))

        # sub by scalar
        v_s_a=v - 3.0
        v_s_b=3.0 - v
        for i in range(len(v)):
            self.assertAlmostEqual(v_s_a[i].value(0), v[i].value(0) - 3.0)
            self.assertAlmostEqual(v_s_b[i].value(0), 3.0 - v[i].value(0))

        # multiplication vector by ts
        v_x_ts=v*c
        ts_x_v=c*v
        for i in range(len(v)):
            self.assertAlmostEqual(v_x_ts[i].value(0), v[i].value(0)*c.value(0))
            self.assertAlmostEqual(ts_x_v[i].value(0), c.value(0)*v[i].value(0))

        # division vector by ts
        v_d_ts=v/c
        ts_d_v=c/v
        for i in range(len(v)):
            self.assertAlmostEqual(v_d_ts[i].value(0), v[i].value(0)/c.value(0))
            self.assertAlmostEqual(ts_d_v[i].value(0), c.value(0)/v[i].value(0))

        # add vector by ts
        v_a_ts=v + c
        ts_a_v=c + v
        for i in range(len(v)):
            self.assertAlmostEqual(v_a_ts[i].value(0), v[i].value(0) + c.value(0))
            self.assertAlmostEqual(ts_a_v[i].value(0), c.value(0) + v[i].value(0))

        # sub vector by ts
        v_s_ts=v - c
        ts_s_v=c - v
        for i in range(len(v)):
            self.assertAlmostEqual(v_s_ts[i].value(0), v[i].value(0) - c.value(0))
            self.assertAlmostEqual(ts_s_v[i].value(0), c.value(0) - v[i].value(0))

        # vector mult vector
        va=v
        vb=2.0*v

        v_m_v=va*vb
        self.assertEqual(len(v_m_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_m_v[i].value(0), va[i].value(0)*vb[i].value(0))

        # vector div vector
        v_d_v=va/vb
        self.assertEqual(len(v_d_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_d_v[i].value(0), va[i].value(0)/vb[i].value(0))

        # vector add vector
        v_a_v=va + vb
        self.assertEqual(len(v_a_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_a_v[i].value(0), va[i].value(0) + vb[i].value(0))

        # vector sub vector
        v_s_v=va - vb
        self.assertEqual(len(v_s_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_s_v[i].value(0), va[i].value(0) - vb[i].value(0))

        # vector unary minus
        v_u=- va
        self.assertEqual(len(v_u), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_u[i].value(0), -va[i].value(0))

        # integral functions, just to verify exposure works, and one value is according to spec.
        ta2=api.TimeAxis(t0, dt*24, n//24)
        v_avg=v.average(ta2)
        v_int=v.integral(ta2)
        v_acc=v.accumulate(ta2)
        v_sft=v.time_shift(dt*24)
        self.assertIsNotNone(v_avg)
        self.assertIsNotNone(v_int)
        self.assertIsNotNone(v_acc)
        self.assertIsNotNone(v_sft)
        self.assertAlmostEqual(v_avg[0].value(0), 1.0)
        self.assertAlmostEqual(v_int[0].value(0), 86400.0)
        self.assertAlmostEqual(v_acc[0].value(0), 0.0)
        self.assertAlmostEqual(v_sft[0].time(0), t0 + dt*24)

        # min/max functions
        min_v_double=va.min(-1000.0)
        max_v_double=va.max(1000.0)
        self.assertAlmostEqual(min_v_double[0].value(0), -1000.0)
        self.assertAlmostEqual(max_v_double[0].value(0), +1000.0)
        min_v_double=api.min(va, -1000.0)
        max_v_double=api.max(va, +1000.0)
        self.assertAlmostEqual(min_v_double[0].value(0), -1000.0)
        self.assertAlmostEqual(max_v_double[0].value(0), +1000.0)
        # c = 10.0
        c1000=100.0*c
        min_v_double=va.min(-c1000)
        max_v_double=va.max(c1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -c1000.value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), c1000.value(0))
        min_v_double=api.min(va, -c1000)
        max_v_double=api.max(va, c1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -c1000.value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), c1000.value(0))

        v1000=va*1000.0
        min_v_double=va.min(-v1000)
        max_v_double=va.max(v1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -v1000[0].value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), v1000[0].value(0))
        min_v_double=api.min(va, -v1000)
        max_v_double=api.max(va, v1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -v1000[0].value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), v1000[0].value(0))

        # finally, test that an exception is raised if we try to multiply two unequal-sized ts-vectors
        with self.assertRaises(RuntimeError):
            _=v_clone*va  # v_clone has one element, va has two

        # also test that empty vector + vector -> vector etc.
        va_2=va + api.TsVector()
        va_3=api.TsVector() + va
        va_4=va - api.TsVector()
        va_5=api.TsVector() - va
        va_x=api.TsVector() + api.TsVector()
        self.assertEqual(len(va_2), len(va))
        self.assertEqual(len(va_3), len(va))
        self.assertEqual(len(va_4), len(va))
        self.assertEqual(len(va_5), len(va))
        self.assertFalse(va_x)
        self.assertTrue(va_2)
        va_2_ok=False
        va_x_ok=True
        if va_2:
            va_2_ok=True
        if va_x:
            va_x_ok=False
        self.assertTrue(va_2_ok)
        self.assertTrue(va_x_ok)
Example #17
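# Assumed context, not shown in this excerpt: `run` is a recreated LTM run and
# `time_axis` an sa.TimeAxis, with the usual imports from the earlier examples
# (import shyft.api as sa; quantity, plot_ts, matplotlib.pyplot as plt).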
l_reservoirs = {}
legend_list = []
for key in run.model.areas.keys():
    emps_area = run.model.areas[key]
    max_volume_as_energy = quantity(0, "GWh")
    max_tag = None
    #print(emps_area.name)
    if emps_area.detailed_hydro is not None:
        for res_key in emps_area.detailed_hydro.reservoirs.keys():
            res = emps_area.detailed_hydro.reservoirs[res_key]
            res_volume_as_energy = res.max_volume_as_energy
            if res_volume_as_energy > max_volume_as_energy:
                max_volume_as_energy = res_volume_as_energy
                max_tag = res.local_tag
        if max_tag is not None:
            max_res = emps_area.detailed_hydro.reservoirs[max_tag]
            l_reservoirs[key] = max_res
        else:
            print("hæh?")

tsv = quantity(sa.TsVector(), "GWh")
#print(l_reservoirs)
for key in l_reservoirs.keys():
    obj_res = l_reservoirs[key]
    legend_list.append(obj_res.name)
    obj_res_mean_volume = obj_res.energy.mean(time_axis=time_axis, unit="GWh")
    tsv.extend(obj_res_mean_volume.magnitude)

plot_ts(tsv)
plt.legend(legend_list)
plt.show(block=True)
Example #18
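# Assumed context as in the previous example: `run`, `time_axis`, and the usual imports.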
l_reservoirs = {}
legend_list = []
for key in run.model.areas.keys():
    emps_area = run.model.areas[key]
    max_volume = 0
    max_tag = None

    if emps_area.detailed_hydro is not None:
        for res_key in emps_area.detailed_hydro.reservoirs.keys():
            res = emps_area.detailed_hydro.reservoirs[res_key]
            res_volume = res.max_volume.magnitude
            if res_volume > max_volume:
                max_volume = res_volume
                max_tag = res.local_tag
        if max_tag is not None:
            max_res = emps_area.detailed_hydro.reservoirs[max_tag]
            l_reservoirs[key] = max_res
        else:
            print("hæh?")

tsv = quantity(sa.TsVector(), "m**3")
print(l_reservoirs)
for key in l_reservoirs.keys():
    obj_res = l_reservoirs[key]
    legend_list.append(obj_res.name)
    obj_res_mean_volume = obj_res.volume.mean(time_axis=time_axis, unit="m**3")
    tsv.extend(obj_res_mean_volume.magnitude)

plot_ts(tsv)
plt.legend(legend_list)
plt.show(block=True)