def test_update_thermal_demand_forecast(ctrl, obs, ua_mocks, monkeypatch):
    ctrl_proxy = ua_mocks[0].ctrl_agent
    # Note: The controller for testing is initialized with the current system
    # time.  *start* thus has to lie in the future of utcnow().
    start = arrow.utcnow().replace(days=1, hour=0, minute=0, second=0,
                                   microsecond=0)
    res = 900
    data = np.arange(96 * 2)
    fc = util.TimeSeries(start, res, data)
    target_schedule = util.TimeSeries(start, res, [0 for i in range(96)])
    weights = util.TimeSeries(start, res, [0 for i in range(96)])

    # Patch the ControllerAgent
    @coroutine
    def run_negotiation(neg_start):
        return start

    monkeypatch.setattr(ctrl, '_run_negotiation', run_negotiation)

    # Call the method and assert that a new negotiation has been started
    yield from ctrl_proxy.update_thermal_demand_forecast(
        target_schedule, weights, fc)
    ret = yield from ctrl._task_negotiation
    assert ret == start

    # Assert that all UnitAgents got the (same) data
    futs = [ua.update_forecast_called for ua in ua_mocks]
    ret = yield from gather(*futs)
    for i in ret:
        assert i == fc
def get_day_ahead_plan(self, dap_start, dap_end):
    """Return the day-ahead plan (which is the result of a negotiation)."""
    fc = self._forecast_p
    if dap_start < fc.start:
        raise ValueError('dap_start must be >= the forecast start')
    if dap_end > fc.end:
        raise ValueError('dap_end must be <= the forecast end')

    # TODO FIXME: Is this still correct?
    # FIXME cont.: Maybe _forecast_p should be handled differently?
    #
    # Update the target schedule and the weights array.  Because we
    # communicate our planned generation to an external party, we have to
    # set it as the new target schedule for our VPP:
    self._target_schedule = util.TimeSeries(fc.start, fc.res,
                                            np.zeros(len(fc)))
    self._target_schedule[:dap_end] = fc[:dap_end]
    self._weights = util.TimeSeries(fc.start, fc.res, np.zeros(len(fc)))
    self._weights[:dap_end] = 1

    # Extract the DAP
    dap = fc[dap_start:dap_end]
    return dap
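# Illustrative sketch (not part of the original module): how the day-ahead
# plan is expected to be cut out of the forecast by get_day_ahead_plan().
# The date-based slicing of util.TimeSeries is an assumption inferred from
# its usage above; it relies on the same module-level imports (arrow, np,
# util) as the surrounding code, and the concrete dates and values are
# made up.
def _example_day_ahead_slice():
    start = arrow.get('2015-01-01T09:00:00')
    res = 900  # 15-minute scheduling resolution
    fc = util.TimeSeries(start, res, np.arange(4 * 48))  # 48 h of forecast

    dap_start = arrow.get('2015-01-02')   # midnight, 15 h after the start
    dap_end = dap_start.replace(days=1)   # one full day later
    dap = fc[dap_start:dap_end]           # 96 intervals of 15 minutes
    assert len(list(dap)) == 96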
def test_user_get_thermal_demand_forecast(user):
    start = arrow.get(demand_meta['start_time'])
    fc = user._get_thermal_demand_forecast(start, 2)
    assert fc == util.TimeSeries(start, demand_meta['interval_minutes'] * 60,
                                 [0, 25])

    start = start.replace(hours=1)
    fc = user._get_thermal_demand_forecast(start, 2)
    assert fc == util.TimeSeries(start, demand_meta['interval_minutes'] * 60,
                                 [25, 60])
def _get_target(self, date):
    """Set and return ``self._ts`` and ``self._weights`` (both TimeSeries)
    for the given *date*.

    If no target has been given for *date*, a zero target with zero weights
    is used.  Raise a :exc:`RuntimeError` if more than one target is given
    for one day, as this has to be handled by the intraday routines.

    """
    # Get the appropriate target
    start_keys = []
    for start_key in self._target_gens:
        date_diff = start_key - date
        if 0 <= date_diff.days < 1:
            start_keys.append(start_key)
    if len(start_keys) > 1:
        raise RuntimeError('More than 1 target given for day %s' % date)

    # TODO FIXME: We have hardcoded stuff here for dap far too much
    if len(start_keys) == 0:
        logger.info('UserAgent:_get_target: no target for day %s, '
                    'taking zero ts' % date)
        target_res = 15 * 60  # TODO FIXME: hardcoded default resolution
        # Day-ahead planning relies on targets starting at midnight local
        # time, so we have to use this for the zero target as well:
        dap_target_start = util.get_tomorrow(date, 'local')
        self._ts = util.TimeSeries(dap_target_start, target_res,
                                   [0 for i in range(96)])
        self._weights = util.TimeSeries(dap_target_start, target_res,
                                        [0 for i in range(96)])
    else:
        # date_str = start_keys[0].format('YYYY-MM-DD HH:mm:ss ZZ')
        logger.info('UserAgent:_get_target: starting with target %s'
                    % start_keys[0])
        target_start = start_keys[0]
        target_res, target_gen = self._target_gens[target_start]
        # Read all target and weight data entries from the file
        ts_data = []
        weights_data = []
        for line in target_gen:
            data = line.strip().split(',')
            val, weight = map(float, data)
            ts_data.append(val)
            weights_data.append(weight)
        self._ts = util.TimeSeries(target_start, target_res, ts_data)
        self._weights = util.TimeSeries(target_start, target_res,
                                        weights_data)

    # The target has to be dap-compatible:
    assert self._ts.start.to('local').hour == 0

    return (self._ts, self._weights)
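# Illustrative sketch (assumption, not part of the original code): the
# "target belongs to the requested day" check used in _get_target() above.
# It relies on the same arrow import as the surrounding code; the dates are
# made up.
def _example_target_day_check():
    date = arrow.get('2015-01-02T00:00:00')
    # A target starting later on the same day yields a difference of 0 days:
    same_day = (arrow.get('2015-01-02T12:00:00') - date).days
    assert 0 <= same_day < 1          # -> the target is used for this day
    # A target starting the day before yields a negative day difference:
    prev_day = (arrow.get('2015-01-01T23:00:00') - date).days
    assert not (0 <= prev_day < 1)    # -> the target is ignored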
def test_update_thermal_demand_forecast_argchecks(ctrl, obs, err_kwargs):
    """Check various bad arguments."""
    kwargs = {
        'start': ctrl._da_scheduler._forecast_p.start,
        'res': ctrl._da_scheduler._scheduling_res,
        'data': np.zeros(ctrl._da_scheduler._scheduling_intervals),
    }
    kwargs.update(err_kwargs)
    fc = util.TimeSeries(**kwargs)
    target_schedule = util.TimeSeries(**kwargs)
    weights = util.TimeSeries(**kwargs)
    with pytest.raises(ValueError):
        yield from ctrl.update_thermal_demand_forecast(target_schedule,
                                                       weights, fc)
def _get_thermal_demand_forecast(self, date, hours):
    """Return a :class:`~util.TimeSeries` with a thermal demand forecast
    covering *hours* hours starting at *date*."""
    # The forecast usually covers a period of 48 h and we request a new
    # forecast every 24 hours.
    #
    # The forecast data comes from a large file.  We don't want to keep the
    # full content of that file in memory.
    #
    # So in order to create a new forecast, we
    # 1. reuse the old one,
    # 2. strip all data from before the new start date, and
    # 3. extend it with new data from the data file.
    demand_start, demand_res, demand_gen = self._demand_gen

    if self._fc is None:
        # Create an initial, empty forecast if there is none
        self._fc = util.TimeSeries(date, demand_res, [])

    # Strip old data from the forecast
    self._fc.lstrip(date)

    # We need data for [start_date, end_date) from the data file.
    # (On the first run we load two days, but on the second day we only
    # have to add the data for that day, as the first one has already been
    # read.)
    period = hours * 3600 - self._fc.period
    start_date = self._fc.end
    end_date = start_date.replace(seconds=period)

    # Skip lines until we reach "start_date"
    start_diff = util.get_intervals_between(start_date, demand_start,
                                            demand_res)
    [next(demand_gen) for i in range(start_diff)]

    # Get the data from the file and extend the forecast
    n_lines = util.get_intervals_between(end_date, start_date, demand_res)
    fc_data = []
    for i in range(n_lines):
        data = next(demand_gen).strip().split(',')
        p_el, p_th_heat, p_th_water = map(float, data)
        sum_p_th = p_th_heat + p_th_water
        fc_data.append(sum_p_th)
    new_data = util.TimeSeries(start_date, demand_res, fc_data)
    self._fc.extend(new_data)
    self._demand_gen = (end_date, demand_res, demand_gen)

    return self._fc
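# Illustrative sketch (assumption, not part of the original code): the
# rolling-window arithmetic behind _get_thermal_demand_forecast().  On the
# first call the cached forecast is empty, so the full horizon is read from
# the data file; a day later only the missing part is read because the
# remainder of the old forecast is reused.  The numbers are made up.
def _example_forecast_window_arithmetic():
    hours, res = 48, 900                  # requested horizon and resolution
    cached_period = 0                     # first call: nothing cached yet
    assert (hours * 3600 - cached_period) // res == 192   # read 48 h
    cached_period = 24 * 3600             # next day: 24 h are still cached
    assert (hours * 3600 - cached_period) // res == 96    # read only 24 h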
def test_get_day_ahead_plan(ctrl_obs):
    ctrl, obs = ctrl_obs
    ctrl._da_scheduler._forecast_p = util.TimeSeries(
        arrow.get('2015-01-01T09:00:00'),
        ctrl._da_scheduler._scheduling_res,
        np.arange(ctrl._da_scheduler._scheduling_intervals))
    ctrl._task_negotiation = asyncio.Future()
    ctrl._task_negotiation.set_result(None)

    # The forecast starts at 09:00, so the requested day begins 15 h later:
    r_start = int(15 * 3600 / ctrl._da_scheduler._scheduling_res)
    r_end = r_start + (24 * 3600 // ctrl._da_scheduler._scheduling_res)
    expected_dap = list(range(r_start, r_end))

    # Assert that the ts returned is the correct sub-range of the forecast:
    dap_start = arrow.get('2015-01-02')
    dap_end = dap_start.replace(days=1)
    dap = yield from ctrl.get_day_ahead_plan(dap_start, dap_end)
    assert list(dap) == expected_dap

    # Assert that the correct target_schedule and weight vector have been set
    exp_len = ctrl._da_scheduler._scheduling_intervals
    pad_len = exp_len - r_end  # Length of 0-padding
    assert len(ctrl._da_scheduler._target_schedule) == exp_len
    assert list(ctrl._da_scheduler._target_schedule.data) == \
        list(range(r_end)) + [0] * pad_len
    assert len(ctrl._da_scheduler._weights) == exp_len
    assert list(
        ctrl._da_scheduler._weights.data) == [1] * r_end + [0] * pad_len
def test_run_negotiation_timeout(ctrl, obs, ua_mocks):
    """UAs don't detect termination and we time out."""
    ctrl._agents = collections.OrderedDict(
        sorted(ctrl._agents.items(), key=lambda i: i[1]))
    ctrl._neg_timeout = 0.1
    start = arrow.get('2015-01-01')
    res = ctrl._da_scheduler._scheduling_res = 15
    ctrl._da_scheduler._forecast_p = util.TimeSeries(start, res, [0] * 4)
    ctrl._da_scheduler._target_schedule = util.TimeSeries(
        start, res, list(range(4)))
    ctrl._da_scheduler._weights = util.TimeSeries(start, res, [1] * 4)

    yield from aiomas.async(ctrl._run_negotiation(start))

    # Check the solution returned to the BGA
    assert ctrl._da_scheduler._forecast_p == util.TimeSeries(
        start, res, [18, 22, 26])
def test_user_get_dap_dates(user, now, exp_start, exp_end):
    now, exp_start, exp_end = [arrow.get(d) for d in [now, exp_start,
                                                      exp_end]]
    ts = util.TimeSeries(exp_start, 900, [0 for i in range(96)])
    user._ts = ts
    s, e = user._get_dap_dates(user.container.clock)
    assert s.tzinfo == dateutil.tz.tzutc()
    assert s == exp_start
    assert e.tzinfo == dateutil.tz.tzutc()
    assert e == exp_end
def test_update_thermal_demand_forecast_neg_running(ctrl, obs):
    """Raise an error if the forecast is updated while a negotiation is
    currently running."""
    ctrl._task_negotiation = asyncio.Future()
    kwargs = {
        'start': ctrl._da_scheduler._forecast_p.start,
        'res': ctrl._da_scheduler._scheduling_res,
        'data': np.zeros(ctrl._da_scheduler._scheduling_intervals),
    }
    fc = util.TimeSeries(**kwargs)
    target_schedule = util.TimeSeries(**kwargs)
    weights = util.TimeSeries(**kwargs)

    with pytest.raises(RuntimeError):
        yield from ctrl.update_thermal_demand_forecast(target_schedule,
                                                       weights, fc)

    # Try again:
    ctrl._task_negotiation.set_result(None)
    yield from ctrl.update_thermal_demand_forecast(target_schedule,
                                                   weights, fc)
def test_dummy(uas, ctrl_obs, target):
    # logging.basicConfig(level=logging.DEBUG)
    ctrl, obs = ctrl_obs
    results = []
    msgs = []
    for i in range(NUM_EXP):
        start = arrow.get().replace(minute=0, second=0, microsecond=0)
        ctrl._target_schedule = util.TimeSeries(start, 900, target)
        ctrl._weights = util.TimeSeries(start, 900, [1] * 5)
        ctrl._forecast_p = util.TimeSeries(start, 900, [0] * 5)
        ctrl._scheduling_period = 900 * 5

        target_schedule = util.TimeSeries(start, 900, [0] * 5)
        weights = util.TimeSeries(start, 900, [0] * 5)
        fc = util.TimeSeries(start, 900, [0] * 5)
        yield from ctrl.update_thermal_demand_forecast(target_schedule,
                                                       weights, fc)
        dap = yield from ctrl.get_day_ahead_plan(
            start, start.replace(seconds=5 * 900))

        diff_abs = np.abs(dap - target)
        diff_mean = np.mean(diff_abs / target)
        assert diff_mean <= 0.02
        results.append(diff_mean)

    assert np.mean(results) <= 0.05
def __init__(self, controller, scheduling_res, scheduling_period):
    self._controller = controller
    self._scheduling_res = scheduling_res
    self._scheduling_period = scheduling_period
    self._scheduling_intervals = int(scheduling_period / scheduling_res)

    # We need a basic TimeSeries as long as we have no real data.
    # We take 7 a.m. UTC, as if the schedule were the result of a dap.
    utctime = self._controller.container.clock.utcnow()
    utctime = utctime.replace(minute=0, second=0, microsecond=0, hour=7)
    zero_ts = util.TimeSeries(utctime, self._scheduling_res,
                              np.zeros(self._scheduling_intervals))

    # TODO wording: Maybe this should be called "planned_p", because it's
    # the result of a negotiation, namely the aggregate power output of
    # the VPP:
    self._forecast_p = zero_ts
    self._target_schedule = zero_ts.copy()
    # The *weights* list allows us to express that intervals closer to
    # "now" must match the target schedule more closely than intervals
    # farther in the future:
    self._weights = zero_ts.copy()
    self._start = None
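# Illustrative sketch (assumption, not part of the original code): how the
# weight vector can express that intervals close to "now" must follow the
# target schedule strictly while later intervals may deviate.  It mirrors
# what get_day_ahead_plan() does when it sets the weights of the planned
# range to 1; the concrete numbers are made up, and only numpy is needed.
def _example_weight_vector():
    intervals = 96                        # one day at 15-minute resolution
    weights = np.zeros(intervals)
    weights[:32] = 1                      # first 8 h: must match the target
    # The remaining intervals keep weight 0 and are free during negotiation.
    assert weights.sum() == 32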
def test_run_negotiation(ctrl_obs, ua_mocks):
    """Test a simple DAP from the ctrl's point of view.

    No negotiation at the unit agents is performed (mocks are used).

    """
    ctrl, obs = ctrl_obs
    obs._termination_detector._num_agents = len(ua_mocks)

    # General config
    ctrl._agents = collections.OrderedDict(
        sorted(ctrl._agents.items(), key=lambda i: i[1]))
    start = arrow.get('2015-01-01')
    res = ctrl._scheduling_res = 15
    ctrl._da_scheduler._forecast_p = util.TimeSeries(start, res, [0] * 4)
    ctrl._da_scheduler._target_schedule = util.TimeSeries(
        start, res, list(range(4)))
    ctrl._da_scheduler._weights = util.TimeSeries(start, res, [1] * 4)
    ts = list(ctrl._da_scheduler._target_schedule.data)
    weights = list(ctrl._da_scheduler._weights.data)
    # conn = {}
    # t = [0, 1, 2, 3]
    # w = [0, 0, 0, 0]

    neg_task = aiomas.async(ctrl._run_negotiation(start))

    # Test init_negotiation()
    futs = [ua.init_negotiation_called for ua in ua_mocks]
    ret = yield from gather(*futs)
    assert ret == [
        (['tcp://127.0.0.1:5556/1', 'tcp://127.0.0.1:5556/2'], start, res, ts, weights, True),  # NOQA
        (['tcp://127.0.0.1:5556/0', 'tcp://127.0.0.1:5556/2'], start, res, ts, weights, False),  # NOQA
        (['tcp://127.0.0.1:5556/0', 'tcp://127.0.0.1:5556/1'], start, res, ts, weights, False),  # NOQA
    ]

    # UA agents found a solution
    for ua in ua_mocks:
        yield from ua.obs_agent.update_stats(ua, 0, 0, 0, 1, 1, True)

    # Retrieve the final solution
    yield from neg_task
    assert ctrl._da_scheduler._forecast_p == util.TimeSeries(
        start, res, [18, 22, 26])

    # Check the solutions sent to each UA
    futs = [ua.stop_negotiation_called for ua in ua_mocks]
    yield from gather(*futs)

    # Send the final solution to the observer
    futs = [ua.obs_agent.update_final_cand(ua.solution) for ua in ua_mocks]
    yield from gather(*futs)

    futs = [ua.set_schedule_called for ua in ua_mocks]
    ret = yield from gather(*futs)
    solution = ua_mocks[0].solution
    for i, schedule_id in enumerate(ret):
        assert schedule_id == solution.sids[i]
def generate_schedules(self, start, res, intervals, state):
    # TODO for electrical target schedule: generate more than one possible
    #      schedule / first in a simple manner to verify adaptation to the
    #      electrical target schedule optimization
    # TODO: unify the resolution check?
    # TODO: the schedule resolution is 60 (thus one minute)
    if res < self.model.res or res % self.model.res != 0:
        raise ValueError('Schedule resolution must be >= %d and a '
                         'multiple of it' % self.model.res)

    # The latest unit state should be from time "start", but we want the
    # state from the interval before.
    # Example: If "start" is 09:00 o'clock, then the last state is for the
    # interval [09:00, 09:01).  If we want to simulate from 09:00, we need
    # the previous state from [08:59, 09:00).
    assert state[-1][0] == start
    state = state[-2][1]

    self._schedules = {}
    self._schedule_id = itertools.count()

    # Start index in the array with the forecast data
    res_ratio = int(res // self.model.res)
    n_sim_steps = res_ratio * intervals

    # Generate the initial schedule
    setpoint = self._setpoint_for(state['chp_p_el'])
    storage_e = state['storage_e_th']
    self.model.reset(chp_setpoint=setpoint, storage_e_th=storage_e)
    p_el = np.zeros(n_sim_steps)
    p_th_gen = self._forecast_demand_p_th.iter(start, self.model.res,
                                               n_sim_steps)
    for i, p_th in enumerate(p_th_gen):
        self.model.step(p_th)
        p_el[i] = self.model.chp_p_el

    schedule_id = next(self._schedule_id)
    unit_schedule = util.TimeSeries(start, self.model.res, p_el)
    utility = 1
    # Reshape to 15-minute resolution
    negotiation_schedule = p_el.reshape(intervals, res_ratio).mean(axis=1)
    self._schedules[schedule_id] = unit_schedule
    possible_schedules = [(schedule_id, utility, negotiation_schedule)]

    # TODO: quick hack for testing multiple schedules - remove
    # np.random.seed(7)  # at least .. deterministic hack
    # for i in range(10):
    #     tmp_data = np.random.permutation(unit_schedule.data)
    #     tmp = util.TimeSeries(start, self.model.res, tmp_data)
    #     # print(tmp.data)
    #     sched_id = next(self._schedule_id)
    #     self._schedules[sched_id] = tmp
    #     # reshaping to 15 min resolution
    #     negotiation_schedule = tmp_data.reshape(
    #         intervals, res_ratio
    #     ).mean(axis=1)
    #     possible_schedules.append((sched_id, utility,
    #                                negotiation_schedule))
    # print("sched 6:")
    # for val in self._schedules[6].data:
    #     print(val)
    # end quick hack
    # return [(schedule_id, utility, negotiation_schedule)]
    return possible_schedules
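# Illustrative sketch (assumption, not part of the original code): the
# down-sampling of the 60-second unit schedule to the 15-minute negotiation
# resolution, as done in generate_schedules() via reshape(...).mean(axis=1).
# The values are made up; only numpy is needed.
def _example_downsample_schedule():
    res, model_res = 900, 60               # negotiation vs. model resolution
    res_ratio = res // model_res            # 15 one-minute steps per interval
    intervals = 4                           # one hour of schedule
    p_el = np.arange(intervals * res_ratio, dtype=float)  # 60 simulated steps
    negotiation_schedule = p_el.reshape(intervals, res_ratio).mean(axis=1)
    assert negotiation_schedule.shape == (intervals,)
    assert negotiation_schedule[0] == p_el[:res_ratio].mean()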
def finalize_negotiation(self, solution):
    # TODO wording: forecast_p = the aggregated dispatch schedule
    # ("aggregierter Einsatzplan")
    forecast_p = self._aggregate_solution(solution)
    assert self._start is not None
    res = self._target_schedule.res
    self._forecast_p = util.TimeSeries(self._start, res, forecast_p)