def _check_repeat(self, prev_run_time):
    """Check if the :attr:`addon` wants to repeat, and set it up if so."""
    self._repeat = (None, None)

    rcu_copy = self._start_schedule_rcu_copy
    if rcu_copy is None:
        rcu_copy = self._rcu.copy()
    rcu_data = rcu_copy.data

    if rcu_data.last_idx is None:
        return

    chain = rcu_data.ready_chains[rcu_data.last_idx]
    current_time = yield CPURequest.current_time()
    done = chain.bottom.is_finished() or chain.bottom.ready_time > current_time

    repeat, time_slice = self.addon.repeat(rcu_data, prev_run_time, done)
    if not repeat:
        return
    assert not done
    assert time_slice is None or time_slice > 0

    self._repeat = (rcu_copy, time_slice)
def _schedule(self, idx, time_slice, next_ready_time, rcu_copy):
    """Update :attr:`_rcu` and schedule the chain at `idx`.

    If `idx` is `None`, yield an idle request.
    `next_ready_time` should be forwarded from :meth:`schedule`.

    Yields a :class:`~schedsi.cpurequest.Request`.
    """
    rcu_data = rcu_copy.data
    rcu_data.last_idx = idx
    # FIXME: we need to take it out of the ready_chains for multi-vcpu
    # else we might try to run the same chain in parallel
    if not self._rcu.update(rcu_copy):
        return

    if idx is None:
        next_chain = self.get_next_waiting(rcu_copy.data)
        if next_chain:
            next_ready_time[0] = next_chain.bottom.ready_time
            current_time = yield CPURequest.current_time()
            delta = next_chain.bottom.ready_time - current_time
            assert delta > 0
            yield CPURequest.timer(delta)
        else:
            next_ready_time[0] = None
        yield CPURequest.idle()
        return

    next_ready_time[0] = 0
    yield CPURequest.timer(time_slice)
    chain = yield CPURequest.resume_chain(rcu_data.ready_chains[idx])

    def appliance(data):
        """Update executed chain."""
        data.ready_chains[idx] = chain

    self._rcu.apply(appliance)
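# Note (illustrative sketch, not part of the scheduler): the generator above
# communicates through yielded CPURequest objects. A caller could drive it roughly
# like below, where `respond()` is a hypothetical stand-in for whatever component
# answers each request (e.g. returning the current time for `current_time()`):
#
#     schedule = scheduler._schedule(idx, time_slice, next_ready_time, rcu_copy)
#     answer = None
#     while True:
#         try:
#             request = schedule.send(answer)
#         except StopIteration:
#             break
#         answer = respond(request)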
def _start_schedule(self, _prev_run_time):
    """Prepare making a scheduling decision.

    Moves ready threads to the ready queue and finished ones to the finished queue.

    Returns a tuple (
        * RCUCopy of :attr:`_rcu`
        * list where the previously scheduled chain ended up
            * (`rcu_copy_{ready,waiting,finished}_chains`)
        * index of the previously scheduled chain
            * as passed to :meth:`_schedule`
            * *not* necessarily the index into the list where the chain ended up
    ).

    Yields an idle or execute :class:`~schedsi.cpurequest.Request`.
    Consumes the current time.
    """
    current_time = yield CPURequest.current_time()
    while True:
        rcu_copy = self._rcu.copy()
        rcu_data = rcu_copy.data

        # check if the last scheduled thread is done now;
        # move it to a different queue if necessary
        dest = None
        last_idx = None
        if rcu_data.last_idx is not None:
            last_idx = rcu_data.last_idx
            last_context = rcu_data.ready_chains[last_idx]

            if last_context.bottom.is_finished():
                dest = rcu_data.finished_chains
            elif last_context.bottom.ready_time > current_time:
                dest = rcu_data.waiting_chains
            else:
                assert last_context.bottom.ready_time != -1

        if dest is None:
            dest = rcu_data.ready_chains
        else:
            dest.append(rcu_data.ready_chains.pop(last_idx))
        # if not self._rcu.update(rcu_copy):
        #     # current_time = yield CPURequest.execute(1)
        #     continue

        self._update_ready_chains(current_time, rcu_data)

        rcu_data.last_idx = None

        return rcu_copy, dest, last_idx
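# Illustrative example of the return value (hypothetical state): if the chain that
# was scheduled last time sat at index 2 of `ready_chains` and has since finished,
# the method returns
#
#     (rcu_copy, rcu_copy.data.finished_chains, 2)
#
# i.e. the chain now lives at the end of the finished queue, while the returned
# index still refers to its former position as passed to :meth:`_schedule`.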
def _start_schedule(self, prev_run_time):
    """See :meth:`Scheduler._start_schedule`.

    This skips :meth:`Scheduler._start_schedule` if :attr:`_start_schedule_rcu_copy` is set.
    Resets :attr:`_start_schedule_rcu_copy` to `None`.
    """
    if self._start_schedule_rcu_copy is None:
        return (yield from super()._start_schedule(prev_run_time))

    rcu_copy = self._start_schedule_rcu_copy
    self._start_schedule_rcu_copy = None

    idx = rcu_copy.data.last_idx
    self._update_ready_chains((yield CPURequest.current_time()), rcu_copy.data)
    rcu_copy.data.last_idx = None

    return rcu_copy, rcu_copy.data.ready_chains, idx
def _start_schedule(self, prev_run_time):  # pylint: disable=method-hidden
    """See :meth:`Scheduler._start_schedule`.

    Lower priority of last thread if it outran its time-slice.
    Boost priorities if it's time.
    """
    rcu_copy, last_queue, last_idx = yield from super()._start_schedule(prev_run_time)
    rcu_data = rcu_copy.data

    prev_still_ready = last_queue is rcu_data.ready_chains
    prev_has_run = prev_run_time is not None and prev_run_time > 0
    # important: == vs is; empty arrays will compare equal with ==
    prev_level = next(i for i, v in enumerate(rcu_data.ready_queues)
                      if v is rcu_data.ready_chains)

    if prev_has_run:
        current_time = (yield CPURequest.current_time())
        if self._priority_boost(rcu_data, prev_level, current_time) \
           and prev_still_ready:
            last_queue = rcu_data.ready_queues[0]

    if prev_still_ready:
        # rotate queue for round robin
        last_queue.append(last_queue.pop(last_idx))
        last_idx = len(last_queue) - 1

    # switch to highest priority queue
    rcu_data.ready_chains = next((x for x in rcu_data.ready_queues if x),
                                 rcu_data.ready_queues[0])

    if prev_has_run and not last_queue[-1].bottom.is_finished():
        # and (rcu_data.last_prio_boost is None or rcu_data.last_prio_boost != current_time) ?
        last_queue = self._priority_reduction(rcu_data, last_queue, prev_still_ready,
                                              prev_level, prev_run_time)

    if rcu_data.waiting_chains:
        assert last_queue is rcu_data.waiting_chains
        last_queue = rcu_data.waiting_queues[prev_level]
        last_queue.append(rcu_data.waiting_chains.pop())
        assert not rcu_data.waiting_chains

    return rcu_copy, last_queue, last_idx
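# Illustrative note with assumed queues A, B, C: given
#
#     ready_queues = [[], [A, B, C], []]    # level 1 is the currently selected queue
#
# if B was scheduled last and is still ready, the round-robin rotation moves it to
# the back ([A, C, B]) and `ready_chains` is pointed back at the highest non-empty
# queue; afterwards `_priority_reduction` may demote B one level if it used up its
# time slice.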
def _sched_loop(self, rcu_copy):
    """Schedule the next :class:`~schedsi.threads.Thread`.

    See :meth:`Scheduler._sched_loop() <schedsi.scheduler.Scheduler._sched_loop>`.
    """
    rcu_data = rcu_copy.data
    if not rcu_data.ready_chains:
        return None, None

    # important: == vs is; empty arrays will compare equal with ==
    level = next(i for i, v in enumerate(rcu_data.ready_queues)
                 if v is rcu_data.ready_chains)
    time_slice = self.level_time_slices[level]

    rcu_data.last_finish_time = None
    if time_slice is not None:
        rcu_data.last_finish_time = (yield CPURequest.current_time()) + time_slice

    return 0, time_slice
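# Illustrative note: `level_time_slices` holds one time slice per priority level,
# with `None` meaning no preemption timer for that level. With assumed values
#
#     level_time_slices = [2, 4, 8, None]
#
# a chain picked from level 1 at time 10 would get
# `last_finish_time = 10 + 4 = 14`.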
def _sched_loop(self, rcu_copy):  # pylint: disable=no-self-use
    """See :meth:`FCFS._sched_loop`."""
    idx, time_slice = yield from super()._sched_loop(rcu_copy)
    if idx is not None:
        assert idx == 0

        threads = (c.bottom for c in rcu_copy.data.waiting_chains)
        next_thread = next(threads, None)
        for thread in threads:
            if thread.ready_time < next_thread.ready_time or (
                    thread.ready_time == next_thread.ready_time
                    and thread.remaining < next_thread.remaining):
                next_thread = thread

        current_remaining = rcu_copy.data.ready_chains[0].bottom.remaining
        if next_thread is not None and next_thread.remaining is not None and (
                current_remaining is None or next_thread.remaining < current_remaining):
            # calculate time to preemption
            current_time = yield CPURequest.current_time()
            time_slice = next_thread.ready_time - current_time
            assert time_slice > 0

    return idx, time_slice
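# Worked example with assumed numbers: if the current chain has remaining=10 and the
# shortest waiting thread (remaining=3) becomes ready at time 15 while the current
# time is 12, the time slice is shortened to 15 - 12 = 3, so the running thread is
# preempted exactly when the shorter job arrives.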
def _schedule(self, idx, time_slice, next_ready_time, rcu_copy):
    """See :meth:`Scheduler._schedule`.

    This will also call the :attr:`addon`'s :meth:`~Addon.schedule`.
    """
    proceed, time_slice = self.addon.schedule(idx, time_slice, rcu_copy.data)
    if not proceed:
        # the addon declined the decision; remember the state so that
        # _start_schedule can pick it up again on the next round
        rcu_copy.data.last_idx = idx
        self._start_schedule_rcu_copy = rcu_copy
        if self._repeat[0] is not None:
            # block repeating
            self.addon.repeat(rcu_copy.data, 0, True)
        return

    # drive the superclass generator, intercepting timer requests
    schedule = super()._schedule(idx, time_slice, next_ready_time, rcu_copy)
    answer = None
    while True:
        try:
            request = schedule.send(answer)
        except StopIteration:
            break
        if request.rtype == CPURequestType.timer:
            delta = None
            if next_ready_time[0] != 0:
                if next_ready_time[0] is not None:
                    current_time = yield CPURequest.current_time()
                    delta = next_ready_time[0] - current_time
                    # if this assumption does not hold we need to decide
                    # whether we really want to override this
                    assert delta == request.arg
            if time_slice is None or delta is None:
                request.arg = time_slice
        answer = yield request