def __init__(self, filename=None): """ Args: - `filename` A file can be used to initialize the configuration. """ if filename: parser = Parser(filename) self.etm = parser.etm self.duration = parser.duration self.cycles_per_ms = parser.cycles_per_ms self._caches_list = parser.caches_list self.memory_access_time = parser.memory_access_time self._task_info_list = parser.task_info_list self.task_data_fields = parser.task_data_fields self._proc_info_list = parser.proc_info_list self.proc_data_fields = parser.proc_data_fields self._scheduler_info = parser.scheduler_info self.penalty_preemption = parser.penalty_preemption self.penalty_migration = parser.penalty_migration else: self.etm = "ofrp" self.duration = 100 #100000000 self.penalty_preemption = 0 self.penalty_migration = 0 self.cycles_per_ms = 1 #1000000 self._caches_list = [] self._task_info_list = [] self.task_data_fields = {} self._proc_info_list = [] self.proc_data_fields = {} self.memory_access_time = 100 self._scheduler_info = SchedulerInfo() self.calc_penalty_cache() self._set_filename(filename)
def _parse_scheduler(self): overhead = 0 overhead_activate = 0 overhead_terminate = 0 sched = self._dom.getElementsByTagName('sched')[0] attr = sched.attributes filename = attr['className'].value if 'overhead' in attr: overhead = int(float(attr['overhead'].value)) if 'overhead_activate' in attr: overhead_activate = int(float(attr['overhead_activate'].value)) if 'overhead_terminate' in attr: overhead_terminate = int(float(attr['overhead_terminate'].value)) data = {} fields = sched.getElementsByTagName('field') for field in fields: name = field.attributes['name'].value type_ = field.attributes['type'].value value = field.attributes['value'].value data[name] = (convert_function[type_](value), type_) self.scheduler_info = SchedulerInfo( overhead=overhead, overhead_activate=overhead_activate, overhead_terminate=overhead_terminate, fields=data) if filename[0] != '/': filename = self.cur_dir + '/' + filename self.scheduler_info.set_name(filename, self.cur_dir)
def __init__(self, filename=None): """ Args: - `filename` A file can be used to initialize the configuration. """ if filename: parser = Parser(filename) self.etm = parser.etm self.duration = parser.duration self.cycles_per_ms = parser.cycles_per_ms self._caches_list = parser.caches_list self.memory_access_time = parser.memory_access_time self._task_info_list = parser.task_info_list self.task_data_fields = parser.task_data_fields self._proc_info_list = parser.proc_info_list self.proc_data_fields = parser.proc_data_fields self._scheduler_info = parser.scheduler_info self.penalty_preemption = parser.penalty_preemption self.penalty_migration = parser.penalty_migration else: self.etm = "wcet" self.duration = 100000000 self.penalty_preemption = 0 self.penalty_migration = 0 self.cycles_per_ms = 1000000 self._caches_list = [] self._task_info_list = [] self.task_data_fields = {} self._proc_info_list = [] self.proc_data_fields = {} self.memory_access_time = 100 self._scheduler_info = SchedulerInfo() self.calc_penalty_cache() self._set_filename(filename)
def init(self): # Mapping task to scheduler. self.map_task_sched = {} cpus = [] for cpu in self.processors: # Append the processor to a list with an initial utilization of 0. cpus.append([cpu, Fraction(0)]) # Instantiate a scheduler. sched = EDF_modified(self.sim, SchedulerInfo()) sched.add_processor(cpu) sched.init() # Affect the scheduler to the processor. map_cpu_sched[cpu] = sched # First Fit for task in self.task_list: j = 0 # Find a processor with free space. while cpus[j][1] + Fraction(task.wcet) / Fraction( task.period) > 1.0: j += 1 if j >= len(self.processors): migrating_tasks[task] = [] break if j == len(self.processors): continue # Get the scheduler for this processor. sched = map_cpu_sched[cpus[j][0]] # Affect it to the task. self.map_task_sched[task.identifier] = sched sched.add_task(task) # Put the task on that processor. task.cpu = cpus[j][0] self.sim.logger.log("task " + task.name + " on " + task.cpu.name) # Update utilization. cpus[j][1] += Fraction(task.wcet) / Fraction(task.period) for task, l in migrating_tasks.items(): rem = Fraction(task.wcet) / Fraction(task.period) for cpu, cpu_u in cpus: if cpu_u < 1 and rem > 0: u = min(rem, 1 - cpu_u) l.append( (cpu, ceil(u * task.period * self.sim.cycles_per_ms))) rem -= u
def init(self): # Mapping processor to scheduler. self.map_cpu_sched = {} # Mapping task to scheduler. self.map_task_sched = {} cpus = [] for cpu in self.processors: # Append the processor to a list with an initial utilization of 0. cpus.append([cpu, 0]) # Instantiate a scheduler. sched = EDF_mono(self.sim, SchedulerInfo("simso.schedulers.EDF_mono")) sched.add_processor(cpu) sched.init() # Affect the scheduler to the processor. self.map_cpu_sched[cpu.identifier] = sched # First Fit for task in self.task_list: j = 0 # Find a processor with free space. while cpus[j][1] + float(task.wcet) / task.period > 1.0: j += 1 if j >= len(self.processors): print("oops bin packing failed.") return # Get the scheduler for this processor. sched = self.map_cpu_sched[cpus[j][0].identifier] # Affect it to the task. self.map_task_sched[task.identifier] = sched sched.add_task(task) # Put the task on that processor. task.cpu = cpus[j][0] self.sim.logger.log("task " + task.name + " on " + task.cpu.name) # Update utilization. cpus[j][1] += float(task.wcet) / task.period
def _parse_scheduler(self): overhead = 0 overhead_activate = 0 overhead_terminate = 0 sched = self._dom.getElementsByTagName('sched')[0] attr = sched.attributes if 'class' in attr: clas = attr['class'].value else: clas = '' if 'className' in attr: filename = attr['className'].value else: filename = '' if 'overhead' in attr: overhead = int(float(attr['overhead'].value)) if 'overhead_activate' in attr: overhead_activate = int(float(attr['overhead_activate'].value)) if 'overhead_terminate' in attr: overhead_terminate = int(float(attr['overhead_terminate'].value)) data = {} fields = sched.getElementsByTagName('field') for field in fields: name = field.attributes['name'].value type_ = field.attributes['type'].value value = field.attributes['value'].value data[name] = (convert_function[type_](value), type_) self.scheduler_info = SchedulerInfo( clas=clas, overhead=overhead, overhead_activate=overhead_activate, overhead_terminate=overhead_terminate, fields=data) if filename and filename[0] != '/': filename = self.cur_dir + '/' + filename self.scheduler_info.filename = filename
def init(self): PartitionedScheduler.init(self, SchedulerInfo("simso.schedulers.RM_mono"))
class Parser(object): """ Simulation file parser. """ def __init__(self, filename): self.filename = filename self.cur_dir = os.path.split(filename)[0] if not self.cur_dir: self.cur_dir = '.' self._dom = parse(filename) self._parse_etm() self._parse_duration() self._parse_cycles_per_ms() self._parse_caches() self._parse_tasks() self._parse_processors() self._parse_scheduler() self._parse_penalty() def _parse_caches(self): self.caches_list = [] caches_element = self._dom.getElementsByTagName('caches')[0] caches = caches_element.getElementsByTagName('cache') attr = caches_element.attributes self.memory_access_time = 100 if 'memory_access_time' in attr: self.memory_access_time = int(attr['memory_access_time'].value) for cache in caches: attr = cache.attributes if attr['policy'].value == 'LRU' and attr['type'].value == 'data': access_time = 1 associativity = int(attr['size'].value) if 'access_time' in attr: access_time = int(attr['access_time'].value) if 'associativity' in attr: associativity = int(attr['associativity'].value) cache = Cache_LRU(attr['name'].value, int(attr['id'].value), int(attr['size'].value), associativity, access_time) self.caches_list.append(cache) # TODO Généraliser aux autres types de cache. def _parse_tasks(self): tasks_el = self._dom.getElementsByTagName('tasks')[0] self.task_data_fields = {} for field in tasks_el.getElementsByTagName('field'): attr = field.attributes self.task_data_fields[attr['name'].value] = attr['type'].value tasks = tasks_el.getElementsByTagName('task') self.task_info_list = [] for task in tasks: attr = task.attributes data = dict( (k, convert_function[self.task_data_fields[k]](attr[k].value)) for k in attr.keys() if k in self.task_data_fields) task_type = 'Periodic' if 'task_type' in attr and attr['task_type'].value in task_types: task_type = attr['task_type'].value elif 'periodic' in attr and attr['periodic'].value == 'no': task_type = 'APeriodic' list_activation_dates = [] if 'list_activation_dates' in attr and attr['list_activation_dates'].value != '': list_activation_dates = sorted( map(float, attr['list_activation_dates'].value.split(','))) t = TaskInfo( attr['name'].value, int(attr['id'].value), task_type, 'abort_on_miss' in attr and attr['abort_on_miss'].value == 'yes', float(attr['period'].value), float(attr['activationDate'].value) if 'activationDate' in attr else 0, int(attr['instructions'].value), float(attr['mix'].value), (self.cur_dir + '/' + attr['stack'].value, self.cur_dir) if 'stack' in attr else ("", self.cur_dir), float(attr['WCET'].value), float(attr['ACET'].value) if 'ACET' in attr else 0, float(attr['et_stddev'].value) if 'et_stddev' in attr else 0, float(attr['deadline'].value), float(attr['base_cpi'].value), int(attr['followed_by'].value) if 'followed_by' in attr else None, list_activation_dates, int(float(attr['preemption_cost'].value)) if 'preemption_cost' in attr else 0, data) self.task_info_list.append(t) def _parse_processors(self): processors_el = self._dom.getElementsByTagName('processors')[0] processors = self._dom.getElementsByTagName('processors')[0] attr = processors.attributes migration_overhead = 0 if 'migration_overhead' in attr: migration_overhead = int(attr['migration_overhead'].value) self.proc_data_fields = {} for field in processors_el.getElementsByTagName('field'): attr = field.attributes self.proc_data_fields[attr['name'].value] = attr['type'].value cpus = processors.getElementsByTagName('processor') self.proc_info_list = [] for cpu in cpus: attr = cpu.attributes data = dict( (k, convert_function[self.proc_data_fields[k]](attr[k].value)) for k in attr.keys() if k in self.proc_data_fields) cl_overhead = 0 cs_overhead = 0 if 'cl_overhead' in attr: cl_overhead = int(float(attr['cl_overhead'].value)) if 'cs_overhead' in attr: cs_overhead = int(float(attr['cs_overhead'].value)) speed = 1.0 if 'speed' in attr: speed = float(attr['speed'].value) proc = ProcInfo(name=attr['name'].value, identifier=int(attr['id'].value), cs_overhead=cs_overhead, cl_overhead=cl_overhead, migration_overhead=migration_overhead, speed=speed, data=data) caches = cpu.getElementsByTagName('cache') for cache_element in caches: attr = cache_element.attributes for cache in self.caches_list: if cache.identifier == int(attr['ref'].value): proc.add_cache(cache) self.proc_info_list.append(proc) def _parse_etm(self): simulation = self._dom.getElementsByTagName('simulation')[0] if 'etm' in simulation.attributes: self.etm = simulation.attributes['etm'].value else: use_wcet = True if 'use_wcet' in simulation.attributes: use_wcet = (simulation.attributes['use_wcet'].value in ('true', 'yes')) if use_wcet: self.etm = "wcet" else: self.etm = "cache" def _parse_duration(self): simulation = self._dom.getElementsByTagName('simulation')[0] if 'duration' in simulation.attributes: self.duration = int(simulation.attributes['duration'].value) else: self.duration = 50000 def _parse_penalty(self): simulation = self._dom.getElementsByTagName('simulation')[0] if 'penalty_preemption' in simulation.attributes: self.penalty_preemption = int( simulation.attributes['penalty_preemption'].value) else: self.penalty_preemption = 100000 if 'penalty_migration' in simulation.attributes: self.penalty_migration = int( simulation.attributes['penalty_migration'].value) else: self.penalty_migration = 100000 def _parse_cycles_per_ms(self): simulation = self._dom.getElementsByTagName('simulation')[0] if 'cycles_per_ms' in simulation.attributes: self.cycles_per_ms = int( simulation.attributes['cycles_per_ms'].value) else: self.cycles_per_ms = 1000000 def _parse_scheduler(self): overhead = 0 overhead_activate = 0 overhead_terminate = 0 sched = self._dom.getElementsByTagName('sched')[0] attr = sched.attributes filename = attr['className'].value if 'overhead' in attr: overhead = int(float(attr['overhead'].value)) if 'overhead_activate' in attr: overhead_activate = int(float(attr['overhead_activate'].value)) if 'overhead_terminate' in attr: overhead_terminate = int(float(attr['overhead_terminate'].value)) data = {} fields = sched.getElementsByTagName('field') for field in fields: name = field.attributes['name'].value type_ = field.attributes['type'].value value = field.attributes['value'].value data[name] = (convert_function[type_](value), type_) self.scheduler_info = SchedulerInfo( overhead=overhead, overhead_activate=overhead_activate, overhead_terminate=overhead_terminate, fields=data) if filename[0] != '/': filename = self.cur_dir + '/' + filename self.scheduler_info.set_name(filename, self.cur_dir)
class Configuration(object): """ The configuration class store all the details about a system. An instance of this class will be passed to the constructor of the :class:`Model <simso.core.Model.Model>` class. """ def __init__(self, filename=None): """ Args: - `filename` A file can be used to initialize the configuration. """ if filename: parser = Parser(filename) self.etm = parser.etm self.duration = parser.duration self.cycles_per_ms = parser.cycles_per_ms self._caches_list = parser.caches_list self.memory_access_time = parser.memory_access_time self._task_info_list = parser.task_info_list self.task_data_fields = parser.task_data_fields self._proc_info_list = parser.proc_info_list self.proc_data_fields = parser.proc_data_fields self._scheduler_info = parser.scheduler_info self.penalty_preemption = parser.penalty_preemption self.penalty_migration = parser.penalty_migration else: self.etm = "ofrp" self.duration = 100 #100000000 self.penalty_preemption = 0 self.penalty_migration = 0 self.cycles_per_ms = 1 #1000000 self._caches_list = [] self._task_info_list = [] self.task_data_fields = {} self._proc_info_list = [] self.proc_data_fields = {} self.memory_access_time = 100 self._scheduler_info = SchedulerInfo() self.calc_penalty_cache() self._set_filename(filename) def _set_filename(self, filename): self._simulation_file = filename if filename: self._cur_dir = os.path.split(filename)[0] if not self._cur_dir: self._cur_dir = os.curdir else: self._cur_dir = os.curdir def save(self, simulation_file=None): """ Save the current configuration in a file. If no file is given as argument, the previous file used to write or read the configuration is used again. """ if simulation_file: old_dir = self._cur_dir self._cur_dir = os.path.split(simulation_file)[0] or '.' for task in self._task_info_list: if task.stack_file: task.set_stack_file( old_dir + '/' + task.stack_file, self._cur_dir) self._simulation_file = simulation_file conf_file = open(self._simulation_file, 'w') conf_file.write(generate(self)) def calc_penalty_cache(self): for proc in self.proc_info_list: access_time = self.memory_access_time for cache in reversed(proc.caches): cache.penalty = access_time - cache.access_time access_time = cache.access_time proc.penalty = access_time def check_all(self): """ Check the correctness of the configuration (without simulating it). """ self.check_general() self.check_scheduler() self.check_processors() self.check_tasks() self.check_caches() def check_general(self): assert self.duration >= 0, \ "Simulation duration must be a positive number." assert self.cycles_per_ms >= 0, \ "Cycles / ms must be a positive number." assert self.memory_access_time >= 0, \ "The memory access time must be a positive number." def check_scheduler(self): cls = self._scheduler_info.get_cls() assert cls is not None, \ "A scheduler is needed." assert issubclass(cls, Scheduler), \ "Must inherits from Scheduler." assert self._scheduler_info.overhead >= 0, \ "An overhead must not be negative." def check_processors(self): # At least one processor: assert len(self._proc_info_list) > 0, \ "At least one processor is needed." # Caches inclusifs : succ = {} for proc in self._proc_info_list: cur = None for cache in reversed(proc.caches): assert not (cache in succ and succ[cache] != cur), \ "Caches must be inclusives." succ[cache] = cur cur = cache for index, proc in enumerate(self._proc_info_list): # Nom correct : assert re.match('^[a-zA-Z][a-zA-Z0-9 _-]*$', proc.name), \ "A processor name must begins with a letter and must not "\ "contains any special character." # Id unique : assert proc.identifier not in [ x.identifier for x in self._proc_info_list[index + 1:]], \ "Processors' identifiers must be uniques." # Overheads positifs : assert proc.cs_overhead >= 0, \ "Context Save overhead can't be negative." assert proc.cl_overhead >= 0, \ "Context Load overhead can't be negative." def check_tasks(self): assert len(self._task_info_list) > 0, "At least one task is needed." for index, task in enumerate(self._task_info_list): # Id unique : assert task.identifier not in [ x.identifier for x in self._task_info_list[index + 1:]], \ "Tasks' identifiers must be uniques." # Nom correct : assert re.match('^[a-zA-Z][a-zA-Z0-9 _-]*$', task.name), "A task "\ "name must begins with a letter and must not contains any "\ "special character." # Activation date >= 0: assert task.activation_date >= 0, \ "Activation date must be positive." # Period >= 0: assert task.period >= 0, "Tasks' periods must be positives." # Deadline >= 0: assert task.deadline >= 0, "Tasks' deadlines must be positives." # N_instr >= 0: assert task.n_instr >= 0, \ "A number of instructions must be positive." # WCET >= 0: assert task.wcet >= 0, "WCET must be positive." # ACET >= 0: assert task.acet >= 0, "ACET must be positive." # ET-STDDEV >= 0: assert task.et_stddev >= 0, \ "A standard deviation is a positive number." # mix in [0.0, 2.0] assert 0.0 <= task.mix <= 2.0, \ "A mix must be positive and less or equal than 2.0" if self.etm == "cache": # stack assert task.stack_file, "A task needs a stack profile." # stack ok assert task.csdp, "Stack not found or empty." def check_caches(self): for index, cache in enumerate(self._caches_list): # Id unique : assert cache.identifier not in [ x.identifier for x in self._caches_list[index + 1:]], \ "Caches' identifiers must be uniques." # Nom correct : assert re.match('^[a-zA-Z][a-zA-Z0-9_-]*$', cache.name), \ "A cache name must begins with a letter and must not " \ "contains any spacial character nor space." # Taille positive : assert cache.size >= 0, "A cache size must be positive." # Access time >= 0: assert cache.access_time >= 0, "An access time must be positive." def get_hyperperiod(self): """ Compute and return the hyperperiod of the tasks. """ return _lcm([x.period for x in self.task_info_list]) @property def duration_ms(self): return self.duration / self.cycles_per_ms @property def simulation_file(self): return self._simulation_file @property def cur_dir(self): return self._cur_dir @property def caches_list(self): return self._caches_list @property def task_info_list(self): """ List of tasks (TaskInfo objects). """ return self._task_info_list @property def proc_info_list(self): """ List of processors (ProcInfo objects). """ return self._proc_info_list @property def scheduler_info(self): """ SchedulerInfo object. """ return self._scheduler_info def add_task(self, name, identifier, task_type="Periodic", abort_on_miss=True, period=20, activation_date=0, n_instr=0, mix=0.5, stack_file="", wcet=3, acet=0, et_stddev=0, deadline=20, base_cpi=1.0, followed_by=None, list_activation_dates=[], preemption_cost=0, data=None): """ Helper method to create a TaskInfo and add it to the list of tasks. """ if data is None: data = dict((k, None) for k in self.task_data_fields) task = TaskInfo(name, identifier, task_type, abort_on_miss, period, activation_date, n_instr, mix, (stack_file, self.cur_dir), wcet, acet, et_stddev, deadline, base_cpi, followed_by, list_activation_dates, preemption_cost, data) self.task_info_list.append(task) return task def add_processor(self, name, identifier, cs_overhead=0, cl_overhead=0, migration_overhead=0, speed=1.0): """ Helper method to create a ProcInfo and add it to the list of processors. """ proc = ProcInfo( identifier, name, cs_overhead, cl_overhead, migration_overhead, speed) self.proc_info_list.append(proc) return proc
class Configuration(object): """ The configuration class store all the details about a system. An instance of this class will be passed to the constructor of the :class:`Model <simso.core.Model.Model>` class. """ def __init__(self, filename=None): """ Args: - `filename` A file can be used to initialize the configuration. """ if filename: parser = Parser(filename) self.etm = parser.etm self.duration = parser.duration self.cycles_per_ms = parser.cycles_per_ms self._caches_list = parser.caches_list self.memory_access_time = parser.memory_access_time self._task_info_list = parser.task_info_list self.task_data_fields = parser.task_data_fields self._proc_info_list = parser.proc_info_list self.proc_data_fields = parser.proc_data_fields self._scheduler_info = parser.scheduler_info self.penalty_preemption = parser.penalty_preemption self.penalty_migration = parser.penalty_migration else: self.etm = "wcet" self.duration = 100000000 self.penalty_preemption = 0 self.penalty_migration = 0 self.cycles_per_ms = 1000000 self._caches_list = [] self._task_info_list = [] self.task_data_fields = {} self._proc_info_list = [] self.proc_data_fields = {} self.memory_access_time = 100 self._scheduler_info = SchedulerInfo() self.calc_penalty_cache() self._set_filename(filename) def _set_filename(self, filename): self._simulation_file = filename if filename: self._cur_dir = os.path.split(filename)[0] if not self._cur_dir: self._cur_dir = os.curdir else: self._cur_dir = os.curdir def save(self, simulation_file=None): """ Save the current configuration in a file. If no file is given as argument, the previous file used to write or read the configuration is used again. """ if simulation_file: old_dir = self._cur_dir self._cur_dir = os.path.split(simulation_file)[0] or '.' for task in self._task_info_list: if task.stack_file: task.set_stack_file( old_dir + '/' + task.stack_file, self._cur_dir) self._simulation_file = simulation_file conf_file = open(self._simulation_file, 'w') conf_file.write(generate(self)) def calc_penalty_cache(self): for proc in self.proc_info_list: access_time = self.memory_access_time for cache in reversed(proc.caches): cache.penalty = access_time - cache.access_time access_time = cache.access_time proc.penalty = access_time def check_all(self): """ Check the correctness of the configuration (without simulating it). """ self.check_general() self.check_scheduler() self.check_processors() self.check_tasks() self.check_caches() def check_general(self): assert self.duration >= 0, \ "Simulation duration must be a positive number." assert self.cycles_per_ms >= 0, \ "Cycles / ms must be a positive number." assert self.memory_access_time >= 0, \ "The memory access time must be a positive number." def check_scheduler(self): cls = self._scheduler_info.get_cls() assert cls is not None, \ "A scheduler is needed." assert issubclass(cls, Scheduler), \ "Must inherits from Scheduler." assert self._scheduler_info.overhead >= 0, \ "An overhead must not be negative." def check_processors(self): # At least one processor: assert len(self._proc_info_list) > 0, \ "At least one processor is needed." # Caches inclusifs : succ = {} for proc in self._proc_info_list: cur = None for cache in reversed(proc.caches): assert not (cache in succ and succ[cache] != cur), \ "Caches must be inclusives." succ[cache] = cur cur = cache for index, proc in enumerate(self._proc_info_list): # Nom correct : assert re.match('^[a-zA-Z][a-zA-Z0-9 _-]*$', proc.name), \ "A processor name must begins with a letter and must not "\ "contains any special character." # Id unique : assert proc.identifier not in [ x.identifier for x in self._proc_info_list[index + 1:]], \ "Processors' identifiers must be uniques." # Overheads positifs : assert proc.cs_overhead >= 0, \ "Context Save overhead can't be negative." assert proc.cl_overhead >= 0, \ "Context Load overhead can't be negative." def check_tasks(self): assert len(self._task_info_list) > 0, "At least one task is needed." for index, task in enumerate(self._task_info_list): # Id unique : assert task.identifier not in [ x.identifier for x in self._task_info_list[index + 1:]], \ "Tasks' identifiers must be uniques." # Nom correct : assert re.match('^[a-zA-Z][a-zA-Z0-9 _-]*$', task.name), "A task "\ "name must begins with a letter and must not contains any "\ "special character." # Activation date >= 0: assert task.activation_date >= 0, \ "Activation date must be positive." # Period >= 0: assert task.period >= 0, "Tasks' periods must be positives." # Deadline >= 0: assert task.deadline >= 0, "Tasks' deadlines must be positives." # N_instr >= 0: assert task.n_instr >= 0, \ "A number of instructions must be positive." # WCET >= 0: assert task.wcet >= 0, "WCET must be positive." # ACET >= 0: assert task.acet >= 0, "ACET must be positive." # ET-STDDEV >= 0: assert task.et_stddev >= 0, \ "A standard deviation is a positive number." # mix in [0.0, 2.0] assert 0.0 <= task.mix <= 2.0, \ "A mix must be positive and less or equal than 2.0" if self.etm == "cache": # stack assert task.stack_file, "A task needs a stack profile." # stack ok assert task.csdp, "Stack not found or empty." def check_caches(self): for index, cache in enumerate(self._caches_list): # Id unique : assert cache.identifier not in [ x.identifier for x in self._caches_list[index + 1:]], \ "Caches' identifiers must be uniques." # Nom correct : assert re.match('^[a-zA-Z][a-zA-Z0-9_-]*$', cache.name), \ "A cache name must begins with a letter and must not " \ "contains any spacial character nor space." # Taille positive : assert cache.size >= 0, "A cache size must be positive." # Access time >= 0: assert cache.access_time >= 0, "An access time must be positive." def get_hyperperiod(self): """ Compute and return the hyperperiod of the tasks. """ return _lcm([x.period for x in self.task_info_list]) @property def duration_ms(self): return self.duration / self.cycles_per_ms @property def simulation_file(self): return self._simulation_file @property def cur_dir(self): return self._cur_dir @property def caches_list(self): return self._caches_list @property def task_info_list(self): """ List of tasks (TaskInfo objects). """ return self._task_info_list @property def proc_info_list(self): """ List of processors (ProcInfo objects). """ return self._proc_info_list @property def scheduler_info(self): """ SchedulerInfo object. """ return self._scheduler_info def add_task(self, name, identifier, task_type="Periodic", abort_on_miss=True, period=10, activation_date=0, n_instr=0, mix=0.5, stack_file="", wcet=0, acet=0, et_stddev=0, deadline=10, base_cpi=1.0, followed_by=None, list_activation_dates=[], preemption_cost=0, data=None): """ Helper method to create a TaskInfo and add it to the list of tasks. """ if data is None: data = dict((k, None) for k in self.task_data_fields) task = TaskInfo(name, identifier, task_type, abort_on_miss, period, activation_date, n_instr, mix, (stack_file, self.cur_dir), wcet, acet, et_stddev, deadline, base_cpi, followed_by, list_activation_dates, preemption_cost, data) self.task_info_list.append(task) return task def add_processor(self, name, identifier, cs_overhead=0, cl_overhead=0, migration_overhead=0, speed=1.0): """ Helper method to create a ProcInfo and add it to the list of processors. """ proc = ProcInfo( identifier, name, cs_overhead, cl_overhead, migration_overhead, speed) self.proc_info_list.append(proc) return proc
def init(self): PartitionedScheduler.init(self, SchedulerInfo("simso.schedulers.EDF_mono"), decreasing_worst_fit)
def init(self): PartitionedScheduler.init(self, SchedulerInfo("EDF_mono", EDF_mono))
def init(self): self.groups = [] self.task_to_group = {} try: k = self.data['K'] except KeyError: k = len(self.processors) m = len(self.processors) sep = Fraction(k) / Fraction(1 + k) if k < m else 1 light_tasks = [t for t in self.task_list if t.wcet < sep * t.period] heavy_tasks = [t for t in self.task_list if t.wcet >= sep * t.period] # Mapping task to scheduler. self.map_task_sched = {} self.map_cpu_sched = {} cpus = [] for i, cpu in enumerate(self.processors): # Instantiate a scheduler. sched = Modified_EDF(self.sim, SchedulerInfo()) sched.add_processor(cpu) sched.init() # Append the processor to a list with an initial utilization of 0. cpus.append([cpu, sched, Fraction(0)]) # Affect the scheduler to the processor. self.map_cpu_sched[cpu.identifier] = sched # Affect to the correct group. if i >= len(heavy_tasks): if (i - len(heavy_tasks)) % k == 0: group = Group(self.sim) group.schedulers.append(sched) self.groups.append(group) else: self.groups[-1].schedulers.append(sched) # Affect heavy tasks to individual processors. p = 0 for task in heavy_tasks: cpu, sched, _ = cpus[p] # Affect the task to the processor. self.map_task_sched[task.identifier] = sched sched.add_task(task) # Put the task on that processor. task.cpu = cpu p += 1 self.task_to_group[task] = None # Custom Next Fit for task in light_tasks: g = (p - len(heavy_tasks)) // k if cpus[p][2] + Fraction(task.wcet) / Fraction(task.period) <= 1.0: cpu, sched, _ = cpus[p] # Affect the task to the processors. self.map_task_sched[task.identifier] = sched sched.add_task(task) # Put the task on that processor. task.cpu = cpu cpus[p][2] += Fraction(task.wcet) / Fraction(task.period) self.groups[g].tasks.append(task) self.task_to_group[task] = self.groups[g] if cpus[p][2] == 1: p += 1 else: if (p + 1 - len(heavy_tasks)) % k == 0: cpu, sched, _ = cpus[p + 1] # Affect the task to the processor. self.map_task_sched[task.identifier] = sched sched.add_task(task) # Put the task on that processor. task.cpu = cpu cpus[p + 1][2] += \ Fraction(task.wcet) / Fraction(task.period) self.groups[g + 1].tasks.append(task) self.task_to_group[task] = self.groups[g + 1] else: # Split in 2. u1 = 1 - cpus[p][2] u2 = Fraction(task.wcet) / Fraction(task.period) - u1 cpus[p][1].migrating_task2 = (task, u1) cpus[p + 1][1].migrating_task1 = (task, u2) cpus[p + 1][2] = u2 self.groups[g].tasks.append(task) self.task_to_group[task] = self.groups[g] p += 1