def allocate(
        self,
        platform: Platform,
        tasks_data: TasksData,
) -> (TasksAllocations, List[Anomaly], List[Metric]):
    """Compute allocations for all tasks by applying static rules.

    Rules are gathered from the ``rules`` attribute and, optionally, from
    the config file referenced by ``config`` (a missing file is logged and
    ignored). Returns (tasks_allocations, anomalies, metrics); anomalies
    and metrics are always empty for this allocator.
    """
    effective_rules = list(self.rules) if self.rules else []

    if self.config:
        if os.path.exists(self.config):
            effective_rules.extend(load_config(self.config))
        else:
            log.warning(
                'StaticAllocator: cannot find config file %r - ignoring!',
                self.config)

    if not effective_rules:
        log.warning('StaticAllocator: no rules were provided!')
        return {}, [], []

    log.log(TRACE, 'StaticAllocator: handling allocations for %i tasks. ',
            len(tasks_data))
    for _, data in tasks_data.items():
        labels_text = ' '.join(
            '%s=%s' % (k, v) for k, v in sorted(data.labels.items()))
        log.log(TRACE, '%s', labels_text)

    tasks_allocations = _build_allocations_from_rules(
        tasks_data, effective_rules)

    log.debug('StaticAllocator: final tasks allocations: \n %s',
              pprint.pformat(tasks_allocations))
    return tasks_allocations, [], []
def _build_tasks_memory(tasks_data: TasksData, platform: Platform) -> TasksMemory:
    """Return (task, memory_limit, numa_preferences) tuples, ordered by
    memory limit descending (largest consumers first)."""
    total_memory = _get_platform_total_memory(platform)
    tasks_memory = [
        (task,
         _get_task_memory_limit(data.measurements, total_memory, task,
                                data.resources),
         _get_numa_node_preferences(data.measurements, platform.numa_nodes))
        for task, data in tasks_data.items()
    ]
    tasks_memory.sort(key=lambda entry: entry[1], reverse=True)
    return tasks_memory
def _build_tasks_metrics(tasks_data: TasksData) -> List[Metric]:
    """Build metrics for all tasks."""
    all_metrics: List[Metric] = []
    for data in tasks_data.values():
        per_task_metrics = export_metrics_from_measurements(data.measurements)
        # Decorate metrics with task specific labels.
        for metric in per_task_metrics:
            metric.labels.update(data.labels)
        all_metrics.extend(per_task_metrics)
    return all_metrics
def _update_tasks_data_with_allocations(tasks_data: TasksData,
                                        current_allocations: TasksAllocations):
    """Attach current allocations to the matching tasks' data, in place.

    Tasks without an entry in current_allocations are left untouched.
    """
    for task, task_allocations in current_allocations.items():
        if task in tasks_data:
            # A shallow copy suffices: the mapping is one level deep with
            # immutable leaves.
            tasks_data[task].allocations = dict(task_allocations)
def _build_allocations_from_rules(tasks_data: TasksData, rules):
    """Translate a list of static rules into per-task allocations.

    Every rule must contain an 'allocations' mapping; a rule may narrow the
    set of matched tasks by exact 'task_id' or by regex-matched 'labels',
    and a rule with neither matches all tasks. Later rules are merged on
    top of earlier ones via merge_rules().
    """
    tasks_allocations = {}

    # Iterate over rules and apply one by one.
    for rule_idx, rule in enumerate(rules):
        if 'allocations' not in rule:
            log.warning('StaticAllocator(%s): missing "allocations" - ignore!',
                        rule_idx)
            continue

        log.debug('StaticAllocator(%s): processing %s rule.', rule_idx,
                  '(%s)' % rule['name'] if 'name' in rule else '')

        new_task_allocations = rule['allocations']
        if not new_task_allocations:
            log.log(TRACE, 'StaticAllocator(%s): allocations are empty - ignore!',
                    rule_idx)
            continue

        # Convert if necessary.
        if 'rdt' in new_task_allocations and isinstance(
                new_task_allocations['rdt'], dict):
            new_task_allocations[AllocationType.RDT] = RDTAllocation(
                **new_task_allocations['rdt'])

        # Prepare match_task_ids filter:
        if 'task_id' in rule:
            # by task_id
            task_id = rule['task_id']
            match_task_ids = {task_id}
            log.log(TRACE, 'StaticAllocator(%s): match by task_id=%r',
                    rule_idx, rule['task_id'])
        # Find all tasks that matches.
        elif 'labels' in rule:
            labels = rule['labels']
            # by labels
            match_task_ids = set()
            for task, data in tasks_data.items():
                matching_label_names = set(data.labels) & set(labels)
                for label_name in matching_label_names:
                    if re.match(str(labels[label_name]),
                                data.labels[label_name]):
                        match_task_ids.add(task)
                        # BUG FIX: previously logged `task_id`, which is
                        # undefined in this branch (NameError unless an
                        # earlier 'task_id' rule happened to bind it) and
                        # wrong even when bound; log the matched task.
                        log.log(TRACE,
                                'StaticAllocator(%s): match task %r by label=%s',
                                rule_idx, task, label_name)
        else:
            # match everything
            log.log(TRACE, 'StaticAllocator(%s): match all tasks', rule_idx)
            match_task_ids = tasks_data.keys()

        # for matching tasks calculate and remember target_tasks_allocations
        log.log(TRACE, 'StaticAllocator(%s): applying allocations for %i tasks',
                rule_idx, len(match_task_ids))

        rule_tasks_allocations = {}
        # Set rules for every matching task.
        for match_task_id in match_task_ids:
            rule_tasks_allocations[match_task_id] = new_task_allocations

        # Merge rules with previous rules.
        tasks_allocations = merge_rules(tasks_allocations,
                                        rule_tasks_allocations)

    return tasks_allocations