def configure_augmentations(self, context, pm):
    """Reconcile active augmentations with those requested by this job's spec.

    Instruments and output processors that are enabled but not requested
    get disabled; requested ones that are not yet enabled get enabled.

    :param context: execution context providing the plugin cache
                    (``context.cm.plugin_cache``).
    :param pm: the output-processor manager.
    """
    self.logger.info('Configuring augmentations')
    with indentcontext():
        instruments_to_enable = set()
        output_processors_to_enable = set()
        enabled_instruments = {i.name for i in instrument.get_enabled()}
        enabled_output_processors = {p.name for p in pm.get_enabled()}

        # Partition requested augmentations by plugin kind.
        # (The redundant list() copy of .values() has been dropped: the
        # dict is not mutated during iteration.)
        for augmentation in self.spec.augmentations.values():
            augmentation_cls = context.cm.plugin_cache.get_plugin_class(augmentation)
            if augmentation_cls.kind == 'instrument':
                instruments_to_enable.add(augmentation)
            elif augmentation_cls.kind == 'output_processor':
                output_processors_to_enable.add(augmentation)

        # Disable unrequired instruments
        for instrument_name in enabled_instruments.difference(instruments_to_enable):
            instrument.disable(instrument_name)

        # Enable additional instruments
        for instrument_name in instruments_to_enable.difference(enabled_instruments):
            instrument.enable(instrument_name)

        # Disable unrequired output_processors
        for processor in enabled_output_processors.difference(output_processors_to_enable):
            pm.disable(processor)

        # Enable additional output_processors
        for processor in output_processors_to_enable.difference(enabled_output_processors):
            pm.enable(processor)
def _discover_in_module(self, module):  # NOQA pylint: disable=too-many-branches
    """Register every valid Plugin subclass defined directly in *module*.

    A class is skipped (with a debug message where useful) when it is not
    a class, not a Plugin subclass, merely imported into the module from
    elsewhere, or missing a ``kind`` or ``name`` attribute.

    :param module: an already-imported module object to scan.
    :raises PluginLoaderError: if registering a plugin fails and
        ``self.keep_going`` is not set; otherwise the error is logged
        as a warning and discovery continues.
    """
    self.logger.debug('Checking module %s', module.__name__)
    with log.indentcontext():
        for obj in vars(module).values():
            # Guard clauses replace the original nested-if pyramid.
            if not inspect.isclass(obj):
                continue
            if not issubclass(obj, Plugin):
                continue
            # Skip classes that were imported into this module rather
            # than defined in it, to avoid double-registration.
            if obj.__module__ != module.__name__:
                continue
            if not obj.kind:
                message = 'Skipping plugin {} as it does not define a kind'
                self.logger.debug(message.format(obj.__name__))
                continue
            if not obj.name:
                message = 'Skipping {} {} as it does not define a name'
                self.logger.debug(
                    message.format(obj.kind, obj.__name__))
                continue
            try:
                self._add_found_plugin(obj)
            except PluginLoaderError as e:
                if self.keep_going:
                    self.logger.warning(e)
                else:
                    # Bare raise preserves the original traceback
                    # (``raise e`` would add a redundant frame).
                    raise
def configure_augmentations(self, context, pm):
    """Bring the sets of enabled instruments and output processors in
    line with the augmentations requested by this job's spec."""
    self.logger.info('Configuring augmentations')
    with indentcontext():
        wanted_instruments = set()
        wanted_processors = set()
        active_instruments = set(i.name for i in instrument.get_enabled())
        active_processors = set(p.name for p in pm.get_enabled())

        # Sort the requested augmentations into instrument vs processor.
        for name in list(self.spec.augmentations.values()):
            kind = context.cm.plugin_cache.get_plugin_class(name).kind
            if kind == 'instrument':
                wanted_instruments.add(name)
            elif kind == 'output_processor':
                wanted_processors.add(name)

        # Turn off instruments that are active but no longer wanted.
        for name in active_instruments - wanted_instruments:
            instrument.disable(name)
        # Turn on wanted instruments that are not yet active.
        for name in wanted_instruments - active_instruments:
            instrument.enable(name)

        # Apply the same reconciliation to output processors.
        for name in active_processors - wanted_processors:
            pm.disable(name)
        for name in wanted_processors - active_processors:
            pm.enable(name)
def teardown(self, context):
    """Run the workload's teardown phase, skipping it entirely when the
    target has stopped responding (there is nothing to tear down on)."""
    if not context.tm.is_responsive:
        self.logger.info('Target unresponsive; not tearing down.')
        return
    self.logger.info('Tearing down job {}'.format(self))
    # Emit WORKLOAD_TEARDOWN before/after signals around the teardown.
    with indentcontext(), signal.wrap('WORKLOAD_TEARDOWN', self, context):
        self.workload.teardown(context)
def initialize(self, context):
    """Initialize the job's workload and mark the job as PENDING."""
    self.logger.info('Initializing job {}'.format(self))
    with indentcontext():
        # Emit WORKLOAD_INITIALIZED before/after signals around the
        # workload's own initialization.
        with signal.wrap('WORKLOAD_INITIALIZED', self, context):
            self.workload.logger.context = context
            self.workload.initialize(context)
        self.set_status(Status.PENDING)
        self._has_been_initialized = True
def load(self):
    """Instantiate, initialize, and register every available resource getter."""
    for gettercls in self.loader.list_plugins('resource_getter'):
        self.logger.debug('Loading getter {}'.format(gettercls.name))
        getter = self.loader.get_plugin(name=gettercls.name,
                                        kind="resource_getter")
        # Indent log output produced while the getter is brought up.
        with log.indentcontext():
            getter.initialize()
            getter.register(self)
        self.getters.append(getter)
def run(self, context):
    """Execute the workload, recording wall-clock duration in
    ``self.run_time`` even if the workload raises."""
    self.logger.info('Running job {}'.format(self))
    with indentcontext(), signal.wrap('WORKLOAD_EXECUTION', self, context):
        start_time = datetime.utcnow()
        try:
            self.workload.run(context)
        finally:
            # Duration is captured in finally so a failed run is timed too.
            self.run_time = datetime.utcnow() - start_time
def initialize_run(self):
    """Hook error/warning callbacks, start the run, and initialize jobs."""
    self.logger.info('Initializing run')
    # Route logged errors/warnings back through our callbacks.
    signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
    signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)
    self.context.start_run()
    self.pm.initialize(self.context)
    with log.indentcontext():
        self.context.initialize_jobs()
    # Persist run state now that jobs are set up.
    self.context.write_state()
def initialize(self, context):
    """Initialize the job's workload, mark the job PENDING, and push the
    updated job state into the context."""
    self.logger.info('Initializing job {}'.format(self))
    with indentcontext():
        with signal.wrap('WORKLOAD_INITIALIZED', self, context):
            self.workload.logger.context = context
            self.workload.initialize(context)
        self.set_status(Status.PENDING)
        self._has_been_initialized = True
        context.update_job_state(self)
def finalize(self, context):
    """Run workload finalization, unless the job was never initialized
    or the target has become unresponsive."""
    if not self._has_been_initialized:
        return
    if not context.tm.is_responsive:
        self.logger.info('Target unresponsive; not finalizing.')
        return
    # NOTE(review): the trailing space in this message is preserved from
    # the original for byte-compatible log output.
    self.logger.info('Finalizing job {} '.format(self))
    with indentcontext(), signal.wrap('WORKLOAD_FINALIZED', self, context):
        self.workload.finalize(context)
def process_output(self, context):
    """Extract the workload's results and update its output, skipping
    failed jobs and unresponsive targets."""
    if not context.tm.is_responsive:
        self.logger.info('Target unresponsive; not processing job output.')
        return
    self.logger.info('Processing output for job {}'.format(self))
    with indentcontext():
        # Failed jobs produce no results worth extracting.
        if self.status != Status.FAILED:
            with signal.wrap('WORKLOAD_RESULT_EXTRACTION', self, context):
                self.workload.extract_results(context)
                context.extract_results()
            with signal.wrap('WORKLOAD_OUTPUT_UPDATE', self, context):
                self.workload.update_output(context)
def do_for_each_proc(self, method_name, message, *args):
    """Invoke *method_name* on every enabled processor that defines it.

    :param method_name: name of the processor method to call.
    :param message: log-message template; formatted with the processor's
        name before each call.
    :param args: positional arguments forwarded to the processor method.

    Exceptions raised by an individual processor are logged via
    ``log_error`` rather than propagated, so one failing processor does
    not prevent the rest from running.
    """
    with indentcontext():
        for proc in self.processors:
            # Guard clauses replace the original nested ifs.
            if not proc.is_enabled:
                continue
            proc_func = getattr(proc, method_name, None)
            if proc_func is None:
                continue
            try:
                self.logger.info(message.format(proc.name))
                proc_func(*args)
            except Exception as e:  # pylint: disable=broad-except
                # KeyboardInterrupt derives from BaseException, so it is
                # never caught by ``except Exception``; the original
                # isinstance() re-raise check here was dead code and has
                # been removed.
                log_error(e, self.logger)
def finalize_run(self):
    """Finalize completed jobs, process and export run output, then
    detach the error/warning signal callbacks."""
    self.logger.info('Run completed')
    with log.indentcontext():
        for job in self.context.completed_jobs:
            job.finalize(self.context)
    self.logger.info('Finalizing run')
    self.context.end_run()
    # Re-enable everything so run-level processing sees all processors.
    self.pm.enable_all()
    with signal.wrap('RUN_OUTPUT_PROCESSED', self):
        self.pm.process_run_output(self.context)
        self.pm.export_run_output(self.context)
    self.pm.finalize(self.context)
    # Mirror of the connect() calls made when the run was initialized.
    signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
    signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
def configure_target(self, context):
    """Apply this job's runtime parameters to the target device."""
    self.logger.info('Configuring target for job {}'.format(self))
    with indentcontext():
        runtime_params = self.spec.runtime_parameters
        context.tm.commit_runtime_parameters(runtime_params)
def _log_self(self):
    """Debug-log this node's kind followed by each config key/value pair."""
    logger.debug('Creating {} node'.format(self.kind))
    with log.indentcontext():
        for cfg_key, cfg_value in self.config.items():
            logger.debug('"{}" to "{}"'.format(cfg_key, cfg_value))
def add_section(self, section, workloads, group):
    """Add a section node under the root and attach *workloads* to it."""
    section_node = self.root_node.add_section(section, group)
    with log.indentcontext():
        for workload in workloads:
            section_node.add_workload(workload)
def setup(self, context):
    """Run the workload's setup phase inside WORKLOAD_SETUP signals."""
    self.logger.info('Setting up job {}'.format(self))
    with indentcontext(), signal.wrap('WORKLOAD_SETUP', self, context):
        self.workload.setup(context)