def load(self, state, raw, source):
    """Parse an agenda into *state*.

    :param state: configuration state object, updated in place.
    :param raw: deserialized agenda content; must be a dict at top level.
    :param source: origin of the agenda (file path/name), used in messages
                   and recorded on ``state.agenda`` on success.
    :raises ConfigError: on any structural or validation problem; parser
                         errors (``SerializerSyntaxError``) are wrapped into
                         ``ConfigError`` with the source name prepended.
    """
    logger.debug('Parsing agenda from "{}"'.format(source))
    log.indent()
    try:
        if not isinstance(raw, dict):
            raise ConfigError(
                'Invalid agenda, top level entry must be a dict')
        self._populate_and_validate_config(state, raw, source)
        # _pop_* remove their entries from raw; anything left afterwards
        # is an unrecognized top-level key and is reported as an error.
        sections = self._pop_sections(raw)
        global_workloads = self._pop_workloads(raw)
        if not global_workloads:
            # Fixed typo: "avaliable" -> "available".
            msg = 'No jobs available. Please ensure you have specified at '\
                  'least one workload to run.'
            raise ConfigError(msg)
        if raw:
            msg = 'Invalid top level agenda entry(ies): "{}"'
            raise ConfigError(msg.format('", "'.join(list(raw.keys()))))
        sect_ids, wkl_ids = self._collect_ids(sections, global_workloads)
        self._process_global_workloads(state, global_workloads, wkl_ids)
        self._process_sections(state, sections, sect_ids, wkl_ids)
        state.agenda = source
    except (ConfigError, SerializerSyntaxError) as e:
        raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
    finally:
        # Always restore log indentation, even on error.
        log.dedent()
def load(self, state, raw, source):
    """Parse an agenda into *state*.

    :param state: configuration state object, updated in place.
    :param raw: deserialized agenda content; must be a dict at top level.
    :param source: origin of the agenda (file path/name), used in messages
                   and recorded on ``state.agenda`` on success.
    :raises ConfigError: on any structural or validation problem; parser
                         errors (``SerializerSyntaxError``) are wrapped into
                         ``ConfigError`` with the source name prepended.
    """
    logger.debug('Parsing agenda from "{}"'.format(source))
    log.indent()
    try:
        if not isinstance(raw, dict):
            raise ConfigError('Invalid agenda, top level entry must be a dict')
        self._populate_and_validate_config(state, raw, source)
        # _pop_* remove their entries from raw; anything left afterwards
        # is an unrecognized top-level key and is reported as an error.
        sections = self._pop_sections(raw)
        global_workloads = self._pop_workloads(raw)
        if not global_workloads:
            # Fixed typo: "avaliable" -> "available".
            msg = 'No jobs available. Please ensure you have specified at '\
                  'least one workload to run.'
            raise ConfigError(msg)
        if raw:
            msg = 'Invalid top level agenda entry(ies): "{}"'
            raise ConfigError(msg.format('", "'.join(list(raw.keys()))))
        sect_ids, wkl_ids = self._collect_ids(sections, global_workloads)
        self._process_global_workloads(state, global_workloads, wkl_ids)
        self._process_sections(state, sections, sect_ids, wkl_ids)
        state.agenda = source
    except (ConfigError, SerializerSyntaxError) as e:
        raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
    finally:
        # Always restore log indentation, even on error.
        log.dedent()
def generate_jobs(self, context):
    """Instantiate and load the run's jobs from the configured job specs.

    Each generated spec/iteration pair becomes a ``Job`` that is loaded
    against the target, appended to ``self._jobs``, and registered with
    ``context.run_state``. Sets ``self._jobs_generated`` on completion.

    :param context: execution context providing the target manager
                    (``context.tm``) and run state.
    :raises ConfigError: if no job specs were generated, which would
                         otherwise silently produce a run with no jobs.
    """
    job_specs = self.jobs_config.generate_job_specs(context.tm)
    if not job_specs:
        # Guard added: an empty spec list means the run would do nothing;
        # fail early with a clear message instead.
        raise ConfigError('No jobs available for running.')
    exec_order = self.run_config.execution_order
    log.indent()
    # permute_iterations expands each spec into (spec, iteration) pairs
    # in the configured execution order.
    for spec, i in permute_iterations(job_specs, exec_order):
        job = Job(spec, i, context)
        job.load(context.tm.target)
        self._jobs.append(job)
        context.run_state.add_job(job)
    log.dedent()
    self._jobs_generated = True
def load(self, state, raw, source, wrap_exceptions=True):  # pylint: disable=too-many-branches
    """Parse raw config, distributing entries to the appropriate stores.

    Entries are consumed from ``raw`` in priority order: WA core settings,
    run configuration, global job-spec configuration; whatever remains is
    assumed to be plugin or global-alias configuration and is cached.

    :param state: configuration state object, updated in place.
    :param raw: dict of configuration entries; consumed (mutated) as
                entries are matched.
    :param source: origin of the config, used in messages and registered
                   with the plugin cache.
    :param wrap_exceptions: if True, re-raise ``ConfigError`` with the
                            source name prepended; otherwise propagate
                            the original exception.
    """
    logger.debug('Parsing config from "{}"'.format(source))
    log.indent()
    try:
        state.plugin_cache.add_source(source)
        # These two keys are only valid in other contexts; reject early.
        if 'run_name' in raw:
            msg = '"run_name" can only be specified in the config '\
                  'section of an agenda'
            raise ConfigError(msg)
        if 'id' in raw:
            raise ConfigError('"id" cannot be set globally')

        merge_augmentations(raw)

        # Get WA core configuration
        # pop_aliased_param removes the matched entry from raw, so each
        # entry is claimed by at most one of the loops below.
        for cfg_point in state.settings.configuration.values():
            value = pop_aliased_param(cfg_point, raw)
            if value is not None:
                logger.debug('Setting meta "{}" to "{}"'.format(
                    cfg_point.name, value))
                state.settings.set(cfg_point.name, value)

        # Get run specific configuration
        for cfg_point in state.run_config.configuration.values():
            value = pop_aliased_param(cfg_point, raw)
            if value is not None:
                logger.debug('Setting run "{}" to "{}"'.format(
                    cfg_point.name, value))
                state.run_config.set(cfg_point.name, value)

        # Get global job spec configuration
        for cfg_point in JobSpec.configuration.values():
            value = pop_aliased_param(cfg_point, raw)
            if value is not None:
                logger.debug('Setting global "{}" to "{}"'.format(
                    cfg_point.name, value))
                state.jobs_config.set_global_value(cfg_point.name, value)

        for name, values in raw.items():
            # Assume that all leftover config is for a plug-in or a global
            # alias it is up to PluginCache to assert this assumption
            logger.debug('Caching "{}" with "{}"'.format(
                identifier(name), values))
            state.plugin_cache.add_configs(identifier(name), values, source)

    except ConfigError as e:
        if wrap_exceptions:
            raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
        else:
            raise e
    finally:
        # Always restore log indentation, even on error.
        log.dedent()
def generate_jobs(self, context):
    """Build the run's job list from the configured job specs.

    Each (spec, iteration) pair produced by ``permute_iterations`` becomes
    a ``Job`` that is loaded against the target, recorded on this object,
    and registered with the run state. Raises ``ConfigError`` when the
    configuration yields no specs at all.
    """
    specs = self.jobs_config.generate_job_specs(context.tm)
    if not specs:
        raise ConfigError('No jobs available for running.')
    order = self.run_config.execution_order
    log.indent()
    for spec, iteration in permute_iterations(specs, order):
        new_job = Job(spec, iteration, context)
        new_job.load(context.tm.target)
        self._jobs.append(new_job)
        context.run_state.add_job(new_job)
    log.dedent()
    self._jobs_generated = True
def load(self, state, raw, source, wrap_exceptions=True):  # pylint: disable=too-many-branches
    """Parse raw config, distributing entries to the appropriate stores.

    Entries are consumed from ``raw`` in priority order: WA core settings,
    run configuration, global job-spec configuration; whatever remains is
    assumed to be plugin or global-alias configuration and is cached.

    :param state: configuration state object, updated in place.
    :param raw: dict of configuration entries; consumed (mutated) as
                entries are matched.
    :param source: origin of the config, used in messages and registered
                   with the plugin cache.
    :param wrap_exceptions: if True, re-raise ``ConfigError`` with the
                            source name prepended; otherwise propagate
                            the original exception.
    """
    logger.debug('Parsing config from "{}"'.format(source))
    log.indent()
    try:
        state.plugin_cache.add_source(source)
        # These two keys are only valid in other contexts; reject early.
        if 'run_name' in raw:
            msg = '"run_name" can only be specified in the config '\
                  'section of an agenda'
            raise ConfigError(msg)
        if 'id' in raw:
            raise ConfigError('"id" cannot be set globally')

        merge_augmentations(raw)

        # Get WA core configuration
        # pop_aliased_param removes the matched entry from raw, so each
        # entry is claimed by at most one of the loops below.
        for cfg_point in state.settings.configuration.values():
            value = pop_aliased_param(cfg_point, raw)
            if value is not None:
                logger.debug('Setting meta "{}" to "{}"'.format(cfg_point.name, value))
                state.settings.set(cfg_point.name, value)

        # Get run specific configuration
        for cfg_point in state.run_config.configuration.values():
            value = pop_aliased_param(cfg_point, raw)
            if value is not None:
                logger.debug('Setting run "{}" to "{}"'.format(cfg_point.name, value))
                state.run_config.set(cfg_point.name, value)

        # Get global job spec configuration
        for cfg_point in JobSpec.configuration.values():
            value = pop_aliased_param(cfg_point, raw)
            if value is not None:
                logger.debug('Setting global "{}" to "{}"'.format(cfg_point.name, value))
                state.jobs_config.set_global_value(cfg_point.name, value)

        for name, values in raw.items():
            # Assume that all leftover config is for a plug-in or a global
            # alias it is up to PluginCache to assert this assumption
            logger.debug('Caching "{}" with "{}"'.format(identifier(name), values))
            state.plugin_cache.add_configs(identifier(name), values, source)

    except ConfigError as e:
        if wrap_exceptions:
            raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
        else:
            # Bare re-raise (was "raise e") preserves the original
            # traceback instead of appending a new raise site.
            raise
    finally:
        # Always restore log indentation, even on error.
        log.dedent()
def run_next_job(self, context):
    """Run the next queued job, handling reboot policy, status, and cleanup.

    Starts the next job from the context, optionally reboots the target per
    the configured reboot policy, executes the job inside a JOB signal wrap,
    and records the job's final status. Target/job teardown and bookkeeping
    always run via the ``finally`` block.

    :param context: execution context; supplies the job queue, target
                    manager (``context.tm``), and status tracking.
    :raises KeyboardInterrupt: re-raised after marking the job ABORTED and
                               flagging the run as interrupted.
    :raises TargetNotRespondingError: re-raised after marking the job FAILED.
    """
    job = context.start_job()
    self.logger.info('Running job {}'.format(job.id))
    try:
        log.indent()
        # Reboot policy: on-each-job always reboots; on-each-spec reboots
        # only when this job's spec differs from the previous job's.
        if self.context.reboot_policy.reboot_on_each_job:
            self.logger.info('Rebooting on new job.')
            self.context.tm.reboot(context)
        elif self.context.reboot_policy.reboot_on_each_spec and context.spec_changed:
            self.logger.info('Rebooting on new spec.')
            self.context.tm.reboot(context)
        with signal.wrap('JOB', self, context):
            context.tm.start()
            self.do_run_job(job, context)
            context.set_job_status(job, Status.OK)
    except (Exception, KeyboardInterrupt) as e:  # pylint: disable=broad-except
        # KeyboardInterrupt is not an Exception subclass, so it must be
        # listed explicitly to be caught here.
        log.log_error(e, self.logger)
        if isinstance(e, KeyboardInterrupt):
            # User interrupt: mark aborted and propagate to stop the run.
            context.run_interrupted = True
            context.set_job_status(job, Status.ABORTED)
            raise e
        else:
            context.set_job_status(job, Status.FAILED)
            if isinstance(e, TargetNotRespondingError):
                # Target is gone; nothing more can be done -- propagate.
                raise e
            elif isinstance(e, TargetError):
                # Target error but target may still be alive; verify before
                # allowing the run to continue with the next job.
                context.tm.verify_target_responsive(context)
    finally:
        # Teardown runs regardless of job outcome (including re-raise paths).
        self.logger.info('Completing job {}'.format(job.id))
        self.send(signal.JOB_COMPLETED)
        context.tm.stop()
        context.end_job()
        log.dedent()
    # Only reached when no exception propagated out of the try/except.
    self.check_job(job)
def run_next_job(self, context):
    """Run the next queued job, handling reboot policy, status, and cleanup.

    Starts the next job from the context, optionally reboots the target per
    the configured reboot policy, executes the job inside a JOB signal wrap,
    and records the job's final status on the job itself. Target/job
    teardown and bookkeeping always run via the ``finally`` block.

    :param context: execution context; supplies the job queue, target
                    manager (``context.tm``), and run bookkeeping.
    :raises KeyboardInterrupt: re-raised after marking the job ABORTED and
                               flagging the run as interrupted.
    :raises TargetNotRespondingError: re-raised after marking the job FAILED.
    """
    job = context.start_job()
    self.logger.info('Running job {}'.format(job.id))
    try:
        log.indent()
        # Reboot policy: on-each-job always reboots; on-each-spec reboots
        # only when this job's spec differs from the previous job's.
        if self.context.reboot_policy.reboot_on_each_job:
            self.logger.info('Rebooting on new job.')
            self.context.tm.reboot(context)
        elif self.context.reboot_policy.reboot_on_each_spec and context.spec_changed:
            self.logger.info('Rebooting on new spec.')
            self.context.tm.reboot(context)
        with signal.wrap('JOB', self, context):
            context.tm.start()
            self.do_run_job(job, context)
            job.set_status(Status.OK)
    except (Exception, KeyboardInterrupt) as e:  # pylint: disable=broad-except
        # KeyboardInterrupt is not an Exception subclass, so it must be
        # listed explicitly to be caught here.
        log.log_error(e, self.logger)
        if isinstance(e, KeyboardInterrupt):
            # User interrupt: mark aborted and propagate to stop the run.
            context.run_interrupted = True
            job.set_status(Status.ABORTED)
            raise e
        else:
            job.set_status(Status.FAILED)
            if isinstance(e, TargetNotRespondingError):
                # Target is gone; nothing more can be done -- propagate.
                raise e
            elif isinstance(e, TargetError):
                # Target error but target may still be alive; verify before
                # allowing the run to continue with the next job.
                context.tm.verify_target_responsive(context)
    finally:
        # Teardown runs regardless of job outcome (including re-raise paths).
        self.logger.info('Completing job {}'.format(job.id))
        self.send(signal.JOB_COMPLETED)
        context.tm.stop()
        context.end_job()
        log.dedent()
    # Only reached when no exception propagated out of the try/except.
    self.check_job(job)