def test_job_safe(self):
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    action = TestTimeout.SafeAction()
    pipeline.add_action(action)
    pipeline.add_action(TestTimeout.SafeAction())
    finalize = FinalizeAction()
    finalize.parameters['namespace'] = 'common'
    pipeline.add_action(finalize)
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    # run() returns 0 in case of success
    self.assertEqual(self.fakejob.run(), 0)
def test_long_job_safe(self):
    self.fakejob.timeout.duration = 8
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    self.fakejob.pipeline = pipeline
    action = TestTimeout.SafeAction()
    action.timeout.duration = 2
    # the same 2 second action is added twice
    pipeline.add_action(action)
    pipeline.add_action(action)
    pipeline.add_action(TestTimeout.FakeSafeAction())
    pipeline.add_action(TestTimeout.FakeSafeAction())
    finalize = FinalizeAction()
    finalize.parameters['namespace'] = 'common'
    pipeline.add_action(finalize)
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    self.assertEqual(self.fakejob.run(), 0)
def test_job_timeout(self):
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    action = TestTimeout.LongAction()
    pipeline.add_action(action)
    pipeline.add_action(TestTimeout.SafeAction())
    pipeline.add_action(FinalizeAction())
    self.fakejob.set_pipeline(pipeline)
    self.fakejob.device = TestTimeout.FakeDevice()
    with self.assertRaises(JobError):
        self.fakejob.run()
def test_job_safe(self):
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    action = TestTimeout.SafeAction()
    pipeline.add_action(action)
    pipeline.add_action(TestTimeout.SafeAction())
    pipeline.add_action(FinalizeAction())
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    # run() returns 0 in case of success
    self.assertEqual(self.fakejob.run(), 0)
def test_job_timeout(self):
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    action = TestTimeout.LongAction()
    pipeline.add_action(action)
    pipeline.add_action(TestTimeout.SafeAction())
    pipeline.add_action(FinalizeAction())
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    # run() returns 2 for JobError
    self.assertEqual(self.fakejob.run(), 2)
def test_job_safe(self):
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    action = TestTimeout.SafeAction()
    pipeline.add_action(action)
    pipeline.add_action(TestTimeout.SafeAction())
    pipeline.add_action(FinalizeAction())
    self.fakejob.set_pipeline(pipeline)
    self.fakejob.device = TestTimeout.FakeDevice()
    try:
        self.fakejob.run()
    except JobError as exc:
        self.fail(exc)
def test_long_job_safe(self):
    self.fakejob.timeout.duration = 8
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    self.fakejob.pipeline = pipeline
    action = TestTimeout.SafeAction()
    action.timeout.duration = 2
    # the same 2 second action is added twice
    pipeline.add_action(action)
    pipeline.add_action(action)
    pipeline.add_action(TestTimeout.FakeSafeAction())
    pipeline.add_action(TestTimeout.FakeSafeAction())
    pipeline.add_action(FinalizeAction())
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    self.assertEqual(self.fakejob.run(), 0)
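# The tests above exercise actions that either finish inside their timeout
# (SafeAction) or overrun it so that run() raises JobError (LongAction).
# Below is a self-contained sketch of one common way to enforce such a
# per-action limit with SIGALRM; this is illustrative only (Unix-specific),
# not the dispatcher's actual Timeout implementation.
import signal
import time

class JobError(Exception):
    """Raised when an action overruns its allowed duration."""

def run_with_timeout(action_name, duration, func):
    """Run func(), raising JobError if it exceeds duration seconds."""
    def handler(signum, frame):
        raise JobError("%s timed out after %d seconds" % (action_name, duration))
    old_handler = signal.signal(signal.SIGALRM, handler)
    signal.alarm(duration)  # schedule SIGALRM; duration must be an integer
    try:
        return func()
    finally:
        signal.alarm(0)  # cancel any pending alarm
        signal.signal(signal.SIGALRM, old_handler)

# a "safe" action finishes well inside its limit...
run_with_timeout("safe-action", 4, lambda: time.sleep(0.1))
# ...while a "long" action overruns its limit and raises JobError
try:
    run_with_timeout("long-action", 1, lambda: time.sleep(5))
except JobError as exc:
    print(exc)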
def parse(self, content, device, job_id, zmq_config, dispatcher_config,
          output_dir=None, env_dut=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()

    job = Job(job_id, data, zmq_config)
    test_counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    # Load the dispatcher config
    job.parameters['dispatcher'] = {}
    if dispatcher_config is not None:
        config = yaml.load(dispatcher_config)
        if isinstance(config, dict):
            job.parameters['dispatcher'] = config
    # Setup the logging now that we have the parameters
    job.setup_logging()

    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters, job_id) for item in
                     sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)

    # deploy and boot classes can populate the pipeline differently depending
    # on the test action type they are linked with (via namespacing).
    # This code builds an information dict for each namespace which is then
    # passed as a parameter to each Action class to use.
    test_info = {}
    test_actions = [action for action in data['actions'] if 'test' in action]
    for test_action in test_actions:
        test_parameters = test_action['test']
        test_type = LavaTest.select(device, test_parameters)
        namespace = test_parameters.get('namespace', 'common')
        if namespace in test_info:
            test_info[namespace].append({'class': test_type, 'parameters': test_parameters})
        else:
            test_info.update({namespace: [{'class': test_type, 'parameters': test_parameters}]})

    # FIXME: also read permissible overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            # Set a default namespace if needed
            namespace = action_data[name].setdefault('namespace', 'common')
            test_counts.setdefault(namespace, 1)

            if name == 'deploy' or name == 'boot' or name == 'test':
                parse_action(action_data, name, device, pipeline,
                             test_info, test_counts[namespace])
                if name == 'test':
                    test_counts[namespace] += 1
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in range(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            namespace = repeating[repeat_action].setdefault('namespace', 'common')
                            test_counts.setdefault(namespace, 1)
                            parse_action(repeating, repeat_action, device, pipeline,
                                         test_info, test_counts[namespace])
                            if repeat_action == 'test':
                                test_counts[namespace] += 1
            elif name == 'command':
                action = CommandAction()
                action.parameters = action_data[name]
                pipeline.add_action(action)
            else:
                raise JobError("Unknown action name '%s'" % name)

    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(None)
    data['output_dir'] = output_dir
    job.pipeline = pipeline
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            raise JobError('Dispatcher unable to meet job compatibility requirement. '
                           '%d > %d' % (job_c, data_c))
    return job
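# A hedged illustration of the job structure this parse() walks. The shape
# follows the keys the code reads (a top-level 'actions' list, an optional
# 'namespace' per action, a 'repeat' block with 'count' and 'actions', and an
# optional integer 'compatibility'); the field values themselves are made up.
data = {
    'compatibility': 4,  # checked against job.compatibility at the end of parse()
    'actions': [
        {'deploy': {'namespace': 'common', 'to': 'tmpfs'}},
        {'boot': {'namespace': 'common', 'method': 'qemu'}},
        {'test': {'namespace': 'common', 'definitions': []}},
        {'repeat': {            # expanded inline by the 'repeat' branch above
            'count': 2,         # each iteration tags its actions with 'repeat-count'
            'actions': [
                {'boot': {'method': 'qemu'}},
                {'test': {'definitions': []}},
            ],
        }},
    ],
}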
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    for instance in Protocol.select_all(job.parameters):
        job.protocols.append(instance(job.parameters))
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissible overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            if type(action_data[name]) is dict:  # FIXME: commands are not fully implemented & may produce a list
                action_data[name]['default_action_timeout'] = self.context['default_action_duration']
                action_data[name]['default_test_timeout'] = self.context['default_test_duration']
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                # reset the context before adding a second deployment and again before third etc.
                if name == 'deploy' and counts[name] >= 2:
                    reset_context = ResetContext()
                    reset_context.section = name
                    pipeline.add_action(reset_context)
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in xrange(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    pipeline.add_action(FinalizeAction())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
def parse(self, content, device, job_id, socket_addr, master_cert, slave_cert,
          output_dir=None, env_dut=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    self.context['default_connection_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, master_cert, slave_cert, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters, job_id) for item in
                     sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # some special handling is needed to tell the overlay classes about the presence or absence of a test action
    test_action = True
    test_list = [action for action in data['actions'] if 'test' in action]
    if test_list and 'test' not in test_list[0]:
        test_action = False
    # FIXME: also read permissible overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            if isinstance(action_data[name], dict):  # FIXME: commands are not fully implemented & may produce a list
                action_data[name].update(self._map_context_defaults())
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                parse_action(action_data, name, device, pipeline, test_action)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in range(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline, test_action)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if isinstance(action_data[name], dict):
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif isinstance(action_data[name], list):
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                action.connection_timeout = Timeout(action.name, self.context['default_connection_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(self._map_context_defaults())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            raise JobError('Dispatcher unable to meet job compatibility requirement. '
                           '%d > %d' % (job_c, data_c))
    return job
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    self.context['default_connection_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters) for item in
                     sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissible overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            if type(action_data[name]) is dict:  # FIXME: commands are not fully implemented & may produce a list
                action_data[name].update(self._map_context_defaults())
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in xrange(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                action.connection_timeout = Timeout(action.name, self.context['default_connection_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(self._map_context_defaults())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    logger = logging.getLogger('dispatcher')
    logger.warning("pipeline contains %s", pipeline)
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (job_c, data_c))
    return job
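# A minimal sketch of driving one of these parsers, matching the
# (content, device, job_id, socket_addr, ...) signature above. JobParser,
# FakeDevice and the sample file name are assumptions standing in for real
# inputs; job.validate()/job.run() follow the usage in the tests earlier.
parser = JobParser()
with open('sample_job.yaml') as sample_job:
    job = parser.parse(sample_job.read(), FakeDevice(), job_id=4212,
                       socket_addr=None, output_dir='/tmp/lava-out')
job.validate()
job.run()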