def test_action_timeout(self):
    """The deploy and test-retry actions must pick up the job-specified
    timeouts, while connection timeouts fall back to the default."""
    factory = Factory()
    job = factory.create_bbb_job('sample_jobs/uboot-ramdisk.yaml')
    deploy = list(filter(
        lambda a: a.name == 'tftp-deploy', job.pipeline.actions))[0]
    test_action = list(filter(
        lambda a: a.name == 'lava-test-retry', job.pipeline.actions))[0]
    # the job definition sets the deploy timeout to 2 minutes
    self.assertEqual(deploy.timeout.duration, 120)
    self.assertEqual(
        deploy.connection_timeout.duration, Timeout.default_duration())
    self.assertEqual(test_action.timeout.duration, 300)
    self.assertEqual(
        test_action.connection_timeout.duration, Timeout.default_duration())
def test_action_timeout(self):
    """Validate action and connection timeouts parsed from uboot-ramdisk.yaml.

    The sample job sets explicit timeouts for the deploy action (2 minutes)
    and the lava-test-shell action (7 minutes, 4-minute connection timeout);
    the deploy connection timeout must differ from both the default and the
    test shell's connection timeout.
    """
    factory = Factory()
    job = factory.create_bbb_job('sample_jobs/uboot-ramdisk.yaml')
    job.validate()
    deploy = [action for action in job.pipeline.actions
              if action.name == 'tftp-deploy'][0]
    test_action = [action for action in job.pipeline.actions
                   if action.name == 'lava-test-retry'][0]
    test_shell = [action for action in test_action.internal_pipeline.actions
                  if action.name == 'lava-test-shell'][0]
    self.assertEqual(test_shell.connection_timeout.duration, 240)  # job specifies 4 minutes
    self.assertEqual(test_shell.timeout.duration, 420)  # job specifies 7 minutes
    self.assertEqual(deploy.timeout.duration, 120)  # job specifies 2 minutes
    self.assertNotEqual(deploy.connection_timeout.duration, Timeout.default_duration())
    # FIX: compare duration against duration — the previous code compared an
    # int with a Timeout object, so the assertion could never fail.
    self.assertNotEqual(deploy.connection_timeout.duration,
                        test_shell.connection_timeout.duration)
    self.assertEqual(test_action.timeout.duration, 300)
def test_action_timeout(self):
    """Validate parsed timeouts, including the nested auto-login action.

    Checks job-specified durations on deploy, lava-test-shell and the
    auto-login action buried inside the uboot retry pipeline.
    """
    factory = ConnectionFactory()
    job = factory.create_bbb_job('sample_jobs/uboot-ramdisk.yaml')
    job.validate()
    deploy = [action for action in job.pipeline.actions
              if action.name == 'tftp-deploy'][0]
    test_action = [action for action in job.pipeline.actions
                   if action.name == 'lava-test-retry'][0]
    test_shell = [action for action in test_action.internal_pipeline.actions
                  if action.name == 'lava-test-shell'][0]
    self.assertEqual(test_shell.connection_timeout.duration, 240)  # job specifies 4 minutes
    self.assertEqual(test_shell.timeout.duration, 300)  # job (test action block) specifies 5 minutes
    self.assertEqual(deploy.timeout.duration, 120)  # job specifies 2 minutes
    self.assertNotEqual(deploy.connection_timeout.duration, Timeout.default_duration())
    # FIX: compare duration against duration — the previous code compared an
    # int with a Timeout object, so the assertion could never fail.
    self.assertNotEqual(deploy.connection_timeout.duration,
                        test_shell.connection_timeout.duration)
    self.assertEqual(test_action.timeout.duration, 300)
    uboot = [action for action in job.pipeline.actions
             if action.name == 'uboot-action'][0]
    retry = [action for action in uboot.internal_pipeline.actions
             if action.name == 'uboot-retry'][0]
    auto = [action for action in retry.internal_pipeline.actions
            if action.name == 'auto-login-action'][0]
    self.assertEqual(auto.timeout.duration / 60, 9)  # 9 minutes in the job def
def test_testshell(self):
    """The shell action wrapped by TestShellRetry must be valid and carry
    the timeout taken from its parameters (or the default duration)."""
    shell_action = None
    for current in self.job.pipeline.actions:
        self.assertIsNotNone(current.name)
        if isinstance(current, TestShellRetry):
            shell_action = current.pipeline.actions[0]
            break
    self.assertIsInstance(shell_action, TestShellAction)
    self.assertTrue(shell_action.valid)
    params = shell_action.parameters
    seconds = (Timeout.parse(params['timeout'])
               if 'timeout' in params else Timeout.default_duration())
    self.assertEqual(
        datetime.timedelta(seconds=seconds).total_seconds(),
        shell_action.timeout.duration)
def test_testshell(self):
    """The shell action inside TestShellRetry must be valid, carry no
    boot-result data, and use the timeout from its own parameters rather
    than the default action timeout."""
    shell_action = None
    for current in self.job.pipeline.actions:
        self.assertIsNotNone(current.name)
        if isinstance(current, TestShellRetry):
            shell_action = current.pipeline.children[current.pipeline][0]
            break
    self.assertIsInstance(shell_action, TestShellAction)
    self.assertNotIn('boot-result', shell_action.data)
    self.assertTrue(shell_action.valid)
    params = shell_action.parameters
    seconds = (Timeout.parse(params['timeout'])
               if 'timeout' in params else Timeout.default_duration())
    self.assertEqual(
        datetime.timedelta(seconds=seconds).total_seconds(),
        shell_action.timeout.duration)
    # the job sets an explicit timeout, so the default must not apply
    self.assertNotEqual(
        params['default_action_timeout'], shell_action.timeout.duration)
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    """Parse a YAML job definition into a Job with a populated Pipeline.

    :param content: YAML job definition text.
    :param device: device configuration object for this job.
    :param job_id: identifier for the new Job.
    :param socket_addr: address passed through to the Job.
    :param output_dir: directory for job output files.
    :param env_dut: environment settings for the device under test.
    :return: the constructed Job with its pipeline attached.
    """
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    for instance in Protocol.select_all(job.parameters):
        job.protocols.append(instance(job.parameters))
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            # FIXME: commands are not fully implemented & may produce a list
            if isinstance(action_data[name], dict):  # isinstance instead of type() is
                action_data[name]['default_action_timeout'] = \
                    self.context['default_action_duration']
                action_data[name]['default_test_timeout'] = \
                    self.context['default_test_duration']
            counts.setdefault(name, 1)
            if name in ('deploy', 'boot', 'test'):
                # reset the context before adding a second deployment and again before third etc.
                if name == 'deploy' and counts[name] >= 2:
                    reset_context = ResetContext()
                    reset_context.section = name
                    pipeline.add_action(reset_context)
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in range(count):  # range, matching the other parse variants
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if isinstance(action_data[name], dict):
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif isinstance(action_data[name], list):
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(
                    action.name, self.context['default_action_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    pipeline.add_action(FinalizeAction())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    """Parse a YAML job definition into a Job with a populated Pipeline.

    Protocols are instantiated in order of their class level.

    :param content: YAML job definition text.
    :param device: device configuration object for this job.
    :param job_id: identifier for the new Job.
    :param socket_addr: address passed through to the Job.
    :param output_dir: directory for job output files.
    :param env_dut: environment settings for the device under test.
    :return: the constructed Job with its pipeline attached.
    """
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    # (lambda parameter renamed to avoid shadowing level_tuple itself)
    job.protocols = [item[0](job.parameters)
                     for item in sorted(level_tuple, key=lambda pair: pair[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            # FIXME: commands are not fully implemented & may produce a list
            if isinstance(action_data[name], dict):  # isinstance instead of type() is
                action_data[name]['default_action_timeout'] = \
                    self.context['default_action_duration']
                action_data[name]['default_test_timeout'] = \
                    self.context['default_test_duration']
            counts.setdefault(name, 1)
            if name in ('deploy', 'boot', 'test'):
                # reset the context before adding a second deployment and again before third etc.
                if name == 'deploy' and counts[name] >= 2:
                    reset_context = ResetContext()
                    reset_context.section = name
                    pipeline.add_action(reset_context)
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in range(count):  # range, matching the other parse variants
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if isinstance(action_data[name], dict):
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif isinstance(action_data[name], list):
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(
                    action.name, self.context['default_action_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    pipeline.add_action(FinalizeAction())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
def parse(self, content, device, job_id, socket_addr, master_cert, slave_cert,
          output_dir=None, env_dut=None):
    """Parse a YAML job definition into a Job with a populated Pipeline.

    :param content: YAML job definition text.
    :param device: device configuration object for this job.
    :param job_id: identifier for the new Job.
    :param socket_addr: address passed through to the Job.
    :param master_cert: master certificate passed through to the Job.
    :param slave_cert: slave certificate passed through to the Job.
    :param output_dir: directory for job output files.
    :param env_dut: environment settings for the device under test.
    :return: the constructed Job with its pipeline attached.
    :raises JobError: if the compatibility value is invalid or cannot be met.
    """
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    self.context['default_connection_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, master_cert, slave_cert, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters, job_id)
                     for item in sorted(level_tuple, key=lambda pair: pair[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # some special handling is needed to tell the overlay classes about the
    # presence or absence of a test action
    test_list = [action for action in data['actions'] if 'test' in action]
    # FIX: the previous check ("test_list and 'test' not in test_list[0]")
    # could never be true, because test_list only contains actions with a
    # 'test' key, so test_action stayed True even for jobs with no test
    # action. Signal absence directly. TODO(review): confirm the overlay
    # classes expect False when the job has no test action.
    test_action = bool(test_list)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            # FIXME: commands are not fully implemented & may produce a list
            if isinstance(action_data[name], dict):
                action_data[name].update(self._map_context_defaults())
            counts.setdefault(name, 1)
            if name in ('deploy', 'boot', 'test'):
                parse_action(action_data, name, device, pipeline, test_action)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in range(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device,
                                         pipeline, test_action)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if isinstance(action_data[name], dict):
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif isinstance(action_data[name], list):
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(
                    action.name, self.context['default_action_duration'])
                action.connection_timeout = Timeout(
                    action.name, self.context['default_connection_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(self._map_context_defaults())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            raise JobError(
                'Dispatcher unable to meet job compatibility requirement. %d > %d'
                % (job_c, data_c))
    return job
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    """Parse a YAML job definition into a Job with a populated Pipeline.

    :param content: YAML job definition text.
    :param device: device configuration object for this job.
    :param job_id: identifier for the new Job.
    :param socket_addr: address passed through to the Job.
    :param output_dir: directory for job output files.
    :param env_dut: environment settings for the device under test.
    :return: the constructed Job with its pipeline attached.
    :raises JobError: if the compatibility value is invalid or cannot be met.
    """
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    self.context['default_connection_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters)
                     for item in sorted(level_tuple, key=lambda pair: pair[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            # FIXME: commands are not fully implemented & may produce a list
            if isinstance(action_data[name], dict):  # isinstance instead of type() is
                action_data[name].update(self._map_context_defaults())
            counts.setdefault(name, 1)
            if name in ('deploy', 'boot', 'test'):
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in range(count):  # range, matching the other parse variants
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if isinstance(action_data[name], dict):
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif isinstance(action_data[name], list):
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(
                    action.name, self.context['default_action_duration'])
                action.connection_timeout = Timeout(
                    action.name, self.context['default_connection_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(self._map_context_defaults())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    logger = logging.getLogger('dispatcher')
    # FIX: was logger.warn("pipeline contains %x", pipeline) — %x requires an
    # integer and fails to format a Pipeline; warn() is also deprecated.
    logger.warning("pipeline contains %s", pipeline)
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            raise JobError(
                'Dispatcher unable to meet job compatibility requirement. %d > %d'
                % (job_c, data_c))
    return job
def parse(self, content, device, job_id, zmq_config, dispatcher_config,
          output_dir=None, env_dut=None):
    """Parse a YAML job definition into a Job with a populated Pipeline.

    Builds a per-namespace map of test action classes so deploy and boot
    actions can populate the pipeline according to the test type they are
    linked with (via namespacing).

    :param content: YAML job definition text.
    :param device: device configuration object for this job.
    :param job_id: identifier for the new Job.
    :param zmq_config: ZMQ configuration passed through to the Job.
    :param dispatcher_config: optional YAML dispatcher configuration.
    :param output_dir: directory for job output files.
    :param env_dut: environment settings for the device under test.
    :return: the constructed Job with its pipeline attached.
    :raises JobError: on unknown action names or unmet compatibility.
    """
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    self.context['default_connection_duration'] = Timeout.default_duration()
    job = Job(job_id, data, zmq_config)
    test_counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    # Load the dispatcher config
    job.parameters['dispatcher'] = {}
    if dispatcher_config is not None:
        job.parameters['dispatcher'] = yaml.load(dispatcher_config)
    # Setup the logging now that we have the parameters
    job.setup_logging()
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters, job_id)
                     for item in sorted(level_tuple, key=lambda pair: pair[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # deploy and boot classes can populate the pipeline differently depending
    # on the test action type they are linked with (via namespacing).
    # This code builds an information dict for each namespace which is then
    # passed as a parameter to each Action class to use.
    test_info = {}
    test_actions = [action for action in data['actions'] if 'test' in action]
    for test_action in test_actions:
        test_parameters = test_action['test']
        test_type = LavaTest.select(device, test_parameters)
        namespace = test_parameters.get('namespace', 'common')
        entry = {'class': test_type, 'parameters': test_parameters}
        if namespace in test_info:
            test_info[namespace].append(entry)
        else:
            test_info.update({namespace: [entry]})
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            # FIXME: commands are not fully implemented & may produce a list
            if isinstance(action_data[name], dict):
                action_data[name].update(self._map_context_defaults())
                namespace = action_data[name].get('namespace', 'common')
                test_counts.setdefault(namespace, 1)
            if name in ('deploy', 'boot', 'test'):
                parse_action(action_data, name, device, pipeline,
                             test_info, test_counts[namespace])
                if name == 'test':
                    test_counts[namespace] += 1
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in range(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            namespace = repeating[repeat_action].get(
                                'namespace', 'common')
                            test_counts.setdefault(namespace, 1)
                            parse_action(repeating, repeat_action, device,
                                         pipeline, test_info,
                                         test_counts[namespace])
                            if repeat_action == 'test':
                                test_counts[namespace] += 1
            elif name == 'command':
                action = CommandAction()
                action.parameters = action_data[name]
                pipeline.add_action(action)
            else:
                # FIX: format spec was "'%'" which raises ValueError
                # (unsupported format character) instead of JobError.
                raise JobError("Unknown action name '%s'" % name)
    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(self._map_context_defaults())
    data['output_dir'] = output_dir
    job.pipeline = pipeline
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            raise JobError(
                'Dispatcher unable to meet job compatibility requirement. %d > %d'
                % (job_c, data_c))
    return job