def setUp(self):
    """Build a fake job with a three-second job timeout and namespaced actions."""
    super(TestTimeout, self).setUp()
    self.parameters = {
        'job_name': 'fakejob',
        'output_dir': mkdtemp(),
        'timeouts': {'job': {'seconds': 3}},
        'actions': [{
            'deploy': {'namespace': 'common', 'failure_retry': 3},
            'boot': {'namespace': 'common', 'failure_retry': 4},
            'test': {'namespace': 'common', 'failure_retry': 5},
        }],
    }
    self.fakejob = TestTimeout.FakeJob(self.parameters)
    # copy of the _timeout function from parser: apply the job-level timeout.
    timeouts = self.parameters.get('timeouts') or {}
    if 'job' in timeouts:
        self.fakejob.timeout = Timeout(self.parameters['job_name'],
                                       Timeout.parse(timeouts['job']))
def test_action_connection_timeout(self):
    """
    Test connection timeout specified for a particular action

    Overrides the uboot-retry connection timeout in the job definition and
    checks that device and job timeouts are applied to the retry action.
    Fix: use yaml.safe_load — the sample job is plain YAML and plain
    yaml.load can construct arbitrary python objects.
    """
    path = os.path.join(os.path.dirname(__file__),
                        './sample_jobs/uboot-ramdisk.yaml')
    with open(path, 'r') as uboot_ramdisk:
        data = yaml.safe_load(uboot_ramdisk)
    connection_timeout = Timeout.parse(data['timeouts']['connection'])
    self.assertEqual(connection_timeout, 240)
    data['timeouts']['connections'] = {'uboot-retry': {'seconds': 20}}
    job = self.create_custom_job(yaml.dump(data))
    boot = [action for action in job.pipeline.actions
            if action.name == 'uboot-action'][0]
    retry = [action for action in boot.internal_pipeline.actions
             if action.name == 'uboot-retry'][0]
    # device config provides the per-action timeout
    self.assertEqual(
        retry.timeout.duration,
        Timeout.parse(job.device['timeouts']['actions'][retry.name]))
    # the job definition override provides the connection timeout
    self.assertEqual(
        Timeout.parse(data['timeouts']['connections'][retry.name]),
        retry.connection_timeout.duration)
    self.assertEqual(90, retry.timeout.duration)
def _timeouts(self, data, job):
    """Apply job/action/test timeout defaults from the job definition."""
    timeouts = data.get('timeouts') or {}
    if 'job' in timeouts:
        job.timeout = Timeout(data['job_name'], Timeout.parse(timeouts['job']))
    if 'action' in timeouts:
        self.context['default_action_duration'] = Timeout.parse(timeouts['action'])
    if 'test' in timeouts:
        self.context['default_test_duration'] = Timeout.parse(timeouts['test'])
def test_action_timeout(self):
    """Deploy and test blocks pick up timeouts from the job definition."""
    job = Factory().create_bbb_job('sample_jobs/uboot-ramdisk.yaml')
    deploy = next(action for action in job.pipeline.actions
                  if action.name == 'tftp-deploy')
    test_action = next(action for action in job.pipeline.actions
                       if action.name == 'lava-test-retry')
    self.assertEqual(deploy.timeout.duration, 120)  # job specifies 2 minutes
    self.assertEqual(deploy.connection_timeout.duration,
                     Timeout.default_duration())
    self.assertEqual(test_action.timeout.duration, 300)
    self.assertEqual(test_action.connection_timeout.duration,
                     Timeout.default_duration())
def _timeouts(self, data, job):
    """Set the job timeout and default action/test durations from *data*."""
    if not data.get('timeouts'):
        return
    timeouts = data['timeouts']
    if 'job' in timeouts:
        duration = Timeout.parse(timeouts['job'])
        job.timeout = Timeout(data['job_name'], duration)
    # default durations are stored in the parser context for later actions
    for key, context_key in (('action', 'default_action_duration'),
                             ('test', 'default_test_duration')):
        if key in timeouts:
            self.context[context_key] = Timeout.parse(timeouts[key])
def test_action_timeout(self):
    """
    Validate the job and check per-block timeouts of the actions.

    Fix: the original compared deploy.connection_timeout.duration against
    the Timeout instance test_shell.connection_timeout, so the
    assertNotEqual was trivially true; compare durations instead.
    """
    factory = Factory()
    job = factory.create_bbb_job('sample_jobs/uboot-ramdisk.yaml')
    job.validate()
    deploy = [action for action in job.pipeline.actions
              if action.name == 'tftp-deploy'][0]
    test_action = [action for action in job.pipeline.actions
                   if action.name == 'lava-test-retry'][0]
    test_shell = [action for action in test_action.internal_pipeline.actions
                  if action.name == 'lava-test-shell'][0]
    self.assertEqual(test_shell.connection_timeout.duration, 240)  # job specifies 4 minutes
    self.assertEqual(test_shell.timeout.duration, 420)  # job specifies 7 minutes
    self.assertEqual(deploy.timeout.duration, 120)  # job specifies 2 minutes
    self.assertNotEqual(deploy.connection_timeout.duration,
                        Timeout.default_duration())
    # compare durations, not a duration against a Timeout instance
    self.assertNotEqual(deploy.connection_timeout.duration,
                        test_shell.connection_timeout.duration)
    self.assertEqual(test_action.timeout.duration, 300)
def __init__(self, parameters, job_id):
    """XNBD protocol handler."""
    super(XnbdProtocol, self).__init__(parameters, job_id)
    # XNBD_SYSTEM_TIMEOUT comes from utils.constants (default 10000)
    self.system_timeout = Timeout('system', XNBD_SYSTEM_TIMEOUT)
    self.parameters = parameters
    self.port = None
    self.logger = logging.getLogger('dispatcher')
def __init__(self):
    """Deploy an image to a drive with dd."""
    super(DDAction, self).__init__()
    self.name = "dd-image"
    self.description = "deploy image to drive"
    self.summary = "dd image to drive"
    # ten minutes for the dd copy
    self.timeout = Timeout(self.name, 600)
    self.boot_params = None
def __init__(self, parameters):
    """Base protocol setup: logger, poll timeout and job parameters."""
    # FIXME: allow the bare logger to use the zmq socket
    self.logger = logging.getLogger("root")
    self.poll_timeout = Timeout(self.name)
    self.__errors__ = []
    # fix: removed the dead store `self.parameters = None` which was
    # immediately overwritten by the assignment below
    self.parameters = parameters
def _check_data(self, data):
    """
    Decode and validate a JSON request for this protocol.

    Returns the parsed dict with the request name normalised to an
    internal function name; raises JobError for malformed input.
    Fix: use isinstance() for the dict check (was `type(x) != dict`),
    consistent with the newer revision of this method.
    """
    try:
        json_data = json.loads(data)
    except (ValueError, TypeError) as exc:
        raise JobError("Invalid data for %s protocol: %s %s" % (self.name, data, exc))
    if not isinstance(json_data, dict):
        raise JobError("Invalid data type %s for protocol %s" % (data, self.name))
    if not json_data:
        raise JobError("No data to be sent over protocol %s" % self.name)
    if 'request' not in json_data:
        raise JobError("Bad API call over protocol - missing request")
    if json_data["request"] == "aggregate":
        raise JobError("Pipeline submission has not been implemented.")
    if "poll_delay" in json_data:
        self.settings['poll_delay'] = int(json_data["poll_delay"])
    if 'timeout' in json_data:
        # timeout may be a YAML-style dict ({'minutes': 5}) or a bare number
        if isinstance(json_data['timeout'], dict):
            self.poll_timeout.duration = Timeout.parse(json_data['timeout'])
        elif isinstance(json_data['timeout'], (int, float)):
            self.poll_timeout.duration = json_data['timeout']
        else:
            self.logger.debug(json_data['timeout'])
            raise JobError("Invalid timeout request")
        self.logger.debug("Setting poll timeout of %s seconds",
                          int(self.poll_timeout.duration))
    if 'messageID' not in json_data:
        raise JobError("Missing messageID")
    # handle conversion of api calls to internal functions
    json_data['request'] = json_data['request'].replace('-', '_')
    return json_data
def __init__(self):
    """Interactive u-boot command sender."""
    super(UBootCommandsAction, self).__init__()
    self.name = "u-boot-commands"
    self.summary = "interactive u-boot"
    self.description = "send commands to u-boot"
    self.params = None
    # default command timeout constant from utils.constants
    self.timeout = Timeout(self.name, UBOOT_DEFAULT_CMD_TIMEOUT)
def _check_data(self, data):
    """
    Decode and validate a JSON request for this protocol.

    Fix: use isinstance() for the dict check instead of comparing
    type() results directly.
    """
    try:
        json_data = json.loads(data)
    except (ValueError, TypeError) as exc:
        raise JobError("Invalid data for %s protocol: %s %s" % (self.name, data, exc))
    if not isinstance(json_data, dict):
        raise JobError("Invalid data type %s for protocol %s" % (data, self.name))
    if not json_data:
        raise JobError("No data to be sent over protocol %s" % self.name)
    if 'request' not in json_data:
        raise JobError("Bad API call over protocol - missing request")
    if json_data["request"] == "aggregate":
        raise JobError("Pipeline submission has not been implemented.")
    if "poll_delay" in json_data:
        self.settings['poll_delay'] = int(json_data["poll_delay"])
    if 'timeout' in json_data:
        # this revision replaces the whole Timeout rather than its duration
        self.poll_timeout = Timeout(self.name, json_data['timeout'])
    if 'messageID' not in json_data:
        raise JobError("Missing messageID")
    # handle conversion of api calls to internal functions
    json_data['request'] = json_data['request'].replace('-', '_')
    return json_data
def build_action(action_data, testdata, submission):
    """
    Persist one pipeline action description as an ActionData row.

    Fixes: logger.warn() is deprecated in favour of logger.warning(), and
    log messages now use lazy %-style arguments instead of eager string
    formatting (consistent with the dispatcher-master variant).
    """
    # test for a known section
    logger = logging.getLogger("lava_results_app")
    if "section" not in action_data:
        logger.warning("Invalid action data - missing section")
        return
    metatype = MetaType.get_section(action_data["section"])
    if metatype is None:  # 0 is allowed
        logger.debug("Unrecognised metatype in action_data: %s",
                     action_data["section"])
        return
    # lookup the type from the job definition.
    type_name = MetaType.get_type_name(action_data["section"], submission)
    if not type_name:
        logger.debug("type_name failed for %s metatype %s",
                     action_data["section"], MetaType.TYPE_CHOICES[metatype])
        return
    action_meta, created = MetaType.objects.get_or_create(name=type_name,
                                                          metatype=metatype)
    if created:
        action_meta.save()
    max_retry = action_data.get("max_retries")
    action = ActionData.objects.create(
        action_name=action_data["name"],
        action_level=action_data["level"],
        action_summary=action_data["summary"],
        testdata=testdata,
        action_description=action_data["description"],
        meta_type=action_meta,
        max_retries=max_retry,
        timeout=int(Timeout.parse(action_data["timeout"])),
    )
    with transaction.atomic():
        action.save()
def setUp(self):
    """Create a fake job with a three-second job timeout (no namespaces)."""
    self.parameters = {
        'job_name': 'fakejob',
        'output_dir': mkdtemp(),
        'timeouts': {'job': {'seconds': 3}},
        'actions': [{
            'deploy': {'failure_retry': 3},
            'boot': {'failure_retry': 4},
            'test': {'failure_retry': 5},
        }],
    }
    self.fakejob = TestTimeout.FakeJob(self.parameters)
    # copy of the _timeout function from parser.
    timeouts = self.parameters.get('timeouts') or {}
    if 'job' in timeouts:
        self.fakejob.timeout = Timeout(self.parameters['job_name'],
                                       Timeout.parse(timeouts['job']))
def test_timeout_inheritance(self):
    """
    test that classes pick up block timeouts

    Each action in the internal_pipeline needs to pick up the timeout
    specified in the job definition block for the top level parent action.
    Fix: yaml.safe_load — the sample job is plain YAML and plain
    yaml.load can construct arbitrary python objects.
    """
    test_retry = [action for action in self.job.pipeline.actions
                  if action.name == 'lava-test-retry'][0]
    sample_job_file = os.path.join(os.path.dirname(__file__),
                                   'sample_jobs/qemu-debian-installer.yaml')
    with open(sample_job_file, 'r') as jobdef:
        data = yaml.safe_load(jobdef)
    testdata = [block['test'] for block in data['actions'] if 'test' in block][0]
    duration = Timeout.parse(testdata['timeout'])
    self.assertEqual(duration, test_retry.timeout.duration)
    shell = [action for action in test_retry.internal_pipeline.actions
             if action.name == 'lava-test-shell'][0]
    self.assertEqual(duration, shell.timeout.duration)
    # the block timeout must dominate the connection timeout here
    if shell.timeout.duration > shell.connection_timeout.duration:
        self.assertEqual(duration, shell.timeout.duration)
    else:
        self.fail("Incorrect timeout calculation")
def __init__(self, parameters, job_id):
    """Read LXC protocol settings from the job parameters."""
    super(LxcProtocol, self).__init__(parameters, job_id)
    self.system_timeout = Timeout('system', LAVA_LXC_TIMEOUT)
    params = parameters['protocols'][self.name]
    self.persistence = params.get('persist', False)
    # persistent containers keep a stable name; others are suffixed by job id
    if self.persistence:
        self.lxc_name = params['name']
    else:
        self.lxc_name = '-'.join([params['name'], str(job_id)])
    self.lxc_dist = params['distribution']
    self.lxc_release = params['release']
    self.lxc_arch = params.get('arch', None)
    self.lxc_template = params.get('template', 'download')
    self.lxc_mirror = params.get('mirror', None)
    self.lxc_security_mirror = params.get('security_mirror', None)
    self.verbose = params.get('verbose', False)
    self.fastboot_reboot = parameters.get('reboot_to_fastboot', True)
    # a dispatcher-specific lxc path overrides the default LXC_PATH
    self.custom_lxc_path = LXC_PATH != lxc_path(parameters['dispatcher'])
    self.logger = logging.getLogger('dispatcher')
def _check_data(self, data):
    """Validate a JSON message and normalise its request name."""
    try:
        json_data = json.loads(data)
    except (ValueError, TypeError) as exc:
        raise JobError("Invalid data for %s protocol: %s %s" % (self.name, data, exc))
    if not isinstance(json_data, dict):
        raise JobError("Invalid data type %s for protocol %s" % (data, self.name))
    if not json_data:
        raise JobError("No data to be sent over protocol %s" % self.name)
    if 'request' not in json_data:
        raise JobError("Bad API call over protocol - missing request")
    if json_data["request"] == "aggregate":
        raise JobError("Pipeline submission has not been implemented.")
    if "poll_delay" in json_data:
        self.settings['poll_delay'] = int(json_data["poll_delay"])
    if 'timeout' in json_data:
        # timeout may be a YAML-style dict ({'minutes': 5}) or a bare number
        timeout = json_data['timeout']
        if isinstance(timeout, dict):
            self.poll_timeout.duration = Timeout.parse(timeout)
        elif isinstance(timeout, (int, float)):
            self.poll_timeout.duration = timeout
        else:
            self.logger.debug(timeout)
            raise JobError("Invalid timeout request")
        self.logger.debug("Setting poll timeout of %s seconds",
                          int(self.poll_timeout.duration))
    if 'messageID' not in json_data:
        raise JobError("Missing messageID")
    # handle conversion of api calls to internal functions
    json_data['request'] = json_data['request'].replace('-', '_')
    return json_data
def test_multi_deploy(self):
    """Chained deploy blocks each get their own action with cleared context."""
    self.assertIsNotNone(self.parsed_data)
    job = Job(4212, self.parsed_data, None)
    job.timeout = Timeout("Job", Timeout.parse({'minutes': 2}))
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.parameters['output_dir'] = mkdtemp()
    job.logger = DummyLogger()
    job.pipeline = pipeline
    counts = {}
    for action_data in self.parsed_data['actions']:
        for name in action_data:
            counts.setdefault(name, 1)
            parameters = action_data[name]
            test_deploy = TestMultiDeploy.TestDeploy(pipeline, parameters, job)
            # a freshly constructed deploy has no action data yet
            self.assertEqual({}, test_deploy.action.data)
            counts[name] += 1
    # check that only one action has the example set
    self.assertEqual(['nowhere'],
                     [detail['deploy']['example']
                      for detail in self.parsed_data['actions']
                      if 'example' in detail['deploy']])
    self.assertEqual(['faked', 'valid'],
                     [detail['deploy']['parameters']
                      for detail in self.parsed_data['actions']
                      if 'parameters' in detail['deploy']])
    self.assertIsInstance(pipeline.actions[0], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[1], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[2], TestMultiDeploy.TestDeployAction)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    # run() returning 0 means the whole pipeline succeeded
    self.assertEqual(job.run(), 0)
    self.assertNotEqual(pipeline.actions[0].data,
                        {'fake-deploy': pipeline.actions[0].parameters})
    self.assertEqual(pipeline.actions[1].data,
                     {'fake-deploy': pipeline.actions[2].parameters})
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(pipeline.actions[2].data,
                     {'fake-deploy': pipeline.actions[2].parameters})
def __init__(self, parameters, job_id):
    """Base protocol setup: logger, poll timeout, parameters and job id."""
    self.logger = logging.getLogger("dispatcher")
    self.poll_timeout = Timeout(self.name)
    self.__errors__ = []
    # fix: removed the dead store `self.parameters = None` which was
    # immediately overwritten by the assignment below
    self.parameters = parameters
    self.configured = False
    self.job_id = job_id
def __init__(self):
    """Boot an image under QEMU."""
    super(BootQEMUImageAction, self).__init__()
    self.name = 'boot_qemu_image'
    self.summary = "boot QEMU image"
    self.description = "boot image using QEMU command line"
    self.overrides = None
    self.command = []
    # FIXME: decide on a duration for the boot QEMU Image timeout
    self.timeout = Timeout(self.name)
def test_testshell(self):
    """The test shell action honours its own timeout parameter."""
    testshell = None
    for action in self.job.pipeline.actions:
        self.assertIsNotNone(action.name)
        if isinstance(action, TestShellRetry):
            testshell = action.pipeline.actions[0]
            break
    self.assertIsInstance(testshell, TestShellAction)
    self.assertTrue(testshell.valid)
    # an explicit timeout parameter wins over the class default
    time_int = (Timeout.parse(testshell.parameters['timeout'])
                if 'timeout' in testshell.parameters
                else Timeout.default_duration())
    self.assertEqual(datetime.timedelta(seconds=time_int).total_seconds(),
                     testshell.timeout.duration)
def test_check_char(self):
    """check_char lives on the session wrapper, not the raw shell command."""
    shell = ShellCommand("%s\n" % 'ls', Timeout('fake', 30),
                         logger=logging.getLogger())
    if shell.exitstatus:
        raise JobError("%s command exited %d: %s" % (
            'ls', shell.exitstatus, shell.readlines()))
    connection = ShellSession(self.job, shell)
    self.assertFalse(hasattr(shell, 'check_char'))
    self.assertTrue(hasattr(connection, 'check_char'))
    self.assertIsNotNone(connection.check_char)
def __init__(self):
    """Interactive bootloader command sender."""
    super(BootloaderCommandsAction, self).__init__()
    self.name = "bootloader-commands"
    self.summary = "interactive bootloader"
    self.description = "send commands to bootloader"
    self.params = None
    self.method = ""
    # default command timeout constant from utils.constants
    self.timeout = Timeout(self.name, BOOTLOADER_DEFAULT_CMD_TIMEOUT)
def test_action_connection_timeout(self):
    """
    Test connection timeout specified for a particular action

    Fixes: close the sample job file via a context manager (the original
    `yaml.load(open(...))` leaked the handle) and use yaml.safe_load.
    """
    y_file = os.path.join(os.path.dirname(__file__),
                          './sample_jobs/uboot-ramdisk.yaml')
    with open(y_file, 'r') as uboot_ramdisk:
        data = yaml.safe_load(uboot_ramdisk)
    data['timeouts']['connections'] = {'uboot-retry': {'seconds': 20}}
    job = self.create_custom_job(yaml.dump(data))
    boot = [action for action in job.pipeline.actions
            if action.name == 'uboot-action'][0]
    retry = [action for action in boot.internal_pipeline.actions
             if action.name == 'uboot-retry'][0]
    # device config provides the per-action timeout
    self.assertEqual(
        retry.timeout.duration,
        Timeout.parse(job.device['timeouts']['actions'][retry.name]))
    # device config also provides the per-action connection timeout
    self.assertEqual(
        Timeout.parse(job.device['timeouts']['connections'][retry.name]),
        retry.connection_timeout.duration)
def test_action_complete(self):
    """A pipeline whose only action succeeds makes job.run() return 0."""
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    safe = TestTimeout.SafeAction()
    safe.timeout = Timeout(safe.name, duration=2)
    pipeline.add_action(safe)
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    self.assertEqual(self.fakejob.run(), 0)
def setup(self, parameters):
    """
    Retrieve the poll_timeout from the protocol parameters which are set
    after init.
    """
    if MultinodeProtocol.name not in parameters:
        return
    protocol_params = parameters[MultinodeProtocol.name]
    if 'timeout' in protocol_params:
        self.base_message = {
            'timeout': Timeout.parse(protocol_params['timeout'])
        }
def __init__(self, parameters, job_id):
    """Multinode co-ordination protocol; may delay start until lava-start."""
    super(MultinodeProtocol, self).__init__(parameters, job_id)
    self.blocks = 4 * 1024
    # how long between polls (in seconds)
    self.system_timeout = Timeout('system', LAVA_MULTINODE_SYSTEM_TIMEOUT)
    self.settings = None
    self.sock = None
    self.base_message = None
    self.logger = logging.getLogger('dispatcher')
    self.delayed_start = False
    params = parameters['protocols'][self.name]
    if 'request' in params and 'lava-start' == params['request'] and 'expect_role' in params:
        if params['expect_role'] != params['role']:
            # wait for another role to signal lava-start before running
            self.delayed_start = True
            self.system_timeout.duration = Timeout.parse(params['timeout'])
        else:
            self.errors = "expect_role must not match the role declaring lava_start"
            self.logger.warning(self.errors)
def setup(self, parameters):
    """
    Retrieve the poll_timeout from the protocol parameters which are set
    after init.
    """
    try:
        protocol_params = parameters[MultinodeProtocol.name]
    except KeyError:
        return
    if 'timeout' in protocol_params:
        self.base_message = {
            'timeout': Timeout.parse(protocol_params['timeout'])
        }
def test_action_complete(self):
    """A successful action must not raise JobError from job.run()."""
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    safe = TestTimeout.SafeAction()
    safe.timeout = Timeout(safe.name, duration=2)
    pipeline.add_action(safe)
    self.fakejob.set_pipeline(pipeline)
    self.fakejob.device = TestTimeout.FakeDevice()
    try:
        self.fakejob.run()
    except JobError as exc:
        self.fail(exc)
def test_multinode_timeout(self):
    """
    Test the protocol timeout is assigned to the action
    """
    testshell = next(action for action in self.client_job.pipeline.actions
                     if isinstance(action, MultinodeTestAction))
    testshell.validate()
    self.assertIn(30, [p.poll_timeout.duration for p in testshell.protocols])
    multinode_params = testshell.parameters['lava-multinode']
    self.assertIn('minutes', multinode_params['timeout'])
    self.assertEqual(10, multinode_params['timeout']['minutes'])
    # the signal director carries the parsed timeout in its base message
    self.assertEqual(testshell.signal_director.base_message['timeout'],
                     Timeout.parse(multinode_params['timeout']))
def test_action_connection_timeout(self):
    """
    Test connection timeout specified for a particular action

    Fix: yaml.safe_load — the sample job is plain YAML and plain
    yaml.load can construct arbitrary python objects.
    """
    y_file = os.path.join(os.path.dirname(__file__),
                          './sample_jobs/uboot-ramdisk.yaml')
    with open(y_file, 'r') as uboot_ramdisk:
        data = yaml.safe_load(uboot_ramdisk)
    connection_timeout = Timeout.parse(data['timeouts']['connection'])
    self.assertEqual(connection_timeout, 240)
    job = self.create_custom_job(yaml.dump(data))
    boot = [action for action in job.pipeline.actions
            if action.name == 'uboot-action'][0]
    retry = [action for action in boot.internal_pipeline.actions
             if action.name == 'uboot-retry'][0]
    self.assertEqual(retry.timeout.duration, 90)  # Set by the job global action timeout
    self.assertEqual(retry.connection_timeout.duration, 45)
def test_testshell(self):
    """The shell action timeout comes from its parameters, not the default."""
    testshell = None
    for action in self.job.pipeline.actions:
        self.assertIsNotNone(action.name)
        if isinstance(action, TestShellRetry):
            testshell = action.pipeline.children[action.pipeline][0]
            break
    self.assertIsInstance(testshell, TestShellAction)
    self.assertNotIn('boot-result', testshell.data)
    self.assertTrue(testshell.valid)
    # an explicit timeout parameter wins over the class default
    if 'timeout' in testshell.parameters:
        time_int = Timeout.parse(testshell.parameters['timeout'])
    else:
        time_int = Timeout.default_duration()
    expected = datetime.timedelta(seconds=time_int).total_seconds()
    self.assertEqual(expected, testshell.timeout.duration)
    self.assertNotEqual(testshell.parameters['default_action_timeout'],
                        testshell.timeout.duration)
def test_action_timeout(self):
    """
    Testing timeouts does mean that the tests do nothing until the
    timeout happens, so the total length of time to run the tests has to
    increase...
    """
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    failing = TestTimeout.FakeAction()
    failing.timeout = Timeout(failing.name, duration=2)
    pipeline.add_action(failing)
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    # run() returns 2 for JobError
    self.assertEqual(self.fakejob.run(), 2)
def test_action_timeout(self):
    """
    Testing timeouts does mean that the tests do nothing until the
    timeout happens, so the total length of time to run the tests has to
    increase...
    """
    self.assertIsNotNone(self.fakejob.timeout)
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    failing = TestTimeout.FakeAction()
    failing.timeout = Timeout(failing.name, duration=2)
    pipeline.add_action(failing)
    self.fakejob.set_pipeline(pipeline)
    self.fakejob.device = TestTimeout.FakeDevice()
    # the timed-out action must surface as a JobError
    with self.assertRaises(JobError):
        self.fakejob.run()
def __init__(self, parameters, job_id):
    """LXC protocol settings (no persistence support in this revision)."""
    super(LxcProtocol, self).__init__(parameters, job_id)
    self.system_timeout = Timeout('system', LAVA_LXC_TIMEOUT)
    params = parameters['protocols'][self.name]
    # container name is always suffixed with the job id here
    self.lxc_name = '-'.join([params['name'], str(job_id)])
    self.lxc_dist = params['distribution']
    self.lxc_release = params['release']
    self.lxc_arch = params['arch']
    self.lxc_template = params.get('template', 'download')
    self.lxc_mirror = params.get('mirror', None)
    self.lxc_security_mirror = params.get('security_mirror', None)
    self.logger = logging.getLogger('dispatcher')
def __init__(self, parameters):
    """Multinode protocol (single-argument base-class revision)."""
    super(MultinodeProtocol, self).__init__(parameters)
    self.blocks = 4 * 1024
    # how long between polls (in seconds)
    self.system_timeout = Timeout('system', LAVA_MULTINODE_SYSTEM_TIMEOUT)
    self.settings = None
    self.sock = None
    self.base_message = None
    self.logger = logging.getLogger('dispatcher')
    self.delayed_start = False
    params = parameters['protocols'][self.name]
    starting = ('request' in params
                and params['request'] == 'lava-start'
                and 'expect_role' in params)
    if starting:
        if params['expect_role'] != params['role']:
            # wait for another role to signal lava-start before running
            self.delayed_start = True
            self.system_timeout.duration = Timeout.parse(params['timeout'])
        else:
            self.errors = "expect_role must not match the role declaring lava_start"
def test_panda_template(self):
    """
    The panda template exposes a 120 second u-boot-commands action timeout.

    Fix: yaml.safe_load — the rendered device config is plain YAML and
    plain yaml.load can construct arbitrary python objects.
    """
    data = """{% extends 'panda.jinja2' %}
{% set connection_command = 'telnet serial4 7012' %}
{% set hard_reset_command = '/usr/bin/pduclient --daemon staging-master --hostname pdu15 --command reboot --port 05' %}
{% set power_off_command = '/usr/bin/pduclient --daemon staging-master --hostname pdu15 --command off --port 05' %}
{% set power_on_command = '/usr/bin/pduclient --daemon staging-master --hostname pdu15 --command on --port 05' %}"""
    self.assertTrue(self.validate_data('staging-panda-01', data))
    test_template = prepare_jinja_template('staging-panda-01', data,
                                           system_path=False)
    rendered = test_template.render()
    template_dict = yaml.safe_load(rendered)
    self.assertIn('u-boot-commands', template_dict['timeouts']['actions'])
    self.assertEqual(
        120.0,
        Timeout.parse(template_dict['timeouts']['actions']['u-boot-commands']))
def test_action_timeout(self):
    """
    Check deploy/test block timeouts from the job definition.

    Fix: the original compared deploy.connection_timeout.duration against
    the Timeout instance test_shell.connection_timeout, so the
    assertNotEqual was trivially true; compare durations instead.
    """
    factory = ConnectionFactory()
    job = factory.create_bbb_job('sample_jobs/uboot-ramdisk.yaml')
    job.validate()
    deploy = [action for action in job.pipeline.actions
              if action.name == 'tftp-deploy'][0]
    test_action = [action for action in job.pipeline.actions
                   if action.name == 'lava-test-retry'][0]
    test_shell = [action for action in test_action.internal_pipeline.actions
                  if action.name == 'lava-test-shell'][0]
    self.assertEqual(test_shell.connection_timeout.duration, 240)  # job specifies 4 minutes
    self.assertEqual(test_shell.timeout.duration, 300)  # job (test action block) specifies 5 minutes
    self.assertEqual(deploy.timeout.duration, 120)  # job specifies 2 minutes
    self.assertNotEqual(deploy.connection_timeout.duration,
                        Timeout.default_duration())
    # compare durations, not a duration against a Timeout instance
    self.assertNotEqual(deploy.connection_timeout.duration,
                        test_shell.connection_timeout.duration)
    self.assertEqual(test_action.timeout.duration, 300)
    uboot = [action for action in job.pipeline.actions
             if action.name == 'uboot-action'][0]
    retry = [action for action in uboot.internal_pipeline.actions
             if action.name == 'uboot-retry'][0]
    auto = [action for action in retry.internal_pipeline.actions
            if action.name == 'auto-login-action'][0]
    self.assertEqual(auto.timeout.duration / 60, 9)  # 9 minutes in the job def
def test_timeout_inheritance(self):
    """
    test that classes pick up block timeouts

    Each action in the internal_pipeline needs to pick up the timeout
    specified in the job definition block for the top level parent action.
    Fix: yaml.safe_load — the sample job is plain YAML and plain
    yaml.load can construct arbitrary python objects.
    """
    test_retry = [action for action in self.job.pipeline.actions
                  if action.name == "lava-test-retry"][0]
    sample_job_file = os.path.join(os.path.dirname(__file__),
                                   "sample_jobs/qemu-debian-installer.yaml")
    with open(sample_job_file, "r") as jobdef:
        data = yaml.safe_load(jobdef)
    testdata = [block["test"] for block in data["actions"] if "test" in block][0]
    duration = Timeout.parse(testdata["timeout"])
    self.assertEqual(duration, test_retry.timeout.duration)
    shell = [action for action in test_retry.internal_pipeline.actions
             if action.name == "lava-test-shell"][0]
    self.assertEqual(duration, shell.timeout.duration)
    # the block timeout must dominate the connection timeout here
    if shell.timeout.duration > shell.connection_timeout.duration:
        self.assertEqual(duration, shell.timeout.duration)
    else:
        self.fail("Incorrect timeout calculation")
def build_action(action_data, testdata, submission):
    """Persist one pipeline action description as an ActionData row."""
    # test for a known section
    logger = logging.getLogger('dispatcher-master')
    if 'section' not in action_data:
        logger.warning("Invalid action data - missing section")
        return
    metatype = MetaType.get_section(action_data['section'])
    if metatype is None:  # 0 is allowed
        logger.debug("Unrecognised metatype in action_data: %s",
                     action_data['section'])
        return
    # lookup the type from the job definition.
    type_name = MetaType.get_type_name(action_data['section'], submission)
    if not type_name:
        logger.debug(
            "type_name failed for %s metatype %s",
            action_data['section'], MetaType.TYPE_CHOICES[metatype])
        return
    action_meta, created = MetaType.objects.get_or_create(
        name=type_name, metatype=metatype)
    if created:
        action_meta.save()
    max_retry = None
    if 'max_retries' in action_data:
        max_retry = action_data['max_retries']
    # maps the static testdata derived from the definition to the runtime pipeline construction
    action = ActionData.objects.create(
        action_name=action_data['name'],
        action_level=action_data['level'],
        action_summary=action_data['summary'],
        testdata=testdata,
        action_description=action_data['description'],
        meta_type=action_meta,
        max_retries=max_retry,
        timeout=int(Timeout.parse(action_data['timeout']))
    )
    with transaction.atomic():
        # NOTE(review): objects.create() has already saved the row; confirm
        # whether this extra save inside an atomic block is deliberate.
        action.save()
def build_action(action_data, testdata, submission):
    """Persist one pipeline action description as an ActionData row."""
    # test for a known section
    logger = logging.getLogger('dispatcher-master')
    if 'section' not in action_data:
        logger.warning("Invalid action data - missing section")
        return
    metatype = MetaType.get_section(action_data['section'])
    if metatype is None:  # 0 is allowed
        logger.debug("Unrecognised metatype in action_data: %s",
                     action_data['section'])
        return
    # lookup the type from the job definition.
    # NOTE(review): this revision passes the whole action_data dict, while a
    # sibling revision passes action_data['section'] — confirm which
    # signature MetaType.get_type_name expects.
    type_name = MetaType.get_type_name(action_data, submission)
    if not type_name:
        logger.debug(
            "type_name failed for %s metatype %s",
            action_data['section'], MetaType.TYPE_CHOICES[metatype])
        return
    action_meta, created = MetaType.objects.get_or_create(
        name=type_name, metatype=metatype)
    if created:
        action_meta.save()
    max_retry = None
    if 'max_retries' in action_data:
        max_retry = action_data['max_retries']
    # maps the static testdata derived from the definition to the runtime pipeline construction
    action = ActionData.objects.create(
        action_name=action_data['name'],
        action_level=action_data['level'],
        action_summary=action_data['summary'],
        testdata=testdata,
        action_description=action_data['description'],
        meta_type=action_meta,
        max_retries=max_retry,
        timeout=int(Timeout.parse(action_data['timeout']))
    )
    with transaction.atomic():
        # NOTE(review): objects.create() has already saved the row; confirm
        # whether this extra save inside an atomic block is deliberate.
        action.save()
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    """
    Parse YAML job *content* into a Job with a populated pipeline.

    Installs the custom YAML composer/constructor hooks, applies timeout
    defaults, instantiates protocols sorted by level, then walks the
    'actions' list building the pipeline, ending with a FinalizeAction.
    """
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters) for item in
                     sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            if type(action_data[name]) is dict:  # FIXME: commands are not fully implemented & may produce a list
                action_data[name]['default_action_timeout'] = self.context['default_action_duration']
                action_data[name]['default_test_timeout'] = self.context['default_test_duration']
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                # reset the context before adding a second deployment and again before third etc.
                if name == 'deploy' and counts[name] >= 2:
                    reset_context = ResetContext()
                    reset_context.section = name
                    pipeline.add_action(reset_context)
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in xrange(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    pipeline.add_action(FinalizeAction())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    """
    Parse YAML job *content* into a Job with a populated pipeline.

    Fixes over the previous revision:
    - logger.warn() is deprecated -> logger.warning()
    - "%x" cannot format a Pipeline object (it requires an integer and
      would raise TypeError when the record is emitted) -> "%s"
    - the compatibility error message printed the operands in the wrong
      order for its "a > b" wording
    """
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    self.context['default_connection_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters) for item in
                     sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            if type(action_data[name]) is dict:  # FIXME: commands are not fully implemented & may produce a list
                action_data[name].update(self._map_context_defaults())
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in xrange(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                action.connection_timeout = Timeout(action.name, self.context['default_connection_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(self._map_context_defaults())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    logger = logging.getLogger('dispatcher')
    # warning() (warn is deprecated); %s because a Pipeline is not an integer
    logger.warning("pipeline contains %s", pipeline)
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            # required (data_c) exceeds supported (job_c): print in that order
            raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (data_c, job_c))
    return job