def test_action_connection_timeout(self):
    """
    Test connection timeout specified for a particular action
    """
    sample = os.path.join(os.path.dirname(__file__),
                          './sample_jobs/uboot-ramdisk.yaml')
    with open(sample, 'r') as uboot_ramdisk:
        data = yaml.load(uboot_ramdisk)
    # The sample job carries a 4 minute job-level connection timeout.
    self.assertEqual(240, Timeout.parse(data['timeouts']['connection']))
    # Override the connection timeout for the uboot-retry action alone.
    data['timeouts']['connections'] = {'uboot-retry': {'seconds': 20}}
    job = self.create_custom_job(yaml.dump(data))
    boot = [item for item in job.pipeline.actions
            if item.name == 'uboot-action'][0]
    retry = [item for item in boot.internal_pipeline.actions
             if item.name == 'uboot-retry'][0]
    # Action timeout still comes from the device configuration.
    self.assertEqual(
        retry.timeout.duration,
        Timeout.parse(job.device['timeouts']['actions'][retry.name]))
    # Connection timeout comes from the per-action override above.
    self.assertEqual(
        Timeout.parse(data['timeouts']['connections'][retry.name]),
        retry.connection_timeout.duration)
    self.assertEqual(90, retry.timeout.duration)
def _timeouts(self, data, job):
    """Apply the timeout settings from the submitted definition.

    Sets the job timeout on *job* and records the default action and
    test durations in the parser context, when each is present.
    """
    if not ('timeouts' in data and data['timeouts']):
        return
    timeouts = data['timeouts']
    if 'job' in timeouts:
        job.timeout = Timeout(data['job_name'],
                              Timeout.parse(timeouts['job']))
    if 'action' in timeouts:
        self.context['default_action_duration'] = Timeout.parse(
            timeouts['action'])
    if 'test' in timeouts:
        self.context['default_test_duration'] = Timeout.parse(
            timeouts['test'])
def _timeouts(self, data, job):
    """Populate job timeout and default durations from the definition."""
    if 'timeouts' in data and data['timeouts']:
        spec = data['timeouts']
        if 'job' in spec:
            # Job-wide timeout, named after the job itself.
            job.timeout = Timeout(data['job_name'], Timeout.parse(spec['job']))
        if 'action' in spec:
            self.context['default_action_duration'] = Timeout.parse(spec['action'])
        if 'test' in spec:
            self.context['default_test_duration'] = Timeout.parse(spec['test'])
def build_action(action_data, testdata, submission):
    """Create an ActionData entry for one pipeline action.

    Maps the static testdata derived from the job definition to the
    runtime pipeline construction. Returns None (logging the reason)
    when the action data has no known section or type name.
    """
    # test for a known section
    logger = logging.getLogger("lava_results_app")
    if "section" not in action_data:
        # Fix: Logger.warn is a deprecated alias of Logger.warning.
        logger.warning("Invalid action data - missing section")
        return
    metatype = MetaType.get_section(action_data["section"])
    if metatype is None:  # 0 is allowed
        # Fix: pass lazy %-args to the logger instead of pre-formatting.
        logger.debug("Unrecognised metatype in action_data: %s",
                     action_data["section"])
        return
    # lookup the type from the job definition.
    type_name = MetaType.get_type_name(action_data["section"], submission)
    if not type_name:
        logger.debug("type_name failed for %s metatype %s",
                     action_data["section"], MetaType.TYPE_CHOICES[metatype])
        return
    action_meta, created = MetaType.objects.get_or_create(name=type_name,
                                                          metatype=metatype)
    if created:
        action_meta.save()
    # max_retries is optional in the action data.
    max_retry = action_data.get("max_retries")
    action = ActionData.objects.create(
        action_name=action_data["name"],
        action_level=action_data["level"],
        action_summary=action_data["summary"],
        testdata=testdata,
        action_description=action_data["description"],
        meta_type=action_meta,
        max_retries=max_retry,
        timeout=int(Timeout.parse(action_data["timeout"])),
    )
    with transaction.atomic():
        action.save()
def _check_data(self, data):
    """Validate an incoming JSON API call for this protocol.

    Decodes *data*, validates the required fields, applies optional
    poll_delay/timeout settings, and returns the decoded dict with the
    request name normalised (dashes to underscores).

    Raises JobError on any invalid input.
    """
    try:
        json_data = json.loads(data)
    except (ValueError, TypeError) as exc:
        raise JobError("Invalid data for %s protocol: %s %s" % (self.name, data, exc))
    if not isinstance(json_data, dict):
        raise JobError("Invalid data type %s for protocol %s" % (data, self.name))
    if not json_data:
        raise JobError("No data to be sent over protocol %s" % self.name)
    if 'request' not in json_data:
        raise JobError("Bad API call over protocol - missing request")
    if json_data["request"] == "aggregate":
        raise JobError("Pipeline submission has not been implemented.")
    if "poll_delay" in json_data:
        self.settings['poll_delay'] = int(json_data["poll_delay"])
    if 'timeout' in json_data:
        if isinstance(json_data['timeout'], dict):
            self.poll_timeout.duration = Timeout.parse(
                json_data['timeout'])
        # Fix: single isinstance with a tuple of types replaces the
        # chained isinstance-or-isinstance.
        elif isinstance(json_data['timeout'], (int, float)):
            self.poll_timeout.duration = json_data['timeout']
        else:
            self.logger.debug(json_data['timeout'])
            raise JobError("Invalid timeout request")
        self.logger.debug("Setting poll timeout of %s seconds",
                          int(self.poll_timeout.duration))
    if 'messageID' not in json_data:
        raise JobError("Missing messageID")
    # handle conversion of api calls to internal functions
    json_data['request'] = json_data['request'].replace('-', '_')
    return json_data
def setUp(self):
    """Build fake job parameters with a three second job timeout."""
    self.parameters = {
        "job_name": "fakejob",
        'output_dir': mkdtemp(),
        'timeouts': {'job': {'seconds': 3}},
        "actions": [
            {
                'deploy': {'failure_retry': 3},
                'boot': {'failure_retry': 4},
                'test': {'failure_retry': 5},
            }
        ],
    }
    self.fakejob = TestTimeout.FakeJob(self.parameters)
    # copy of the _timeout function from parser.
    timeouts = self.parameters.get('timeouts', {})
    if 'job' in timeouts:
        self.fakejob.timeout = Timeout(self.parameters['job_name'],
                                       Timeout.parse(timeouts['job']))
def _check_data(self, data):
    """Validate an incoming JSON API call for this protocol.

    Decodes *data*, validates the required fields, applies optional
    poll_delay/timeout settings, and returns the decoded dict with the
    request name normalised (dashes to underscores).

    Raises JobError on any invalid input.
    """
    try:
        json_data = json.loads(data)
    except (ValueError, TypeError) as exc:
        raise JobError("Invalid data for %s protocol: %s %s" % (self.name, data, exc))
    # Fix: type(x) != dict rejects dict subclasses and is unidiomatic;
    # use isinstance (matching the sibling implementation of this method).
    if not isinstance(json_data, dict):
        raise JobError("Invalid data type %s for protocol %s" % (data, self.name))
    if not json_data:
        raise JobError("No data to be sent over protocol %s" % self.name)
    if 'request' not in json_data:
        raise JobError("Bad API call over protocol - missing request")
    if json_data["request"] == "aggregate":
        raise JobError("Pipeline submission has not been implemented.")
    if "poll_delay" in json_data:
        self.settings['poll_delay'] = int(json_data["poll_delay"])
    if 'timeout' in json_data:
        if isinstance(json_data['timeout'], dict):
            self.poll_timeout.duration = Timeout.parse(json_data['timeout'])
        # Fix: one isinstance call with a tuple of types.
        elif isinstance(json_data['timeout'], (int, float)):
            self.poll_timeout.duration = json_data['timeout']
        else:
            self.logger.debug(json_data['timeout'])
            raise JobError("Invalid timeout request")
        self.logger.debug("Setting poll timeout of %s seconds",
                          int(self.poll_timeout.duration))
    if 'messageID' not in json_data:
        raise JobError("Missing messageID")
    # handle conversion of api calls to internal functions
    json_data['request'] = json_data['request'].replace('-', '_')
    return json_data
def setUp(self):
    """Build fake job parameters (common namespace, 3s job timeout)."""
    super(TestTimeout, self).setUp()
    self.parameters = {
        "job_name": "fakejob",
        'output_dir': mkdtemp(),
        'timeouts': {'job': {'seconds': 3}},
        "actions": [{
            'deploy': {'namespace': 'common', 'failure_retry': 3},
            'boot': {'namespace': 'common', 'failure_retry': 4},
            'test': {'namespace': 'common', 'failure_retry': 5},
        }],
    }
    self.fakejob = TestTimeout.FakeJob(self.parameters)
    # copy of the _timeout function from parser.
    job_spec = self.parameters.get('timeouts', {}).get('job')
    if job_spec is not None:
        self.fakejob.timeout = Timeout(self.parameters['job_name'],
                                       Timeout.parse(job_spec))
def test_timeout_inheritance(self):
    """
    test that classes pick up block timeouts

    Each action in the internal_pipeline needs to pick up the timeout
    specified in the job definition block for the top level parent action.
    """
    test_retry = [item for item in self.job.pipeline.actions
                  if item.name == 'lava-test-retry'][0]
    sample_job_file = os.path.join(os.path.dirname(__file__),
                                   'sample_jobs/qemu-debian-installer.yaml')
    with open(sample_job_file, 'r') as jobdef:
        data = yaml.load(jobdef)
    # The single test block in the sample definition.
    testdata = [block['test'] for block in data['actions']
                if 'test' in block][0]
    duration = Timeout.parse(testdata['timeout'])
    self.assertEqual(duration, test_retry.timeout.duration)
    shell = [item for item in test_retry.internal_pipeline.actions
             if item.name == 'lava-test-shell'][0]
    self.assertEqual(duration, shell.timeout.duration)
    if shell.timeout.duration > shell.connection_timeout.duration:
        self.assertEqual(duration, shell.timeout.duration)
    else:
        self.fail("Incorrect timeout calculation")
def test_action_connection_timeout(self):
    """
    Test connection timeout specified for a particular action
    """
    sample = os.path.join(os.path.dirname(__file__),
                          './sample_jobs/uboot-ramdisk.yaml')
    # Fix: the original passed an anonymous open() handle straight to
    # yaml.load, leaking the file descriptor; use a context manager.
    with open(sample, 'r') as uboot_ramdisk:
        data = yaml.load(uboot_ramdisk)
    # Override the connection timeout for the uboot-retry action alone.
    data['timeouts']['connections'] = {'uboot-retry': {'seconds': 20}}
    job = self.create_custom_job(yaml.dump(data))
    boot = [action for action in job.pipeline.actions
            if action.name == 'uboot-action'][0]
    retry = [action for action in boot.internal_pipeline.actions
             if action.name == 'uboot-retry'][0]
    # Action timeout still comes from the device configuration.
    self.assertEqual(
        retry.timeout.duration,
        Timeout.parse(job.device['timeouts']['actions'][retry.name]))
    self.assertEqual(
        Timeout.parse(job.device['timeouts']['connections'][retry.name]),
        retry.connection_timeout.duration
    )
def setup(self, parameters):
    """
    Retrieve the poll_timeout from the protocol parameters which are set
    after init.
    """
    if MultinodeProtocol.name not in parameters:
        return
    proto_params = parameters[MultinodeProtocol.name]
    if 'timeout' in proto_params:
        # Seed the base message with the parsed timeout duration.
        self.base_message = {
            'timeout': Timeout.parse(proto_params['timeout'])}
def setup(self, parameters):
    """
    Retrieve the poll_timeout from the protocol parameters which are set
    after init.
    """
    if MultinodeProtocol.name not in parameters:
        return
    multinode = parameters[MultinodeProtocol.name]
    if 'timeout' not in multinode:
        return
    # Seed the base message with the parsed timeout duration.
    self.base_message = {'timeout': Timeout.parse(multinode['timeout'])}
def test_multinode_timeout(self):
    """
    Test the protocol timeout is assigned to the action
    """
    testshell = [item for item in self.client_job.pipeline.actions
                 if isinstance(item, MultinodeTestAction)][0]
    testshell.validate()
    poll_durations = [p.poll_timeout.duration for p in testshell.protocols]
    self.assertIn(30, poll_durations)
    timeout_params = testshell.parameters['lava-multinode']['timeout']
    self.assertIn('minutes', timeout_params)
    self.assertEqual(10, timeout_params['minutes'])
    # The signal director base message must carry the parsed duration.
    self.assertEqual(testshell.signal_director.base_message['timeout'],
                     Timeout.parse(timeout_params))
def test_action_connection_timeout(self):
    """
    Test connection timeout specified for a particular action
    """
    y_file = os.path.join(os.path.dirname(__file__),
                          './sample_jobs/uboot-ramdisk.yaml')
    with open(y_file, 'r') as uboot_ramdisk:
        data = yaml.load(uboot_ramdisk)
    # Job-level connection timeout from the sample definition.
    self.assertEqual(240, Timeout.parse(data['timeouts']['connection']))
    job = self.create_custom_job(yaml.dump(data))
    boot = [item for item in job.pipeline.actions
            if item.name == 'uboot-action'][0]
    retry = [item for item in boot.internal_pipeline.actions
             if item.name == 'uboot-retry'][0]
    # Set by the job global action timeout
    self.assertEqual(90, retry.timeout.duration)
    self.assertEqual(45, retry.connection_timeout.duration)
def test_multi_deploy(self):
    """Run a pipeline built from several fake deploy blocks and check
    that each action keeps its own parameters and cleared data."""
    self.assertIsNotNone(self.parsed_data)
    # Fixed job id 4212; no dispatcher config needed for the fake job.
    job = Job(4212, self.parsed_data, None)
    job.timeout = Timeout("Job", Timeout.parse({'minutes': 2}))
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.parameters['output_dir'] = mkdtemp()
    job.logger = DummyLogger()
    job.pipeline = pipeline
    counts = {}
    # Build one TestDeploy per named block in the parsed definition;
    # constructing it must leave the action data empty.
    for action_data in self.parsed_data['actions']:
        for name in action_data:
            counts.setdefault(name, 1)
            parameters = action_data[name]
            test_deploy = TestMultiDeploy.TestDeploy(
                pipeline, parameters, job)
            self.assertEqual({}, test_deploy.action.data)
            counts[name] += 1
    # check that only one action has the example set
    self.assertEqual(['nowhere'], [
        detail['deploy']['example']
        for detail in self.parsed_data['actions']
        if 'example' in detail['deploy']
    ])
    self.assertEqual(['faked', 'valid'], [
        detail['deploy']['parameters']
        for detail in self.parsed_data['actions']
        if 'parameters' in detail['deploy']
    ])
    # All three constructed actions are on the pipeline in order.
    self.assertIsInstance(pipeline.actions[0],
                          TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[1],
                          TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[2],
                          TestMultiDeploy.TestDeployAction)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    self.assertEqual(job.run(), 0)
    # After the run, each action's data reflects the parameters of the
    # action that ran last in its slot, not the first action's.
    self.assertNotEqual(pipeline.actions[0].data,
                        {'fake-deploy': pipeline.actions[0].parameters})
    self.assertEqual(pipeline.actions[1].data,
                     {'fake-deploy': pipeline.actions[2].parameters})
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(pipeline.actions[2].data,
                     {'fake-deploy': pipeline.actions[2].parameters})
def test_panda_template(self):
    """Render the panda device-type template and check the u-boot
    command timeout."""
    data = """{% extends 'panda.jinja2' %}
{% set connection_command = 'telnet serial4 7012' %}
{% set hard_reset_command = '/usr/bin/pduclient --daemon staging-master --hostname pdu15 --command reboot --port 05' %}
{% set power_off_command = '/usr/bin/pduclient --daemon staging-master --hostname pdu15 --command off --port 05' %}
{% set power_on_command = '/usr/bin/pduclient --daemon staging-master --hostname pdu15 --command on --port 05' %}"""
    self.assertTrue(self.validate_data('staging-panda-01', data))
    template = prepare_jinja_template('staging-panda-01', data,
                                      system_path=False)
    template_dict = yaml.load(template.render())
    action_timeouts = template_dict['timeouts']['actions']
    self.assertIn('u-boot-commands', action_timeouts)
    self.assertEqual(120.0,
                     Timeout.parse(action_timeouts['u-boot-commands']))
def test_testshell(self):
    """The first action of the retry pipeline is a valid TestShellAction
    with the expected timeout duration."""
    testshell = None
    for action in self.job.pipeline.actions:
        self.assertIsNotNone(action.name)
        if isinstance(action, TestShellRetry):
            testshell = action.pipeline.actions[0]
            break
    self.assertIsInstance(testshell, TestShellAction)
    self.assertTrue(testshell.valid)
    # Fall back to the default duration when no timeout is declared.
    time_int = (Timeout.parse(testshell.parameters['timeout'])
                if 'timeout' in testshell.parameters
                else Timeout.default_duration())
    self.assertEqual(datetime.timedelta(seconds=time_int).total_seconds(),
                     testshell.timeout.duration)
def __init__(self, parameters):
    """Configure the multinode protocol from the job parameters.

    Sets up the default system timeout and, for a role which waits for
    lava-start from another role, records the delayed start and its
    timeout. A matching expect_role/role pair is recorded as an error.
    """
    super(MultinodeProtocol, self).__init__(parameters)
    self.blocks = 4 * 1024
    # how long between polls (in seconds)
    self.system_timeout = Timeout('system', LAVA_MULTINODE_SYSTEM_TIMEOUT)
    self.settings = None
    self.sock = None
    self.base_message = None
    self.logger = logging.getLogger('dispatcher')
    self.delayed_start = False
    params = parameters['protocols'][self.name]
    if 'request' in params and 'lava-start' == params['request'] and 'expect_role' in params:
        if params['expect_role'] != params['role']:
            # This role blocks until the expected role sends lava-start,
            # so the system timeout is taken from the request parameters.
            self.delayed_start = True
            self.system_timeout.duration = Timeout.parse(params['timeout'])
        else:
            self.errors = "expect_role must not match the role declaring lava_start"
def __init__(self, parameters, job_id):
    """Configure the multinode protocol from the job parameters.

    Sets up the default system timeout and, for a role which waits for
    lava-start from another role, records the delayed start and its
    timeout. A matching expect_role/role pair is recorded as an error
    and logged as a warning.
    """
    super(MultinodeProtocol, self).__init__(parameters, job_id)
    self.blocks = 4 * 1024
    # how long between polls (in seconds)
    self.system_timeout = Timeout('system', LAVA_MULTINODE_SYSTEM_TIMEOUT)
    self.settings = None
    self.sock = None
    self.base_message = None
    self.logger = logging.getLogger('dispatcher')
    self.delayed_start = False
    params = parameters['protocols'][self.name]
    if 'request' in params and 'lava-start' == params[
            'request'] and 'expect_role' in params:
        if params['expect_role'] != params['role']:
            # This role blocks until the expected role sends lava-start,
            # so the system timeout is taken from the request parameters.
            self.delayed_start = True
            self.system_timeout.duration = Timeout.parse(params['timeout'])
        else:
            self.errors = "expect_role must not match the role declaring lava_start"
            self.logger.warning(self.errors)
def test_timeout_inheritance(self):
    """
    test that classes pick up block timeouts

    Each action in the internal_pipeline needs to pick up the timeout
    specified in the job definition block for the top level parent action.
    """
    test_retry = [item for item in self.job.pipeline.actions
                  if item.name == "lava-test-retry"][0]
    sample_job_file = os.path.join(
        os.path.dirname(__file__),
        "sample_jobs/qemu-debian-installer.yaml")
    with open(sample_job_file, "r") as jobdef:
        data = yaml.load(jobdef)
    # The single test block in the sample definition.
    testdata = [block["test"] for block in data["actions"]
                if "test" in block][0]
    duration = Timeout.parse(testdata["timeout"])
    self.assertEqual(duration, test_retry.timeout.duration)
    shell = [item for item in test_retry.internal_pipeline.actions
             if item.name == "lava-test-shell"][0]
    self.assertEqual(duration, shell.timeout.duration)
    if shell.timeout.duration > shell.connection_timeout.duration:
        self.assertEqual(duration, shell.timeout.duration)
    else:
        self.fail("Incorrect timeout calculation")
def build_action(action_data, testdata, submission):
    """Create an ActionData entry for one pipeline action.

    Returns None (after logging the reason) when the action data has no
    known section or no resolvable type name.
    """
    # test for a known section
    logger = logging.getLogger('dispatcher-master')
    if 'section' not in action_data:
        logger.warning("Invalid action data - missing section")
        return
    metatype = MetaType.get_section(action_data['section'])
    if metatype is None:  # 0 is allowed
        logger.debug("Unrecognised metatype in action_data: %s",
                     action_data['section'])
        return
    # lookup the type from the job definition.
    # NOTE(review): this variant passes the whole action_data dict to
    # get_type_name, while a sibling variant passes only
    # action_data['section'] — confirm which signature is current.
    type_name = MetaType.get_type_name(action_data, submission)
    if not type_name:
        logger.debug(
            "type_name failed for %s metatype %s",
            action_data['section'], MetaType.TYPE_CHOICES[metatype])
        return
    action_meta, created = MetaType.objects.get_or_create(
        name=type_name, metatype=metatype)
    if created:
        action_meta.save()
    # max_retries is optional in the action data.
    max_retry = None
    if 'max_retries' in action_data:
        max_retry = action_data['max_retries']
    # maps the static testdata derived from the definition to the runtime pipeline construction
    action = ActionData.objects.create(
        action_name=action_data['name'],
        action_level=action_data['level'],
        action_summary=action_data['summary'],
        testdata=testdata,
        action_description=action_data['description'],
        meta_type=action_meta,
        max_retries=max_retry,
        timeout=int(Timeout.parse(action_data['timeout']))
    )
    with transaction.atomic():
        action.save()
def build_action(action_data, testdata, submission):
    """Create an ActionData entry for one pipeline action.

    Returns None (after logging the reason) when the action data has no
    known section or no resolvable type name.
    """
    # test for a known section
    logger = logging.getLogger('dispatcher-master')
    if 'section' not in action_data:
        logger.warning("Invalid action data - missing section")
        return
    metatype = MetaType.get_section(action_data['section'])
    if metatype is None:  # 0 is allowed
        logger.debug("Unrecognised metatype in action_data: %s",
                     action_data['section'])
        return
    # lookup the type from the job definition.
    type_name = MetaType.get_type_name(action_data['section'], submission)
    if not type_name:
        logger.debug(
            "type_name failed for %s metatype %s",
            action_data['section'], MetaType.TYPE_CHOICES[metatype])
        return
    action_meta, created = MetaType.objects.get_or_create(
        name=type_name, metatype=metatype)
    if created:
        action_meta.save()
    # max_retries is optional in the action data.
    max_retry = None
    if 'max_retries' in action_data:
        max_retry = action_data['max_retries']
    # maps the static testdata derived from the definition to the runtime pipeline construction
    action = ActionData.objects.create(
        action_name=action_data['name'],
        action_level=action_data['level'],
        action_summary=action_data['summary'],
        testdata=testdata,
        action_description=action_data['description'],
        meta_type=action_meta,
        max_retries=max_retry,
        timeout=int(Timeout.parse(action_data['timeout']))
    )
    with transaction.atomic():
        action.save()
def test_testshell(self):
    """The retry pipeline's child is a valid TestShellAction whose
    timeout differs from the default action timeout."""
    testshell = None
    for action in self.job.pipeline.actions:
        self.assertIsNotNone(action.name)
        if isinstance(action, TestShellRetry):
            testshell = action.pipeline.children[action.pipeline][0]
            break
    self.assertIsInstance(testshell, TestShellAction)
    self.assertNotIn('boot-result', testshell.data)
    self.assertTrue(testshell.valid)
    # Fall back to the default duration when no timeout is declared.
    if 'timeout' in testshell.parameters:
        time_int = Timeout.parse(testshell.parameters['timeout'])
    else:
        time_int = Timeout.default_duration()
    expected = datetime.timedelta(seconds=time_int).total_seconds()
    self.assertEqual(expected, testshell.timeout.duration)
    self.assertNotEqual(testshell.parameters['default_action_timeout'],
                        testshell.timeout.duration)
def test_panda_template(self):
    """Render the panda device-type template and check the bootloader
    timeout and the u-boot ramdisk commands."""
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    logger = logging.getLogger('unittests')
    logger.disabled = True
    logger.propagate = False
    data = """{% extends 'panda.jinja2' %}
{% set connection_command = 'telnet serial4 7012' %}
{% set hard_reset_command = '/usr/bin/pduclient --daemon staging-master --hostname pdu15 --command reboot --port 05' %}
{% set power_off_command = '/usr/bin/pduclient --daemon staging-master --hostname pdu15 --command off --port 05' %}
{% set power_on_command = '/usr/bin/pduclient --daemon staging-master --hostname pdu15 --command on --port 05' %}"""
    self.assertTrue(self.validate_data('staging-panda-01', data))
    template = prepare_jinja_template('staging-panda-01', data,
                                      system_path=self.system)
    template_dict = yaml.load(template.render())
    action_timeouts = template_dict['timeouts']['actions']
    self.assertIn('bootloader-commands', action_timeouts)
    self.assertEqual(180.0,
                     Timeout.parse(action_timeouts['bootloader-commands']))
    commands = template_dict['actions']['boot']['methods']['u-boot'][
        'ramdisk']['commands']
    self.assertIsNotNone(commands)
    self.assertIsInstance(commands, list)
    # Every bootargs line must route the console to ttyO2.
    checked = False
    for line in commands:
        if 'setenv bootargs' in line:
            self.assertIn('console=ttyO2', line)
            checked = True
    self.assertTrue(checked)
    # The commands must also raise initrd_high.
    checked = any('setenv initrd_high' in line for line in commands)
    self.assertTrue(checked)
def _timeouts(self, data, job):
    """Set the job-level timeout from the submitted definition, if any."""
    timeouts = data.get('timeouts', None)
    if timeouts is None:
        return
    if 'job' in timeouts:
        job.timeout = Timeout(data['job_name'],
                              Timeout.parse(timeouts['job']))
def __init__(self, parameters):
    """Fake job fixture with a dummy logger and a two minute timeout."""
    # Fixed job id 4212 matches the other fake jobs in these tests.
    super(TestAdjuvant.FakeJob, self).__init__(4212, parameters, None)
    self.logger = DummyLogger()
    duration = Timeout.parse({'minutes': 2})
    self.timeout = Timeout("FakeJob", duration)