def __set_parameters__(self, data):
    """Merge ``data`` into the action parameters and apply overrides.

    After merging, applies any timeout / connection_timeout overrides,
    retry counts (``failure_retry`` or ``repeat``) and the device
    character delay for this action's section.

    :param data: dictionary of parameters to merge into the action.
    :raises LAVABug: if ``data`` cannot be merged as a dictionary.
    :raises JobError: if both ``failure_retry`` and ``repeat`` are set.
    """
    try:
        self.__parameters__.update(data)
    except (ValueError, TypeError):
        # dict.update raises TypeError for non-iterable input (e.g. None)
        # and ValueError for malformed key/value sequences; both mean the
        # submitted parameters were not a dictionary. The original code
        # caught only ValueError, letting TypeError escape uncollected.
        raise LAVABug("Action parameters need to be a dictionary")
    # Override the duration if needed
    if "timeout" in self.parameters:
        # preserve existing overrides
        if self.timeout.duration == Timeout.default_duration():
            self.timeout.duration = Timeout.parse(self.parameters["timeout"])
    if "connection_timeout" in self.parameters:
        self.connection_timeout.duration = Timeout.parse(
            self.parameters["connection_timeout"])
    # only unit tests should have actions without a pointer to the job.
    if "failure_retry" in self.parameters and "repeat" in self.parameters:
        raise JobError(
            "Unable to use repeat and failure_retry, use a repeat block")
    if "failure_retry" in self.parameters:
        self.max_retries = self.parameters["failure_retry"]
    if "repeat" in self.parameters:
        self.max_retries = self.parameters["repeat"]
    if self.job:
        if self.job.device:
            if "character_delays" in self.job.device:
                self.character_delay = self.job.device[
                    "character_delays"].get(self.section, 0)
def _check_data(self, data):
    """Validate a JSON request received over the protocol.

    Decodes ``data``, checks the mandatory fields, applies any
    ``poll_delay`` / ``timeout`` settings and returns the decoded
    dictionary with the API call name normalised (dashes replaced by
    underscores). Raises JobError on any invalid request.
    """
    try:
        message = json.loads(data)
    except (ValueError, TypeError) as exc:
        raise JobError("Invalid data for %s protocol: %s %s" % (self.name, data, exc))
    if not isinstance(message, dict):
        raise JobError("Invalid data type %s for protocol %s" % (data, self.name))
    if not message:
        raise JobError("No data to be sent over protocol %s" % self.name)
    if 'request' not in message:
        raise JobError("Bad API call over protocol - missing request")
    if message["request"] == "aggregate":
        raise JobError("Pipeline submission has not been implemented.")
    if "poll_delay" in message:
        self.settings['poll_delay'] = int(message["poll_delay"])
    if 'timeout' in message:
        requested = message['timeout']
        if isinstance(requested, dict):
            self.poll_timeout.duration = Timeout.parse(requested)
        elif isinstance(requested, (int, float)):
            self.poll_timeout.duration = requested
        else:
            self.logger.debug(requested)
            raise JobError("Invalid timeout request")
        self.logger.debug("Setting poll timeout of %s seconds",
                          int(self.poll_timeout.duration))
    if 'messageID' not in message:
        raise JobError("Missing messageID")
    # handle conversion of api calls to internal functions
    message['request'] = message['request'].replace('-', '_')
    return message
def setUp(self):
    """Build a fake job whose job timeout is three seconds."""
    super().setUp()
    self.parameters = {
        "job_name": "fakejob",
        'timeouts': {'job': {'seconds': 3}},
        "actions": [
            {
                'deploy': {'namespace': 'common', 'failure_retry': 3},
                'boot': {'namespace': 'common', 'failure_retry': 4},
                'test': {'namespace': 'common', 'failure_retry': 5},
            }
        ],
    }
    self.fakejob = TestTimeout.FakeJob(self.parameters)
    # copy of the _timeout function from parser.
    timeouts = self.parameters.get('timeouts', {})
    if 'job' in timeouts:
        duration = Timeout.parse(timeouts['job'])
        self.fakejob.timeout = Timeout(self.parameters['job_name'], duration)
def test_action_connection_timeout(self):
    """Test connection timeout specified for a particular action"""
    y_file = os.path.join(os.path.dirname(__file__),
                          "./sample_jobs/uboot-ramdisk.yaml")
    with open(y_file, "r") as uboot_ramdisk:
        data = yaml_safe_load(uboot_ramdisk)
    connection_timeout = Timeout.parse(
        data["timeouts"]["connections"]["lava-test-shell"])
    # Override the uboot-commands action and connection timeouts.
    data["timeouts"]["actions"]["uboot-commands"] = {"seconds": 90}
    data["timeouts"]["connections"]["uboot-commands"] = {"seconds": 45}
    self.assertEqual(connection_timeout, 240)
    job = self.factory.create_custom_job("bbb-01.jinja2", data)
    boot = [item for item in job.pipeline.actions
            if item.name == "uboot-action"][0]
    retry = [item for item in boot.pipeline.actions
             if item.name == "uboot-commands"][0]
    self.assertEqual(retry.timeout.duration, 90)
    # Set by the job global action timeout
    self.assertEqual(retry.connection_timeout.duration, 45)
def _check_timeout(prefix, path, local_data):
    # Reject any timeout block that would exceed the overall job timeout.
    # ``job_duration`` is a free variable from the enclosing scope.
    if local_data is None:
        return
    if Timeout.parse(local_data) > job_duration:
        raise Invalid("%s timeout is larger than job timeout" % prefix, path=path)
def test_action_connection_timeout(self):
    """Test connection timeout specified for a particular action"""
    y_file = os.path.join(os.path.dirname(__file__),
                          './sample_jobs/uboot-ramdisk.yaml')
    with open(y_file, 'r') as uboot_ramdisk:
        data = yaml.safe_load(uboot_ramdisk)
    connection_timeout = Timeout.parse(
        data['timeouts']['connections']['lava-test-shell'])
    # Override both timeouts for the uboot-retry action.
    for section in ('actions', 'connections'):
        data['timeouts'][section]['uboot-retry'] = {}
    data['timeouts']['actions']['uboot-retry']['seconds'] = 90
    data['timeouts']['connections']['uboot-retry']['seconds'] = 45
    self.assertEqual(connection_timeout, 240)
    job = self.factory.create_custom_job('bbb-01.jinja2', data)
    boot = [item for item in job.pipeline.actions
            if item.name == 'uboot-action'][0]
    retry = [item for item in boot.internal_pipeline.actions
             if item.name == 'uboot-retry'][0]
    self.assertEqual(retry.timeout.duration, 90)
    # Set by the job global action timeout
    self.assertEqual(retry.connection_timeout.duration, 45)
def populate(self, parameters):
    """Populate the action, honouring any job-level ``read-feedback``
    connection timeout override from the job parameters."""
    super().populate(parameters)
    timeouts = self.job.parameters.get("timeouts", {})
    override = timeouts.get("connections", {}).get("read-feedback")
    if override:
        self.duration = Timeout.parse(override)
def setUp(self):
    """Create a fake job with a three second job timeout."""
    super().setUp()
    action_block = {
        "deploy": {"namespace": "common", "failure_retry": 3},
        "boot": {"namespace": "common", "failure_retry": 4},
        "test": {"namespace": "common", "failure_retry": 5},
    }
    self.parameters = {
        "job_name": "fakejob",
        "timeouts": {"job": {"seconds": 3}},
        "actions": [action_block],
    }
    self.fakejob = TestTimeout.FakeJob(self.parameters)
    # copy of the _timeout function from parser.
    if "timeouts" in self.parameters:
        if "job" in self.parameters["timeouts"]:
            duration = Timeout.parse(self.parameters["timeouts"]["job"])
            self.fakejob.timeout = Timeout(self.parameters["job_name"], duration)
def check_job_timeouts(data):
    """Validate that no global or per-action timeout exceeds the job timeout.

    :param data: parsed job submission dictionary.
    :raises Invalid: naming the offending timeout and its path.
    """
    job_duration = Timeout.parse(data["timeouts"]["job"])

    def _check_timeout(prefix, path, local_data):
        # Skip absent entries; anything present must fit inside the job.
        if local_data is None:
            return
        if Timeout.parse(local_data) > job_duration:
            raise Invalid("%s timeout is larger than job timeout" % prefix,
                          path=path)

    # global timeouts
    _check_timeout("Global", ["timeouts", "action"],
                   data["timeouts"].get("action"))
    for key in data["timeouts"].get("actions", []):
        _check_timeout("Global", ["timeouts", "actions", key],
                       data["timeouts"]["actions"][key])
    _check_timeout("Global", ["timeouts", "connection"],
                   data["timeouts"].get("connection"))
    for key in data["timeouts"].get("connections", []):
        _check_timeout("Global", ["timeouts", "connections", key],
                       data["timeouts"]["connections"][key])
    # action timeouts
    for index, action in enumerate(data["actions"]):
        action_type = next(iter(action.keys()))
        requested = action[action_type].get("timeout")
        if requested is not None:
            _check_timeout("Action", ["actions", str(index)], requested)
def _override_connection_timeout(self, timeout):
    """
    Only to be called by the Pipeline object, add_action().
    """
    if timeout is not None:
        if not isinstance(timeout, dict):
            raise JobError("Invalid connection timeout %s" % str(timeout))
        self.connection_timeout.duration = Timeout.parse(timeout)
def _override_action_timeout(self, timeout):
    """
    Only to be called by the Pipeline object, add_action().
    """
    if timeout is not None:
        if not isinstance(timeout, dict):
            raise JobError("Invalid timeout %s" % str(timeout))
        self.timeout.duration = Timeout.parse(timeout)
        # Warn (but do not fail) when the action could outlive the job.
        if self.timeout.duration > self.job.timeout.duration:
            self.logger.warning("Action timeout for %s exceeds Job timeout",
                                self.name)
def setup(self, parameters):
    """
    Retrieve the poll_timeout from the protocol parameters which are set
    after init.
    """
    if MultinodeProtocol.name not in parameters:
        return
    proto_params = parameters[MultinodeProtocol.name]
    if 'timeout' in proto_params:
        self.base_message = {'timeout': Timeout.parse(proto_params['timeout'])}
def test_multinode_timeout(self):
    """
    Test the protocol timeout is assigned to the action
    """
    testshell = [
        action for action in self.client_job.pipeline.actions
        if isinstance(action, MultinodeTestAction)
    ][0]
    testshell.validate()
    poll_durations = [p.poll_timeout.duration for p in testshell.protocols]
    self.assertIn(30, poll_durations)
    multinode_timeout = testshell.parameters['lava-multinode']['timeout']
    self.assertIn('minutes', multinode_timeout)
    self.assertEqual(10, multinode_timeout['minutes'])
    self.assertEqual(
        testshell.signal_director.base_message['timeout'],
        Timeout.parse(multinode_timeout))
def setup(self, parameters, character_delay=0):
    """
    Retrieve the poll_timeout from the protocol parameters which are set
    after init.
    """
    super().setup(parameters)
    # NOTE: character_delay is only stored when the multinode protocol is
    # present, matching the original early-return behaviour.
    if MultinodeProtocol.name not in parameters:
        return
    proto_params = parameters[MultinodeProtocol.name]
    if "timeout" in proto_params:
        self.base_message = {"timeout": Timeout.parse(proto_params["timeout"])}
    self.character_delay = character_delay
def test_testshell(self):
    """The test shell sits inside a retry action and honours its timeout."""
    testshell = None
    for action in self.job.pipeline.actions:
        self.assertIsNotNone(action.name)
        if isinstance(action, TestShellRetry):
            testshell = action.pipeline.actions[0]
            break
    self.assertIsInstance(testshell, TestShellAction)
    self.assertTrue(testshell.valid)
    expected = (Timeout.parse(testshell.parameters['timeout'])
                if 'timeout' in testshell.parameters
                else Timeout.default_duration())
    self.assertEqual(
        datetime.timedelta(seconds=expected).total_seconds(),
        testshell.timeout.duration)
def __init__(self, parameters, job_id):
    """Initialise the protocol, arming a delayed start when this node
    waits on a lava-start message from a different role."""
    super().__init__(parameters, job_id)
    self.blocks = 4 * 1024
    # how long between polls (in seconds)
    self.system_timeout = Timeout('system', LAVA_MULTINODE_SYSTEM_TIMEOUT)
    self.settings = None
    self.sock = None
    self.base_message = None
    self.logger = logging.getLogger('dispatcher')
    self.delayed_start = False
    params = parameters['protocols'][self.name]
    wants_start = ('request' in params
                   and 'lava-start' == params['request']
                   and 'expect_role' in params)
    if wants_start:
        if params['expect_role'] != params['role']:
            self.delayed_start = True
            self.system_timeout.duration = Timeout.parse(params['timeout'])
        else:
            self.errors = "expect_role must not match the role declaring lava_start"
            self.logger.warning(self.errors)
def __init__(self, parameters, job_id):
    """Initialise the protocol state; delay the start when this node
    expects lava-start from another role."""
    super().__init__(parameters, job_id)
    self.blocks = 4 * 1024
    # how long between polls (in seconds)
    self.system_timeout = Timeout("system", LAVA_MULTINODE_SYSTEM_TIMEOUT)
    self.settings = None
    self.sock = None
    self.base_message = None
    self.logger = logging.getLogger("dispatcher")
    self.delayed_start = False
    params = parameters["protocols"][self.name]
    if ("request" in params
            and "lava-start" == params["request"]
            and "expect_role" in params):
        if params["expect_role"] != params["role"]:
            self.delayed_start = True
            self.system_timeout.duration = Timeout.parse(params["timeout"])
        else:
            self.errors = "expect_role must not match the role declaring lava_start"
            self.logger.warning(self.errors)
def test_failure_retry_specified_interval(self):
    """A failing retry action sleeps failure_retry_interval seconds
    between attempts."""
    self.parameters = {
        "job_name": "fakejob",
        "timeouts": {"job": {"seconds": 3}},
        "actions": [
            {
                "deploy": {
                    "namespace": "common",
                    "failure_retry": 3,
                    "failure_retry_interval": 2,
                },
                "boot": {"namespace": "common", "failure_retry": 4},
                "test": {"namespace": "common", "failure_retry": 5},
            }
        ],
    }
    self.fakejob = TestAction.FakeJob(self.parameters)
    # copy of the _timeout function from parser.
    if "timeouts" in self.parameters:
        if "job" in self.parameters["timeouts"]:
            duration = Timeout.parse(self.parameters["timeouts"]["job"])
            self.fakejob.timeout = Timeout(self.parameters["job_name"], duration)
    pipeline = TestAction.FakePipeline(job=self.fakejob)
    action = TestAction.InternalRetryAction()
    for deploy_params in self.lookup_deploy(self.parameters["actions"]):
        action.parameters = deploy_params
    pipeline.add_action(action)
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    with self.assertRaises(JobError):
        self.fakejob.run()
    self.assertEqual(action.sleep, 2)
suite__name="lava") for case in test_cases: if case.action_metadata: if case.action_metadata.get("level") == action_data["level"]: match_case = case # maps the static testdata derived from the definition to the runtime pipeline construction ActionData.objects.create( action_name=action_data["name"], action_level=action_data["level"], action_summary=action_data["summary"], testdata=testdata, action_description=action_data["description"], meta_type=action_meta, max_retries=max_retry, timeout=int(Timeout.parse(action_data["timeout"])), testcase=match_case, ) def walk_actions(data, testdata, submission): for action in data: build_action(action, testdata, submission) if "pipeline" in action: walk_actions(action["pipeline"], testdata, submission) def map_metadata(description, job): """ Generate metadata from the combination of the pipeline definition file (after any parsing for protocols) and the pipeline description
def test_parsing():
    """Check Timeout.parse over simple, compound and invalid durations."""
    # 1/ simple durations
    cases = [
        ({"days": 1}, 86400),
        ({"hours": 3}, 3 * 3600),
        ({"minutes": 1}, 1 * 60),
        ({"seconds": 345}, 345),
        # 2/ complexe durations
        ({"minutes": 22, "seconds": 17}, 22 * 60 + 17),
        ({"hours": 2, "minutes": 22, "seconds": 17}, 2 * 3600 + 22 * 60 + 17),
        ({"days": 1, "minutes": 22, "seconds": 17}, 86400 + 22 * 60 + 17),
    ]
    for data, expected in cases:
        assert Timeout.parse(data) == expected  # nosec - assert is part of the test process.
    # 3/ invalid durations fall back to the default
    assert (  # nosec - assert is part of the test process.
        Timeout.parse({"day": 1}) == Timeout.default_duration()
    )
    assert (  # nosec - assert is part of the test process.
        Timeout.parse({}) == Timeout.default_duration()
    )
    with pytest.raises(ConfigurationError):
        Timeout.parse("")
test_cases = TestCase.objects.filter(suite__job=testdata.testjob, suite__name='lava') for case in test_cases: if 'level' in case.action_metadata: if case.action_metadata['level'] == action_data['level']: match_case = case # maps the static testdata derived from the definition to the runtime pipeline construction ActionData.objects.create( action_name=action_data['name'], action_level=action_data['level'], action_summary=action_data['summary'], testdata=testdata, action_description=action_data['description'], meta_type=action_meta, max_retries=max_retry, timeout=int(Timeout.parse(action_data['timeout'])), testcase=match_case ) def walk_actions(data, testdata, submission): for action in data: build_action(action, testdata, submission) if 'pipeline' in action: walk_actions(action['pipeline'], testdata, submission) def map_metadata(description, job): """ Generate metadata from the combination of the pipeline definition file (after any parsing for protocols) and the pipeline description