def _override_action_timeout(self, timeout):
    """Replace this action's timeout with a job-supplied override.

    Only to be called by the Pipeline object, add_action(). A ``None``
    override is ignored; anything other than a dict is rejected.
    """
    if timeout is None:
        return
    if not isinstance(timeout, dict):
        raise JobError("Invalid timeout %s" % str(timeout))
    parsed = Timeout.parse(timeout)
    self.timeout.duration = parsed
    # An action outliving the whole job is almost certainly a mistake.
    if parsed > self.job.timeout.duration:
        self.logger.warning("Action timeout for %s exceeds Job timeout", self.name)
def test_check_char(self):
    """check_char exists on the session object, not the raw shell command."""
    command = "ls"
    shell = ShellCommand(
        "%s\n" % command, Timeout("fake", 30), logger=logging.getLogger()
    )
    if shell.exitstatus:
        raise JobError(
            "%s command exited %d: %s"
            % (command, shell.exitstatus, shell.readlines())
        )
    session = ShellSession(self.job, shell)
    self.assertFalse(hasattr(shell, "check_char"))
    self.assertTrue(hasattr(session, "check_char"))
    self.assertIsNotNone(session.check_char)
def __set_parameters__(self, data):
    """Merge ``data`` into the action parameters and apply any overrides.

    Applies, in order: timeout and connection_timeout overrides, retry
    counts (``failure_retry`` / device constants / ``boot_retry``), the
    ``repeat`` count, and the per-section character delay.

    Raises LAVABug if the parameters cannot be updated as a dict, and
    JobError if both ``repeat`` and ``failure_retry`` are requested.
    """
    try:
        self.__parameters__.update(data)
    except ValueError:
        raise LAVABug("Action parameters need to be a dictionary")
    # Override the duration if needed
    if "timeout" in self.parameters:
        # preserve existing overrides
        if self.timeout.duration == Timeout.default_duration():
            self.timeout.duration = Timeout.parse(self.parameters["timeout"])
    if "connection_timeout" in self.parameters:
        self.connection_timeout.duration = Timeout.parse(
            self.parameters["connection_timeout"]
        )
    # only unit tests should have actions without a pointer to the job.
    if "failure_retry" in self.parameters and "repeat" in self.parameters:
        raise JobError("Unable to use repeat and failure_retry, use a repeat block")
    if "failure_retry" in self.parameters:
        # explicit job-level retry count wins
        self.max_retries = self.parameters["failure_retry"]
    else:
        if self.job:
            # NOTE(review): an exact `dict` device is skipped here —
            # presumably a plain dict only occurs in unit tests; confirm.
            if self.job.device and type(self.job.device).__name__ != "dict":
                if "constants" in self.job.device:
                    max_retry = self.get_constant("failure_retry", "")
                    if max_retry:
                        self.max_retries = int(max_retry)
                    # In case of a boot section, used boot_retry if it exists
                    boot_retry = self.get_constant("boot_retry", "")
                    if self.section == "boot" and boot_retry:
                        self.max_retries = int(boot_retry)
    # "repeat" overrides any retry count computed above
    if "repeat" in self.parameters:
        self.max_retries = self.parameters["repeat"]
    if self.job:
        if self.job.device:
            if "character_delays" in self.job.device:
                self.character_delay = self.job.device["character_delays"].get(
                    self.section, 0
                )
def setup(self, parameters):
    """Retrieve the poll_timeout from the protocol parameters which are
    set after init.
    """
    if MultinodeProtocol.name not in parameters:
        return
    protocol_params = parameters[MultinodeProtocol.name]
    if 'timeout' in protocol_params:
        self.base_message = {'timeout': Timeout.parse(protocol_params['timeout'])}
def __init__(self, parameters, job_id):
    """Initialise multinode protocol state and honour any delayed-start
    request in the job's protocol parameters."""
    super().__init__(parameters, job_id)
    self.blocks = 4 * 1024
    # how long between polls (in seconds)
    self.system_timeout = Timeout("system", LAVA_MULTINODE_SYSTEM_TIMEOUT)
    self.settings = None
    self.sock = None
    self.base_message = None
    self.logger = logging.getLogger("dispatcher")
    self.delayed_start = False
    params = parameters["protocols"][self.name]
    requests_start = (
        params.get("request") == "lava-start" and "expect_role" in params
    )
    if requests_start:
        if params["expect_role"] != params["role"]:
            # this node waits for another role to send lava-start
            self.delayed_start = True
            if "timeout" in params:
                self.system_timeout.duration = Timeout.parse(params["timeout"])
        else:
            self.errors = "expect_role must not match the role declaring lava_start"
            self.logger.warning(self.errors)
def test_action_timout_custom_exception(self):
    """A custom timeout exception class propagates out of the job run."""
    duration = 2
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    action = TestTimeout.FakeAction()
    action.timeout = Timeout(
        action.name, duration=duration, exception=InfrastructureError
    )
    pipeline.add_action(action)
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    with self.assertRaises(InfrastructureError):
        self.fakejob.run()
def test_multinode_timeout(self):
    """ Test the protocol timeout is assigned to the action """
    multinode_actions = [
        action
        for action in self.client_job.pipeline.actions
        if isinstance(action, MultinodeTestAction)
    ]
    testshell = multinode_actions[0]
    testshell.validate()
    timeout_params = testshell.parameters['lava-multinode']['timeout']
    self.assertIn(30, [p.poll_timeout.duration for p in testshell.protocols])
    self.assertIn('minutes', timeout_params)
    self.assertEqual(10, timeout_params['minutes'])
    # the parsed timeout must be propagated into the base_message
    self.assertEqual(
        testshell.signal_director.base_message['timeout'],
        Timeout.parse(timeout_params),
    )
def setup(self, parameters, character_delay=0):
    """Retrieve the poll_timeout from the protocol parameters which are
    set after init.
    """
    super().setup(parameters)
    if MultinodeProtocol.name not in parameters:
        return
    protocol_params = parameters[MultinodeProtocol.name]
    if "timeout" in protocol_params:
        self.base_message = {"timeout": Timeout.parse(protocol_params["timeout"])}
    self.character_delay = character_delay
def test_exception_raised(monkeypatch):
    """_timed_out raises JobError by default, or the exception class
    supplied at construction time."""
    cases = [((), JobError), ((InfrastructureError,), InfrastructureError)]
    for extra_args, expected_exception in cases:
        timeout = Timeout("name", 12, *extra_args)
        monkeypatch.setattr(signal, "signal", DummySignal([timeout._timed_out]))
        monkeypatch.setattr(signal, "alarm", DummyAlarm([12, 0]))
        with pytest.raises(expected_exception):
            with timeout(None, None):
                timeout._timed_out(None, None)
def test_action_timeout(self):
    """
    Testing timeouts does mean that the tests do nothing until the
    timeout happens, so the total length of time to run the tests
    has to increase...
    """
    self.assertIsNotNone(self.fakejob.timeout)
    duration = 2
    pipeline = TestTimeout.FakePipeline(job=self.fakejob)
    action = TestTimeout.FakeAction()
    action.timeout = Timeout(action.name, duration=duration)
    pipeline.add_action(action)
    self.fakejob.pipeline = pipeline
    self.fakejob.device = TestTimeout.FakeDevice()
    # default timeout exception is JobError
    with self.assertRaises(JobError):
        self.fakejob.run()
def test_wait_for_board_id_is_optional(factory):
    """With wait.device=False no wait-device-boardid action is added and
    the docker test shell does not wait for the device."""
    action = DockerTestAction()
    action.job = Job("1234", {}, None)
    rendered, _ = factory.create_device("hi6220-hikey-r2-01.jinja2")
    action.job.device = NewDevice(yaml_load(rendered))
    action.job.timeout = Timeout("blah")
    action.level = 1
    parameters = {
        "namespace": "common",
        "docker": {"image": "foobar", "wait": {"device": False}},
    }
    action.populate(parameters)
    board_id_waits = [
        a for a in action.pipeline.actions if a.name == "wait-device-boardid"
    ]
    assert not any(board_id_waits)
    docker_test_shell = action.pipeline.actions[-2]
    assert not docker_test_shell.wait_for_device
def test_action_timeout(self, which_mock):
    """Timeouts from the job definition land on the expected actions."""
    factory = ConnectionFactory()
    job = factory.create_bbb_job("sample_jobs/uboot-ramdisk.yaml")
    job.validate()

    def first_named(actions, name):
        # local helper: first action in the pipeline with the given name
        return [a for a in actions if a.name == name][0]

    deploy = first_named(job.pipeline.actions, "tftp-deploy")
    test_action = first_named(job.pipeline.actions, "lava-test-retry")
    test_shell = first_named(test_action.pipeline.actions, "lava-test-shell")
    self.assertEqual(
        test_shell.connection_timeout.duration, 240
    )  # job specifies 4 minutes
    self.assertEqual(
        test_shell.timeout.duration, 300
    )  # job (test action block) specifies 5 minutes
    self.assertEqual(deploy.timeout.duration, 120)  # job specifies 2 minutes
    self.assertEqual(deploy.connection_timeout.duration, Timeout.default_duration())
    self.assertNotEqual(
        deploy.connection_timeout.duration, test_shell.connection_timeout
    )
    self.assertEqual(test_action.timeout.duration, 300)
    uboot = first_named(job.pipeline.actions, "uboot-action")
    retry = first_named(uboot.pipeline.actions, "uboot-commands")
    auto = first_named(retry.pipeline.actions, "auto-login-action")
    self.assertEqual(auto.timeout.duration / 60, 9)  # 9 minutes in the job def
def test_action_timeout(self):
    """Timeouts from the job definition land on the expected actions."""
    factory = ConnectionFactory()
    job = factory.create_bbb_job('sample_jobs/uboot-ramdisk.yaml')
    job.validate()

    def first_named(actions, name):
        # local helper: first action in the pipeline with the given name
        return [a for a in actions if a.name == name][0]

    deploy = first_named(job.pipeline.actions, 'tftp-deploy')
    test_action = first_named(job.pipeline.actions, 'lava-test-retry')
    test_shell = first_named(test_action.internal_pipeline.actions, 'lava-test-shell')
    # job specifies 4 minutes
    self.assertEqual(test_shell.connection_timeout.duration, 240)
    # job (test action block) specifies 5 minutes
    self.assertEqual(test_shell.timeout.duration, 300)
    # job specifies 2 minutes
    self.assertEqual(deploy.timeout.duration, 120)
    self.assertEqual(deploy.connection_timeout.duration, Timeout.default_duration())
    self.assertNotEqual(deploy.connection_timeout.duration, test_shell.connection_timeout)
    self.assertEqual(test_action.timeout.duration, 300)
    uboot = first_named(job.pipeline.actions, 'uboot-action')
    retry = first_named(uboot.internal_pipeline.actions, 'uboot-retry')
    auto = first_named(retry.internal_pipeline.actions, 'auto-login-action')
    # 9 minutes in the job def
    self.assertEqual(auto.timeout.duration / 60, 9)
def _check_data(self, data):
    """Validate and normalise an incoming protocol message.

    Parses ``data`` as JSON, validates the API fields, applies any
    poll_delay/timeout settings carried in the message, and converts the
    request name to its internal (underscore) form.

    Returns the parsed dict. Raises JobError for malformed or invalid
    messages.
    """
    try:
        json_data = json.loads(data)
    except (ValueError, TypeError) as exc:
        # chain the parse failure so the original cause is preserved
        raise JobError(
            "Invalid data for %s protocol: %s %s" % (self.name, data, exc)
        ) from exc
    if not isinstance(json_data, dict):
        raise JobError("Invalid data type %s for protocol %s" % (data, self.name))
    if not json_data:
        raise JobError("No data to be sent over protocol %s" % self.name)
    if "request" not in json_data:
        raise JobError("Bad API call over protocol - missing request")
    if json_data["request"] == "aggregate":
        raise JobError("Pipeline submission has not been implemented.")
    if "poll_delay" in json_data:
        self.settings["poll_delay"] = int(json_data["poll_delay"])
    if "timeout" in json_data:
        timeout_value = json_data["timeout"]
        if isinstance(timeout_value, dict):
            self.poll_timeout.duration = Timeout.parse(timeout_value)
        elif isinstance(timeout_value, (int, float)) and not isinstance(
            timeout_value, bool
        ):
            # reject JSON true/false: bool is a subclass of int but is
            # never a meaningful timeout
            self.poll_timeout.duration = timeout_value
        else:
            self.logger.debug(timeout_value)
            raise JobError("Invalid timeout request")
        self.logger.debug(
            "Setting poll timeout of %s seconds", int(self.poll_timeout.duration)
        )
    if "messageID" not in json_data:
        raise JobError("Missing messageID")
    # handle conversion of api calls to internal functions
    json_data["request"] = json_data["request"].replace("-", "_")
    return json_data
def __init__(self):
    """Initialise action state with the bootloader command timeout."""
    super().__init__()
    # Bootloader interaction uses its own (longer) default timeout.
    self.timeout = Timeout(
        self.name,
        BOOTLOADER_DEFAULT_CMD_TIMEOUT,
        exception=self.timeout_exception,
    )
    self.start_message = None
test_cases = TestCase.objects.filter(suite__job=testdata.testjob, suite__name='lava') for case in test_cases: if 'level' in case.action_metadata: if case.action_metadata['level'] == action_data['level']: match_case = case # maps the static testdata derived from the definition to the runtime pipeline construction ActionData.objects.create( action_name=action_data['name'], action_level=action_data['level'], action_summary=action_data['summary'], testdata=testdata, action_description=action_data['description'], meta_type=action_meta, max_retries=max_retry, timeout=int(Timeout.parse(action_data['timeout'])), testcase=match_case ) def walk_actions(data, testdata, submission): for action in data: build_action(action, testdata, submission) if 'pipeline' in action: walk_actions(action['pipeline'], testdata, submission) def map_metadata(description, job): """ Generate metadata from the combination of the pipeline definition file (after any parsing for protocols) and the pipeline description
def test_without_raising(monkeypatch):
    """Exercise Timeout as a context manager when the alarm never fires.

    DummyAlarm/DummySignal record the arguments passed to signal.alarm and
    signal.signal; time.time is patched so elapsed_time is deterministic.
    max_end_time selects the smaller of the caller's deadline and the
    timeout's own duration; a parent timeout is re-armed on exit.
    """
    # 1/ without parent
    # 1.1/ without max_end_time
    t = Timeout("timeout-name", 200)
    monkeypatch.setattr(signal, "signal", DummySignal([t._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([200, 0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with t(None, None) as max_end_time:
        assert max_end_time == 200  # nosec - assert is part of the test process.
        # signal.alarm and signal.signal were called once each
        assert signal.alarm.data == [0]  # nosec - assert is part of the test process.
        assert signal.signal.data == []  # nosec - assert is part of the test process.
        monkeypatch.setattr(time, "time", lambda: 23)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert t.elapsed_time == 23  # nosec - assert is part of the test process.

    # 1.1/ with a smaller max_end_time
    t = Timeout("timeout-name", 200)
    monkeypatch.setattr(signal, "signal", DummySignal([t._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([125, 0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with t(None, 125) as max_end_time:
        # the caller's deadline (125) wins over the duration (200)
        assert max_end_time == 125  # nosec - assert is part of the test process.
        assert signal.alarm.data == [0]  # nosec - assert is part of the test process.
        assert signal.signal.data == []  # nosec - assert is part of the test process.
        monkeypatch.setattr(time, "time", lambda: 109)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert t.elapsed_time == 109  # nosec - assert is part of the test process.

    # 1.2/ with a larger max_end_time
    t = Timeout("timeout-name", 200)
    monkeypatch.setattr(signal, "signal", DummySignal([t._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([200, 0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with t(None, 201) as max_end_time:
        # the duration (200) wins over the later deadline (201)
        assert max_end_time == 200  # nosec - assert is part of the test process.
        assert signal.alarm.data == [0]  # nosec - assert is part of the test process.
        assert signal.signal.data == []  # nosec - assert is part of the test process.
        monkeypatch.setattr(time, "time", lambda: 45)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert t.elapsed_time == 45  # nosec - assert is part of the test process.

    # 2/ with a parent
    # 2.1/ with a larger max_end_time
    t0 = Timeout("timeout-parent", 200)
    parent = ParentAction(t0)
    t1 = Timeout("timeout-child", 100)
    monkeypatch.setattr(signal, "signal", DummySignal([t1._timed_out, t0._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([100, 177]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with t1(parent, 200) as max_end_time:
        assert max_end_time == 100  # nosec - assert is part of the test process.
        # on exit, the parent's alarm is re-armed (177s remaining)
        assert signal.alarm.data == [177]  # nosec - assert is part of the test process.
        assert signal.signal.data == [  # nosec - assert is part of the test process.
            t0._timed_out
        ]
        monkeypatch.setattr(time, "time", lambda: 23)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert t1.elapsed_time == 23  # nosec - assert is part of the test process.

    # 2.2/ with a smaller max_end_time
    t0 = Timeout("timeout-parent", 50)
    parent = ParentAction(t0)
    t1 = Timeout("timeout-child", 100)
    monkeypatch.setattr(signal, "signal", DummySignal([t1._timed_out, t0._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([50, 27]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with t1(parent, 50) as max_end_time:
        assert max_end_time == 50  # nosec - assert is part of the test process.
        assert signal.alarm.data == [27]  # nosec - assert is part of the test process.
        assert signal.signal.data == [  # nosec - assert is part of the test process.
            t0._timed_out
        ]
        monkeypatch.setattr(time, "time", lambda: 23)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert t1.elapsed_time == 23  # nosec - assert is part of the test process.
def test_parsing():
    """Timeout.parse: simple and compound duration dicts, the default
    fallback for unknown keys, and rejection of non-dict input."""
    expectations = [
        # 1/ simple durations
        ({"days": 1}, 86400),
        ({"hours": 3}, 3 * 3600),
        ({"minutes": 1}, 1 * 60),
        ({"seconds": 345}, 345),
        # 2/ complexe durations
        ({"minutes": 22, "seconds": 17}, 22 * 60 + 17),
        ({"hours": 2, "minutes": 22, "seconds": 17}, 2 * 3600 + 22 * 60 + 17),
        ({"days": 1, "minutes": 22, "seconds": 17}, 86400 + 22 * 60 + 17),
        # 3/ invalid durations fall back to the default
        ({"day": 1}, Timeout.default_duration()),
        ({}, Timeout.default_duration()),
    ]
    for duration_dict, expected in expectations:
        assert Timeout.parse(duration_dict) == expected  # nosec - test assertion
    # non-dict input is rejected outright
    with pytest.raises(ConfigurationError):
        Timeout.parse("")
def test_with_raising(monkeypatch):
    """Exercise Timeout as a context manager when the alarm fires.

    Each case triggers _timed_out inside the context and checks that the
    configured exception escapes, the alarm is cancelled on exit, and
    elapsed_time matches the patched clock. A non-positive max_end_time
    must raise before the body runs; a parent timeout can raise its own
    exception class after the child exits.
    """
    # 1/ without parent
    # 1.1/ without max_end_time
    t = Timeout("timeout-name", 200)
    monkeypatch.setattr(signal, "signal", DummySignal([t._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([200, 0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with pytest.raises(JobError):
        with t(None, None) as max_end_time:
            assert max_end_time == 200  # nosec - assert is part of the test process.
            assert signal.alarm.data == [  # nosec - assert is part of the test process.
                0
            ]
            assert (  # nosec - assert is part of the test process.
                signal.signal.data == []
            )
            monkeypatch.setattr(time, "time", lambda: 200)
            t._timed_out(None, None)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert t.elapsed_time == 200  # nosec - assert is part of the test process.

    # 1.1/ with a smaller max_end_time
    t = Timeout("timeout-name", 200)
    monkeypatch.setattr(signal, "signal", DummySignal([t._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([125, 0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with pytest.raises(JobError):
        with t(None, 125) as max_end_time:
            assert max_end_time == 125  # nosec - assert is part of the test process.
            assert signal.alarm.data == [  # nosec - assert is part of the test process.
                0
            ]
            assert (  # nosec - assert is part of the test process.
                signal.signal.data == []
            )
            monkeypatch.setattr(time, "time", lambda: 126)
            t._timed_out(None, None)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert t.elapsed_time == 126  # nosec - assert is part of the test process.

    # 1.2/ with a larger max_end_time
    t = Timeout("timeout-name", 200)
    monkeypatch.setattr(signal, "signal", DummySignal([t._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([200, 0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with pytest.raises(JobError):
        with t(None, 201) as max_end_time:
            assert max_end_time == 200  # nosec - assert is part of the test process.
            assert signal.alarm.data == [0]  # nosec - test process.
            assert signal.signal.data == []  # nosec - test process.
            monkeypatch.setattr(time, "time", lambda: 200)
            t._timed_out(None, None)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert t.elapsed_time == 200  # nosec - assert is part of the test process.

    # 1.3/ with max_end_time <= 0
    t = Timeout("timeout-name", 200)
    monkeypatch.setattr(signal, "signal", DummySignal([t._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with pytest.raises(JobError):
        with t(None, 0) as max_end_time:
            # Check that the exception is raised before this line
            assert 0  # nosec - assert is part of the test process.
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert t.elapsed_time == 0  # nosec - assert is part of the test process.

    # 2/ with a parent
    # 2.1/ with a larger max_end_time
    t0 = Timeout("timeout-parent", 200)
    parent = ParentAction(t0)
    t1 = Timeout("timeout-child", 100)
    monkeypatch.setattr(signal, "signal", DummySignal([t1._timed_out, t0._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([100, 0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with pytest.raises(JobError):
        with t1(parent, 200) as max_end_time:
            assert max_end_time == 100  # nosec - assert is part of the test process.
            assert signal.alarm.data == [0]  # nosec - test process.
            assert signal.signal.data == [t0._timed_out]  # nosec - test process.
            monkeypatch.setattr(time, "time", lambda: 100)
            t1._timed_out(None, None)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert signal.signal.data == [t0._timed_out]  # nosec - test process.
    assert t1.elapsed_time == 100  # nosec - assert is part of the test process.

    # 2.2/ with a smaller max_end_time
    t0 = Timeout("timeout-parent", 50)
    parent = ParentAction(t0)
    t1 = Timeout("timeout-child", 100)
    monkeypatch.setattr(signal, "signal", DummySignal([t1._timed_out, t0._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([50, 0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with pytest.raises(JobError):
        with t1(parent, 50) as max_end_time:
            assert max_end_time == 50  # nosec - assert is part of the test process.
            assert signal.alarm.data == [0]  # nosec - test process.
            assert signal.signal.data == [t0._timed_out]  # nosec - test process.
            monkeypatch.setattr(time, "time", lambda: 23)
            t1._timed_out(None, None)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert signal.signal.data == [t0._timed_out]  # nosec - test process.
    assert t1.elapsed_time == 23  # nosec - assert is part of the test process.

    # 2.3/ with max_end_time <= 0
    t0 = Timeout("timeout-parent", 1)
    parent = ParentAction(t0)
    t1 = Timeout("timeout-child", 100)
    monkeypatch.setattr(signal, "signal", DummySignal([t1._timed_out, t0._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with pytest.raises(JobError):
        with t1(parent, -1) as max_end_time:
            # Check that the exception is raised before this line
            assert 0  # nosec - assert is part of the test process.
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert signal.signal.data == [t1._timed_out, t0._timed_out]  # nosec - test process.
    assert t1.elapsed_time == 0  # nosec - assert is part of the test process.

    # 2.4/ raising parent timeout
    t0 = Timeout("timeout-parent", 50, InfrastructureError)
    parent = ParentAction(t0)
    t1 = Timeout("timeout-child", 100)
    monkeypatch.setattr(signal, "signal", DummySignal([t1._timed_out, t0._timed_out]))
    monkeypatch.setattr(signal, "alarm", DummyAlarm([50, 0, 0]))
    monkeypatch.setattr(time, "time", lambda: 0)
    with pytest.raises(InfrastructureError):
        with t1(parent, 50) as max_end_time:
            assert max_end_time == 50  # nosec - assert is part of the test process.
            assert signal.alarm.data == [0, 0]  # nosec - test
            assert signal.signal.data == [t0._timed_out]  # nosec - test
            monkeypatch.setattr(time, "time", lambda: 50)
    assert signal.alarm.data == []  # nosec - assert is part of the test process.
    assert signal.signal.data == []  # nosec - assert is part of the test process.
    assert t1.elapsed_time == 50  # nosec - assert is part of the test process.
suite__name="lava") for case in test_cases: if case.action_metadata: if case.action_metadata.get("level") == action_data["level"]: match_case = case # maps the static testdata derived from the definition to the runtime pipeline construction ActionData.objects.create( action_name=action_data["name"], action_level=action_data["level"], action_summary=action_data["summary"], testdata=testdata, action_description=action_data["description"], meta_type=action_meta, max_retries=max_retry, timeout=int(Timeout.parse(action_data["timeout"])), testcase=match_case, ) def walk_actions(data, testdata, submission): for action in data: build_action(action, testdata, submission) if "pipeline" in action: walk_actions(action["pipeline"], testdata, submission) def map_metadata(description, job): """ Generate metadata from the combination of the pipeline definition file (after any parsing for protocols) and the pipeline description
def __init__(self, expect_final=True):
    """Initialise bootloader-interrupt state; ``expect_final`` controls
    whether the final prompt is awaited."""
    super().__init__()
    self.expect_final = expect_final
    self.method = ""
    self.params = None
    # bootloader commands get the dedicated (longer) default timeout
    self.timeout = Timeout(self.name, BOOTLOADER_DEFAULT_CMD_TIMEOUT)
def _check_timeout(prefix, path, local_data):
    """Reject any timeout entry that exceeds the overall job timeout."""
    if local_data is None:
        return
    # job_duration is taken from the enclosing scope
    parsed = Timeout.parse(local_data)
    if parsed > job_duration:
        raise Invalid("%s timeout is larger than job timeout" % prefix, path=path)