def test_file_download_validate(tmpdir):
    """FileDownloadAction.validate(): an existing file passes and reports its
    size; a missing file records an error and leaves size at -1."""
    # Create the file to use
    (tmpdir / "bla.img").write_text("hello", encoding="utf-8")

    def build_action(filename):
        # Both cases differ only in the target filename; build identically.
        url = "file://" + str(tmpdir) + "/" + filename
        act = FileDownloadAction("image", "/path/to/file", urlparse(url))
        act.section = "deploy"
        act.job = Job(1234, {}, None)
        act.parameters = {"image": {"url": url}, "namespace": "common"}
        act.params = act.parameters["image"]
        return act

    # Working: the file exists and its byte size is recorded.
    action = build_action("bla.img")
    action.validate()
    assert action.errors == []
    assert action.size == 5

    # Missing file: validation records a readable error and size stays -1.
    action = build_action("bla2.img")
    action.validate()
    assert action.errors == [
        "Image file '" + str(tmpdir) + "/bla2.img' does not exist or is not readable"
    ]
    assert action.size == -1
def test_download_handler_validate_extra_arguments():
    """Extra download arguments (compression, image_arg, overlay) must be
    propagated into action.data for both supported parameter layouts:
    nested under "images.key" and directly under "key"."""
    url = "http://example.com/resource.img.gz"
    key_params = {
        "url": url,
        "compression": "gz",
        "image_arg": "something",
        "overlay": True,
    }
    # Expected namespaced data is identical for both layouts.
    expected = {
        "common": {
            "download-action": {
                "key": {
                    "file": "/path/to/save/key/resource.img",
                    "image_arg": "something",
                    "compression": "gz",
                    "overlay": True,
                }
            }
        }
    }
    for parameters in (
        {"images": {"key": dict(key_params)}, "namespace": "common"},
        {"key": dict(key_params), "namespace": "common"},
    ):
        action = DownloadHandler("key", "/path/to/save", urlparse(url))
        action.job = Job(1234, {}, None)
        action.parameters = parameters
        nested = parameters.get("images")
        action.params = nested["key"] if nested else parameters["key"]
        action.validate()
        assert action.data == expected
def test_predownloaded_missing_file(tmpdir):
    """PreDownloadedAction.run must raise JobError when the downloads://
    source file does not exist in the namespace download directory."""
    job = Job(1234, {}, None)
    dest = job.mkdtemp("some-other-action")
    action = PreDownloadedAction("rootfs", urlparse("downloads://missing.xz"), dest)
    action.job = job
    action.parameters = {"namespace": "common"}
    # The missing file must surface as a JobError, not a silent pass.
    with pytest.raises(JobError):
        action.run(None, 4242)
def test_overlay_action(self):  # pylint: disable=too-many-locals
    """Check grub ramdisk boot command substitution for the d02 device.

    Before substitution the commands contain the placeholder tokens;
    after calling substitute() the placeholders must be replaced.
    """
    parameters = {
        'device_type': 'd02',
        'job_name': 'grub-standard-ramdisk',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'actions': {
            'boot': {
                'method': 'grub',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
                'dtb': 'broken.dtb'
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/d02-01.yaml'))
    job = Job(4212, parameters, None)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.pipeline = pipeline
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    ip_addr = dispatcher_ip(None)
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    dtb = parameters['actions']['deploy']['dtb']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        # the addresses need to be hexadecimal
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{DTB}': dtb
    }
    params = device['actions']['boot']['methods']
    commands = params['grub']['ramdisk']['commands']
    # Placeholders are present before substitution.
    self.assertIn('net_bootp', commands)
    self.assertIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", commands)
    self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
    self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)
    params['grub']['ramdisk']['commands'] = substitute(params['grub']['ramdisk']['commands'], substitution_dictionary)
    substituted_commands = params['grub']['ramdisk']['commands']
    self.assertIs(type(substituted_commands), list)
    self.assertIn('net_bootp', substituted_commands)
    self.assertNotIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", substituted_commands)
    self.assertIn("linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp" % (ip_addr, kernel), substituted_commands)
    # BUG FIX: these two assertions previously checked an always-empty
    # `parsed` list, making them vacuous; they must check the actual
    # substituted command list (compare the u-boot variant of this test).
    self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', substituted_commands)
    self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', substituted_commands)
def test_overlay_action(self):  # pylint: disable=too-many-locals
    """Check ipxe ramdisk boot command substitution for an x86 device
    rendered from the x86-01.jinja2 template: after substitute() the
    placeholder tokens must be gone and concrete values present."""
    parameters = {
        "device_type": "x86",
        "job_name": "ipxe-pipeline",
        "job_timeout": "15m",
        "action_timeout": "5m",
        "priority": "medium",
        "actions": {
            "boot": {
                "method": "ipxe",
                "commands": "ramdisk",
                "prompts": ["linaro-test", "root@debian:~#"],
            },
            "deploy": {"ramdisk": "initrd.gz", "kernel": "zImage"},
        },
    }
    # Device config comes from the rendered jinja2 template, not a static YAML.
    (rendered, _) = self.factory.create_device("x86-01.jinja2")
    device = NewDevice(yaml.safe_load(rendered))
    job = Job(4212, parameters, None)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters["actions"]["boot"])
    job.pipeline = pipeline
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    ip_addr = dispatcher_ip(None)
    kernel = parameters["actions"]["deploy"]["kernel"]
    ramdisk = parameters["actions"]["deploy"]["ramdisk"]
    # Mapping of placeholder token -> concrete value used by substitute().
    substitution_dictionary = {
        "{SERVER_IP}": ip_addr,
        "{RAMDISK}": ramdisk,
        "{KERNEL}": kernel,
        "{LAVA_MAC}": "00:00:00:00:00:00",
    }
    params = device["actions"]["boot"]["methods"]
    params["ipxe"]["ramdisk"]["commands"] = substitute(
        params["ipxe"]["ramdisk"]["commands"], substitution_dictionary
    )
    commands = params["ipxe"]["ramdisk"]["commands"]
    self.assertIs(type(commands), list)
    # Substituted values are present ...
    self.assertIn("dhcp net0", commands)
    self.assertIn(
        "set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00", commands
    )
    self.assertIn("set extraargs ip=dhcp", commands)
    # ... and the raw placeholder forms are gone.
    self.assertNotIn(
        "kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}", commands
    )
    self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
    self.assertIn("boot", commands)
def test_multi_deploy(self):
    """Run a job with several deploy actions and check that each action's
    namespaced data is isolated: values from a previous DeployAction run
    do not leak into the next one."""
    self.assertIsNotNone(self.parsed_data)
    job = Job(4212, self.parsed_data, None)
    job.timeout = Timeout("Job", Timeout.parse({"minutes": 2}))
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.logger = DummyLogger()
    job.pipeline = pipeline
    counts = {}
    # Build one TestDeploy per action block; action data starts empty.
    for action_data in self.parsed_data["actions"]:
        for name in action_data:
            counts.setdefault(name, 1)
            parameters = action_data[name]
            test_deploy = TestMultiDeploy.TestDeploy(pipeline, parameters, job)
            self.assertEqual({}, test_deploy.action.data)
            counts[name] += 1
    # check that only one action has the example set
    self.assertEqual(
        ["nowhere"],
        [
            detail["deploy"]["example"]
            for detail in self.parsed_data["actions"]
            if "example" in detail["deploy"]
        ],
    )
    self.assertEqual(
        ["faked", "valid"],
        [
            detail["deploy"]["parameters"]
            for detail in self.parsed_data["actions"]
            if "parameters" in detail["deploy"]
        ],
    )
    self.assertIsInstance(pipeline.actions[0], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[1], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[2], TestMultiDeploy.TestDeployAction)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    job.run()
    # After the run, the first action's data must NOT still hold its own
    # parameters (it was overwritten by later runs in the shared store).
    self.assertNotEqual(
        pipeline.actions[0].data, {"fake-deploy": pipeline.actions[0].parameters}
    )
    self.assertEqual(
        pipeline.actions[1].data, {"fake-deploy": pipeline.actions[2].parameters}
    )
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(
        pipeline.actions[2].data, {"fake-deploy": pipeline.actions[2].parameters}
    )
def test_append_overlays_update_guestfs(tmpdir, caplog, mocker):
    """AppendOverlays.update_guestfs() must mount the rootfs image via
    guestfs, create the overlay path and unpack the tarball into it,
    logging each step. guestfs is fully mocked; only the call sequence
    and the log records are checked."""
    caplog.set_level(logging.DEBUG)
    params = {
        "format": "ext4",
        "overlays": {
            "modules": {
                "url": "http://example.com/modules.tar.xz",
                "compression": "xz",
                "format": "tar",
                "path": "/lib",
            }
        },
    }
    action = AppendOverlays("rootfs", params)
    action.job = Job(1234, {}, None)
    action.parameters = {
        "rootfs": {"url": "http://example.com/rootff.ext4", **params},
        "namespace": "common",
    }
    # Pre-populated download-action results the action reads its paths from.
    action.data = {
        "common": {
            "download-action": {
                "rootfs": {
                    "file": str(tmpdir / "rootfs.ext4"),
                    "compression": "gz",
                    "decompressed": True,
                },
                "rootfs.modules": {"file": str(tmpdir / "modules.tar")},
            }
        }
    }
    guestfs = mocker.MagicMock()
    guestfs.add_drive = mocker.MagicMock()
    mocker.patch(
        "lava_dispatcher.actions.deploy.apply_overlay.guestfs.GuestFS", guestfs
    )
    action.update_guestfs()
    # Verify the guestfs call sequence: construct, launch, locate the
    # device, attach the image, mount, mkdir and unpack the overlay.
    guestfs.assert_called_once_with(python_return_dict=True)
    guestfs().launch.assert_called_once_with()
    guestfs().list_devices.assert_called_once_with()
    guestfs().add_drive.assert_called_once_with(str(tmpdir / "rootfs.ext4"))
    guestfs().mount.assert_called_once_with(guestfs().list_devices()[0], "/")
    guestfs().mkdir_p.assert_called_once_with("/lib")
    guestfs().tar_in.assert_called_once_with(
        str(tmpdir / "modules.tar"), "/lib", compress=None
    )
    # Log levels: 20 == INFO, 10 == DEBUG.
    assert caplog.record_tuples == [
        ("dispatcher", 20, f"Modifying '{tmpdir}/rootfs.ext4'"),
        ("dispatcher", 10, "Overlays:"),
        ("dispatcher", 10, f"- rootfs.modules: '{tmpdir}/modules.tar' to '/lib'"),
    ]
def test_overlay_action(self):  # pylint: disable=too-many-locals
    """Check ipxe ramdisk boot command substitution for the x86 device
    loaded from a static YAML config: after substitute() the placeholder
    tokens must be gone and concrete values present."""
    parameters = {
        'device_type': 'x86',
        'job_name': 'ipxe-pipeline',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'actions': {
            'boot': {
                'method': 'ipxe',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
    job = Job(4212, parameters, None)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.pipeline = pipeline
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    ip_addr = dispatcher_ip(None)
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    # Mapping of placeholder token -> concrete value used by substitute().
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{LAVA_MAC}': "00:00:00:00:00:00"
    }
    params = device['actions']['boot']['methods']
    params['ipxe']['ramdisk']['commands'] = substitute(params['ipxe']['ramdisk']['commands'], substitution_dictionary)
    commands = params['ipxe']['ramdisk']['commands']
    self.assertIs(type(commands), list)
    # Substituted values are present ...
    self.assertIn("dhcp net0", commands)
    self.assertIn("set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00", commands)
    self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
    # ... and the raw placeholder forms are gone.
    self.assertNotIn("kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}", commands)
    self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
    self.assertIn("boot", commands)
def test_http_download_run(tmpdir):
    """HttpDownloadAction.run with compression=xz: the streamed xz payload
    is decompressed on the fly, the checksums are recorded in the results
    and the namespaced action data is filled in."""

    def reader():
        # A valid xz stream containing the text "hello world\n".
        yield b"\xfd7zXZ\x00\x00\x04\xe6\xd6\xb4F\x02\x00!\x01\x16\x00\x00"
        yield b"\x00t/\xe5\xa3\x01\x00\x0bhello world\n\x00\xa1\xf2\xff\xc4j"
        yield b"\x7f\xbf\xcf\x00\x01$\x0c\xa6\x18\xd8\xd8\x1f\xb6\xf3}\x01"
        yield b"\x00\x00\x00\x00\x04YZ"

    action = HttpDownloadAction(
        "rootfs", str(tmpdir), urlparse("https://example.com/rootfs.xz")
    )
    action.job = Job(1234, {}, None)
    action.url = urlparse("https://example.com/rootfs.xz")
    action.parameters = {
        "to": "download",
        "rootfs": {
            "url": "https://example.com/rootfs.xz",
            "compression": "xz",
            # Expected checksums of the *compressed* stream.
            "md5sum": "0107d527acf9b8de628b7b4d103c89d1",
            "sha256sum": "3275a39be7b717d548b66f3c8f23d940603a63b0f13d84a596d979a7f66feb2c",
            "sha512sum": "d0850c3e0c45bdf74995907a04f69806a070d79a4f0b2dd82d6b96adafdbfd85ce6c1daaff916ff089bdf9b04eba7805041c49afecdbeabca69fef802e60de35",
        },
        "namespace": "common",
    }
    action.params = action.parameters["rootfs"]
    action.reader = reader
    action.size = 68
    action.fname = str(tmpdir / "rootfs/rootfs")
    action.run(None, 4212)
    # The file on disk holds the decompressed payload.
    data = ""
    with open(str(tmpdir / "rootfs/rootfs")) as f_in:
        data = f_in.read()
    assert data == "hello world\n"
    assert dict(action.results) == {
        "success": {
            "sha512": "d0850c3e0c45bdf74995907a04f69806a070d79a4f0b2dd82d6b96adafdbfd85ce6c1daaff916ff089bdf9b04eba7805041c49afecdbeabca69fef802e60de35"
        },
        "label": "rootfs",
        "size": 68,
        "md5sum": "0107d527acf9b8de628b7b4d103c89d1",
        "sha256sum": "3275a39be7b717d548b66f3c8f23d940603a63b0f13d84a596d979a7f66feb2c",
        "sha512sum": "d0850c3e0c45bdf74995907a04f69806a070d79a4f0b2dd82d6b96adafdbfd85ce6c1daaff916ff089bdf9b04eba7805041c49afecdbeabca69fef802e60de35",
    }
    assert action.data == {
        "common": {
            "download-action": {
                "rootfs": {
                    "decompressed": True,
                    "file": "%s/rootfs/rootfs" % str(tmpdir),
                    "md5": "0107d527acf9b8de628b7b4d103c89d1",
                    "sha256": "3275a39be7b717d548b66f3c8f23d940603a63b0f13d84a596d979a7f66feb2c",
                    "sha512": "d0850c3e0c45bdf74995907a04f69806a070d79a4f0b2dd82d6b96adafdbfd85ce6c1daaff916ff089bdf9b04eba7805041c49afecdbeabca69fef802e60de35",
                },
                "file": {"rootfs": "%s/rootfs/rootfs" % str(tmpdir)},
            }
        }
    }
def test_http_download_run(tmpdir):
    """HttpDownloadAction.run streams the payload to disk and records the
    md5/sha256/sha512 checksums in both the results and the action data."""
    md5 = "fc5e038d38a57032085441e7fe7010b0"
    sha256 = "936a185caaa266bb9cbe981e9e05cb78cd732b0b3280eb944412bb6f8f8f07af"
    sha512 = (
        "1594244d52f2d8c12b142bb61f47bc2eaf503d6d9ca8480cae9fcf112f66e496"
        "7dc5e8fa98285e36db8af1b8ffa8b84cb15e0fbcf836c3deb803c13f37659a60"
    )

    def chunks():
        # The payload arrives in two pieces.
        yield b"hello"
        yield b"world"

    action = HttpDownloadAction("dtb", str(tmpdir), urlparse("https://example.com/dtb"))
    action.job = Job(1234, {"dispatcher": {}}, None)
    action.url = urlparse("https://example.com/dtb")
    action.parameters = {
        "to": "download",
        "images": {
            "dtb": {
                "url": "https://example.com/dtb",
                "md5sum": md5,
                "sha256sum": sha256,
                "sha512sum": sha512,
            }
        },
        "namespace": "common",
    }
    action.params = action.parameters["images"]["dtb"]
    action.reader = chunks
    action.fname = str(tmpdir / "dtb/dtb")
    action.run(None, 4212)
    # The file on disk is the concatenation of the streamed chunks.
    with open(str(tmpdir / "dtb/dtb")) as f_in:
        assert f_in.read() == "helloworld"
    assert dict(action.results) == {
        "success": {"sha512": sha512},
        "label": "dtb",
        "size": 10,
        "md5sum": md5,
        "sha256sum": sha256,
        "sha512sum": sha512,
    }
    assert action.data == {
        "common": {
            "download-action": {
                "dtb": {
                    "decompressed": False,
                    "file": "%s/dtb/dtb" % str(tmpdir),
                    "md5": md5,
                    "sha256": sha256,
                    "sha512": sha512,
                },
                "file": {"dtb": "%s/dtb/dtb" % str(tmpdir)},
            }
        }
    }
def test_run(monkeypatch):
    """FlasherAction.run spawns each configured flasher command through
    subprocess.Popen, with {HARD_RESET_COMMAND} expanded from the device
    commands; Popen and select.epoll are replaced by fakes."""

    class FakeStream:
        def readlines(self):
            return []

    class FakeProc:
        def __init__(self):
            self.stderr = FakeStream()
            self.stdout = FakeStream()

        def poll(self):
            return 0

        def wait(self):
            return 0

    class FakePoller:
        def register(self, fd, flag):
            pass

    # Commands expected, in order; each fake Popen call consumes one.
    expected = [
        ["nice", "/home/lava/bin/PiCtrl.py", "PowerPlug", "0", "off"],
        ["nice", "touch"],
    ]

    def fake_popen(
        cmd, cwd, stdout, stderr, bufsize, universal_newlines
    ):  # nosec - unit test
        assert cmd == expected.pop(0)  # nosec - unit test
        assert stdout == subprocess.PIPE  # nosec - unit test
        assert stderr == subprocess.PIPE  # nosec - unit test
        assert bufsize == 1  # nosec - unit test
        assert universal_newlines  # nosec - unit test
        return FakeProc()

    monkeypatch.setattr(subprocess, "Popen", fake_popen)
    monkeypatch.setattr(select, "epoll", lambda: FakePoller())

    action = FlasherAction()
    action.job = Job(1234, {}, None)
    action.job.device = PipelineDevice(
        {
            "actions": {
                "deploy": {
                    "methods": {
                        "flasher": {"commands": ["{HARD_RESET_COMMAND}", "touch"]}
                    }
                }
            },
            "commands": {"hard_reset": "/home/lava/bin/PiCtrl.py PowerPlug 0 off"},
        }
    )
    action.parameters = {"namespace": "common", "images": {}}
    action.section = Flasher.action_type
    # self.commands is populated by validate
    action.validate()
    assert action.errors == []  # nosec - unit test
    # Run the action; every expected command must have been consumed.
    action.run(None, 10)
    assert expected == []  # nosec - unit test
def test_overlay_action(self):  # pylint: disable=too-many-locals
    """Check u-boot ramdisk boot command substitution for beaglebone-black:
    substitute() and a manual str.replace loop must both remove every
    placeholder token and insert the concrete values."""
    parameters = {
        'device_type': 'beaglebone-black',
        'job_name': 'uboot-pipeline',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'actions': {
            'boot': {
                'method': 'u-boot',
                'commands': 'ramdisk',
                'type': 'bootz',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
                'dtb': 'broken.dtb'
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/bbb-01.yaml'))
    job = Job(4212, parameters, None)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.pipeline = pipeline
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    ip_addr = dispatcher_ip(None)
    parsed = []
    # BUG FIX: kernel_addr previously read the 'ramdisk' key (copy-paste
    # error), so {KERNEL_ADDR} and {BOOTX} were substituted with the
    # ramdisk load address instead of the kernel load address.
    kernel_addr = job.device['parameters'][overlay.parameters['type']]['kernel']
    ramdisk_addr = job.device['parameters'][overlay.parameters['type']]['ramdisk']
    dtb_addr = job.device['parameters'][overlay.parameters['type']]['dtb']
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    dtb = parameters['actions']['deploy']['dtb']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        # the addresses need to be hexadecimal
        '{KERNEL_ADDR}': kernel_addr,
        '{DTB_ADDR}': dtb_addr,
        '{RAMDISK_ADDR}': ramdisk_addr,
        '{BOOTX}': "%s %s %s %s" % (
            overlay.parameters['type'], kernel_addr, ramdisk_addr, dtb_addr),
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{DTB}': dtb
    }
    params = device['actions']['boot']['methods']
    params['u-boot']['ramdisk']['commands'] = substitute(params['u-boot']['ramdisk']['commands'], substitution_dictionary)
    commands = params['u-boot']['ramdisk']['commands']
    self.assertIs(type(commands), list)
    # substitute() inserted concrete values ...
    self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'", commands)
    self.assertIn("setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'", commands)
    self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", commands)
    # ... and no raw placeholders remain.
    self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", commands)
    self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", commands)
    self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", commands)
    # Repeat the substitution manually with str.replace and check it
    # produces the same observable result.
    for line in params['u-boot']['ramdisk']['commands']:
        line = line.replace('{SERVER_IP}', ip_addr)
        # the addresses need to be hexadecimal
        line = line.replace('{KERNEL_ADDR}', kernel_addr)
        line = line.replace('{DTB_ADDR}', dtb_addr)
        line = line.replace('{RAMDISK_ADDR}', ramdisk_addr)
        line = line.replace('{BOOTX}', "%s %s %s %s" % (
            overlay.parameters['type'], kernel_addr, ramdisk_addr, dtb_addr))
        line = line.replace('{RAMDISK}', ramdisk)
        line = line.replace('{KERNEL}', kernel)
        line = line.replace('{DTB}', dtb)
        parsed.append(line)
    self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'", parsed)
    self.assertIn("setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'", parsed)
    self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", parsed)
    self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", parsed)
    self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", parsed)
    self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", parsed)
def parse(self, content, device, job_id, logger, dispatcher_config, env_dut=None):
    """Parse a YAML job definition into a Job with a populated Pipeline.

    :param content: job definition as a YAML string
    :param device: device object the job will run on
    :param job_id: identifier for the new Job
    :param logger: logger attached to the Job
    :param dispatcher_config: optional YAML string of dispatcher settings
    :param env_dut: optional DUT environment, stored in job parameters
    :raises JobError: on an unknown action name or an unmet/invalid
        compatibility requirement
    :return: the constructed Job
    """
    data = yaml.safe_load(content)
    job = Job(job_id, data, logger)
    # Per-namespace counters of test actions needing an overlay.
    test_counts = {}
    job.device = device
    job.parameters["env_dut"] = env_dut
    # Load the dispatcher config
    job.parameters["dispatcher"] = {}
    if dispatcher_config is not None:
        config = yaml.safe_load(dispatcher_config)
        if isinstance(config, dict):
            job.parameters["dispatcher"] = config
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [
        item[0](job.parameters, job_id)
        for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])
    ]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)

    # deploy and boot classes can populate the pipeline differently depending
    # on the test action type they are linked with (via namespacing).
    # This code builds an information dict for each namespace which is then
    # passed as a parameter to each Action class to use.
    test_actions = [action for action in data["actions"] if "test" in action]
    for test_action in test_actions:
        test_parameters = test_action["test"]
        test_type = LavaTest.select(device, test_parameters)
        namespace = test_parameters.get("namespace", "common")
        connection_namespace = test_parameters.get(
            "connection-namespace", namespace
        )
        if namespace in job.test_info:
            job.test_info[namespace].append(
                {"class": test_type, "parameters": test_parameters}
            )
        else:
            job.test_info.update(
                {namespace: [{"class": test_type, "parameters": test_parameters}]}
            )
        # A distinct connection-namespace gets the same test info entry.
        if namespace != connection_namespace:
            job.test_info.update(
                {
                    connection_namespace: [
                        {"class": test_type, "parameters": test_parameters}
                    ]
                }
            )

    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data["actions"]:
        for name in action_data:
            # Set a default namespace if needed
            namespace = action_data[name].setdefault("namespace", "common")
            test_counts.setdefault(namespace, 1)

            if name == "deploy" or name == "boot" or name == "test":
                action = parse_action(
                    action_data,
                    name,
                    device,
                    pipeline,
                    job.test_info,
                    test_counts[namespace],
                )
                # Only overlay-carrying test actions bump the counter.
                if name == "test" and action.needs_overlay():
                    test_counts[namespace] += 1
            elif name == "repeat":
                count = action_data[name][
                    "count"
                ]  # first list entry must be the count dict
                repeats = action_data[name]["actions"]
                for c_iter in range(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for (
                            repeat_action
                        ) in repeating:  # name of the action for this block
                            # Tag each expansion with its iteration index.
                            repeating[repeat_action]["repeat-count"] = c_iter
                            namespace = repeating[repeat_action].setdefault(
                                "namespace", "common"
                            )
                            test_counts.setdefault(namespace, 1)
                            action = parse_action(
                                repeating,
                                repeat_action,
                                device,
                                pipeline,
                                job.test_info,
                                test_counts[namespace],
                            )
                            if repeat_action == "test" and action.needs_overlay():
                                test_counts[namespace] += 1
            elif name == "command":
                action = CommandAction()
                action.parameters = action_data[name]
                pipeline.add_action(action)
            else:
                raise JobError("Unknown action name '%s'" % name)

    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(None)
    job.pipeline = pipeline
    # Enforce the optional job-vs-dispatcher compatibility contract.
    if "compatibility" in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data["compatibility"])
        except ValueError as exc:
            raise JobError("invalid compatibility value: %s" % exc)
        if job_c < data_c:
            raise JobError(
                "Dispatcher unable to meet job compatibility requirement. %d > %d"
                % (job_c, data_c)
            )
    return job