def test_overlay_action(self):  # pylint: disable=too-many-locals
    parameters = {
        'device_type': 'd02',
        'job_name': 'grub-standard-ramdisk',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'grub',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
                'dtb': 'broken.dtb'
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/d02-01.yaml'))
    job = Job(4212, None, None, None, parameters)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.set_pipeline(pipeline)
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    try:
        ip_addr = dispatcher_ip()
    except InfrastructureError as exc:
        raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    dtb = parameters['actions']['deploy']['dtb']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        # the addresses need to be hexadecimal
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{DTB}': dtb
    }
    params = device['actions']['boot']['methods']
    commands = params['grub']['ramdisk']['commands']
    self.assertIn('net_bootp', commands)
    self.assertIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", commands)
    self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
    self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)
    params['grub']['ramdisk']['commands'] = substitute(params['grub']['ramdisk']['commands'], substitution_dictionary)
    substituted_commands = params['grub']['ramdisk']['commands']
    self.assertIs(type(substituted_commands), list)
    self.assertIn('net_bootp', substituted_commands)
    self.assertNotIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", substituted_commands)
    self.assertIn("linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp" % (ip_addr, kernel), substituted_commands)
    self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', substituted_commands)
    self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', substituted_commands)
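# The grub, ipxe and u-boot overlay tests in this file all lean on a
# `substitute` helper. The manual line.replace() loop in the u-boot test
# below mirrors its observable behaviour; this is only a minimal sketch of
# that behaviour, not the dispatcher's actual implementation:


def substitute_sketch(command_list, dictionary):
    """Apply a token -> value mapping to every command string (sketch)."""
    parsed = []
    for line in command_list:
        for token, value in dictionary.items():
            line = line.replace(token, value)
        parsed.append(line)
    return parsed


# Example:
#   substitute_sketch(['initrd (tftp,{SERVER_IP})/{RAMDISK}'],
#                     {'{SERVER_IP}': '10.0.0.1', '{RAMDISK}': 'initrd.gz'})
#   -> ['initrd (tftp,10.0.0.1)/initrd.gz']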
def test_overlay_action(self):  # pylint: disable=too-many-locals
    parameters = {
        'device_type': 'd02',
        'job_name': 'grub-standard-ramdisk',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'grub',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
                'dtb': 'broken.dtb'
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/d02-01.yaml'))
    job = Job(4212, parameters, None)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.pipeline = pipeline
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    ip_addr = dispatcher_ip(None)
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    dtb = parameters['actions']['deploy']['dtb']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        # the addresses need to be hexadecimal
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{DTB}': dtb
    }
    params = device['actions']['boot']['methods']
    commands = params['grub']['ramdisk']['commands']
    self.assertIn('net_bootp', commands)
    self.assertIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", commands)
    self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
    self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)
    params['grub']['ramdisk']['commands'] = substitute(params['grub']['ramdisk']['commands'], substitution_dictionary)
    substituted_commands = params['grub']['ramdisk']['commands']
    self.assertIs(type(substituted_commands), list)
    self.assertIn('net_bootp', substituted_commands)
    self.assertNotIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", substituted_commands)
    self.assertIn("linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp" % (ip_addr, kernel), substituted_commands)
    self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', substituted_commands)
    self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', substituted_commands)
def test_overlay_action(self):  # pylint: disable=too-many-locals
    parameters = {
        'device_type': 'x86',
        'job_name': 'ipxe-pipeline',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'ipxe',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
    job = Job(4212, parameters, None)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.pipeline = pipeline
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    ip_addr = dispatcher_ip(None)
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{LAVA_MAC}': "00:00:00:00:00:00"
    }
    params = device['actions']['boot']['methods']
    params['ipxe']['ramdisk']['commands'] = substitute(params['ipxe']['ramdisk']['commands'], substitution_dictionary)
    commands = params['ipxe']['ramdisk']['commands']
    self.assertIs(type(commands), list)
    self.assertIn("dhcp net0", commands)
    self.assertIn("set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00", commands)
    self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
    self.assertNotIn("kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}", commands)
    self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
    self.assertIn("boot", commands)
def test_overlay_action(self):  # pylint: disable=too-many-locals
    parameters = {
        'device_type': 'x86',
        'job_name': 'ipxe-pipeline',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'ipxe',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
    job = Job(4212, None, parameters)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.set_pipeline(pipeline)
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    try:
        ip_addr = dispatcher_ip()
    except InfrastructureError as exc:
        raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{LAVA_MAC}': "00:00:00:00:00:00"
    }
    params = device['actions']['boot']['methods']
    params['ipxe']['ramdisk']['commands'] = substitute(params['ipxe']['ramdisk']['commands'], substitution_dictionary)
    commands = params['ipxe']['ramdisk']['commands']
    self.assertIs(type(commands), list)
    self.assertIn("dhcp net0", commands)
    self.assertIn("set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00", commands)
    self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
    self.assertNotIn("kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}", commands)
    self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
    self.assertIn("boot", commands)
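# dispatcher_ip() above resolves the address the DUT should use to reach the
# dispatcher's TFTP server. A hedged sketch of one way such a lookup can work
# (asking the kernel which local address routes outwards); this is an
# illustration only, not the dispatcher's real implementation:

import socket


def dispatcher_ip_sketch():
    """Return a local IP address reachable by the DUT (illustrative only)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # connect() on a UDP socket sends no packets; it only selects a route
        sock.connect(('10.255.255.255', 1))
        return sock.getsockname()[0]
    finally:
        sock.close()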
def test_multi_deploy(self):
    self.assertIsNotNone(self.parsed_data)
    job = Job(4212, None, None, None, self.parsed_data)
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.parameters['output_dir'] = mkdtemp()
    job.set_pipeline(pipeline)
    counts = {}
    for action_data in self.parsed_data['actions']:
        for name in action_data:
            counts.setdefault(name, 1)
            parameters = action_data[name]
            test_deploy = TestMultiDeploy.TestDeploy(pipeline, parameters, job)
            self.assertEqual({'common': {}}, test_deploy.action.data)
            counts[name] += 1
    # check that only one action has the example set
    self.assertEqual(
        ['nowhere'],
        [detail['deploy']['example'] for detail in self.parsed_data['actions']
         if 'example' in detail['deploy']])
    self.assertEqual(
        ['faked', 'valid'],
        [detail['deploy']['parameters'] for detail in self.parsed_data['actions']
         if 'parameters' in detail['deploy']])
    self.assertIsInstance(pipeline.actions[0], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[1], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[2], TestMultiDeploy.TestDeployAction)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    job.run()
    self.assertNotEqual(pipeline.actions[0].data,
                        {'common': {}, 'fake_deploy': pipeline.actions[0].parameters})
    self.assertEqual(pipeline.actions[1].data,
                     {'common': {}, 'fake_deploy': pipeline.actions[2].parameters})
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(pipeline.actions[2].data,
                     {'common': {}, 'fake_deploy': pipeline.actions[2].parameters})
def parse(self, content, device, output_dir=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    job = Job(data)
    job.device = device
    job.parameters['output_dir'] = output_dir
    pipeline = Pipeline(job=job)
    for action_data in data['actions']:
        line = action_data.pop('yaml_line', None)
        for name in action_data:
            if name == "deploy":
                # allow the classmethod to check the parameters
                deploy = Deployment.select(device, action_data[name])(pipeline)
                # still need to pass the parameters to the instance
                deploy.action.parameters = action_data[name]
                if 'test' in data['actions']:
                    deploy.action.parameters = action_data['test']
                deploy.action.yaml_line = line
                device.deployment_data = deployment_data.get(deploy.action.parameters['os'])
                deploy.action.parameters = {'deployment_data': device.deployment_data}
            else:
                action_class = Action.find(name)
                # select the specific action of this class for this job
                action = action_class()
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                pipeline.add_action(action)
                # uncomment for debug
                # print action.parameters
    # the only parameters sent to the job are job parameters
    # like job_name, logging_level or target_group.
    data.pop('actions')
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
def test_multi_deploy(self):
    self.assertIsNotNone(self.parsed_data)
    job = Job(4212, None, self.parsed_data)
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.parameters['output_dir'] = mkdtemp()
    job.set_pipeline(pipeline)
    counts = {}
    for action_data in self.parsed_data['actions']:
        for name in action_data:
            counts.setdefault(name, 1)
            if counts[name] >= 2:
                reset_context = ResetContext()
                reset_context.section = 'deploy'
                pipeline.add_action(reset_context)
            parameters = action_data[name]
            test_deploy = TestMultiDeploy.TestDeploy(pipeline, parameters, job)
            self.assertEqual({'common': {}}, test_deploy.action.data)
            counts[name] += 1
    # check that only one action has the example set
    self.assertEqual(
        ['nowhere'],
        [detail['deploy']['example'] for detail in self.parsed_data['actions']
         if 'example' in detail['deploy']])
    self.assertEqual(
        ['faked', 'valid'],
        [detail['deploy']['parameters'] for detail in self.parsed_data['actions']
         if 'parameters' in detail['deploy']])
    self.assertIsInstance(pipeline.actions[0], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[1], ResetContext)
    self.assertIsInstance(pipeline.actions[2], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[3], ResetContext)
    self.assertIsInstance(pipeline.actions[4], TestMultiDeploy.TestDeployAction)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    job.run()
    self.assertNotEqual(pipeline.actions[0].data,
                        {'common': {}, 'fake_deploy': pipeline.actions[0].parameters})
    self.assertNotEqual(pipeline.actions[1].data,
                        {'common': {}, 'fake_deploy': pipeline.actions[1].parameters})
    self.assertEqual(pipeline.actions[2].data,
                     {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(pipeline.actions[4].data,
                     {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})
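# The two multi-deploy tests above rely on ResetContext sitting between
# deployments so that data written by one DeployAction cannot leak into the
# next. A minimal sketch of such an action, assuming the data/run()
# conventions visible in these tests (names here are illustrative only):


class ResetContextSketch(object):
    """Hypothetical pipeline action that wipes the shared section context."""

    def __init__(self):
        self.section = None
        self.data = {}

    def run(self, connection, args=None):
        # keep only the always-present 'common' namespace, drop the rest
        self.data = {'common': {}}
        return connection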
def parse(self, content, device, job_id, zmq_config, dispatcher_config,
          output_dir=None, env_dut=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    job = Job(job_id, data, zmq_config)
    test_counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    # Load the dispatcher config
    job.parameters['dispatcher'] = {}
    if dispatcher_config is not None:
        config = yaml.load(dispatcher_config)
        if isinstance(config, dict):
            job.parameters['dispatcher'] = config
    # Setup the logging now that we have the parameters
    job.setup_logging()
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters, job_id)
                     for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # deploy and boot classes can populate the pipeline differently depending
    # on the test action type they are linked with (via namespacing).
    # This code builds an information dict for each namespace which is then
    # passed as a parameter to each Action class to use.
    test_info = {}
    test_actions = [action for action in data['actions'] if 'test' in action]
    for test_action in test_actions:
        test_parameters = test_action['test']
        test_type = LavaTest.select(device, test_parameters)
        namespace = test_parameters.get('namespace', 'common')
        if namespace in test_info:
            test_info[namespace].append({'class': test_type, 'parameters': test_parameters})
        else:
            test_info.update({namespace: [{'class': test_type, 'parameters': test_parameters}]})
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            # Set a default namespace if needed
            namespace = action_data[name].setdefault('namespace', 'common')
            test_counts.setdefault(namespace, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                parse_action(action_data, name, device, pipeline,
                             test_info, test_counts[namespace])
                if name == 'test':
                    test_counts[namespace] += 1
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in range(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            namespace = repeating[repeat_action].setdefault('namespace', 'common')
                            test_counts.setdefault(namespace, 1)
                            parse_action(repeating, repeat_action, device, pipeline,
                                         test_info, test_counts[namespace])
                            if repeat_action == 'test':
                                test_counts[namespace] += 1
            elif name == 'command':
                action = CommandAction()
                action.parameters = action_data[name]
                pipeline.add_action(action)
            else:
                raise JobError("Unknown action name '%s'" % name)
    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(None)
    data['output_dir'] = output_dir
    job.pipeline = pipeline
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (job_c, data_c))
    return job
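# parse_action() is the strategy dispatch used by every parse() variant in
# this file. A hedged sketch of the selection it is assumed to perform; the
# `strategies` mapping is a stand-in for the Deployment/Boot/LavaTest classes
# whose select() classmethods appear elsewhere in this module:


def parse_action_sketch(action_data, name, device, pipeline, strategies):
    """Pick the strategy class willing to handle this action and attach it."""
    parameters = action_data[name]
    strategy_class = strategies[name].select(device, parameters)
    # instantiating the strategy populates the pipeline with its actions
    return strategy_class(pipeline, parameters)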
def test_overlay_action(self):  # pylint: disable=too-many-locals
    parameters = {
        'device_type': 'beaglebone-black',
        'job_name': 'uboot-pipeline',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'u-boot',
                'commands': 'ramdisk',
                'type': 'bootz',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
                'dtb': 'broken.dtb'
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/bbb-01.yaml'))
    job = Job(4212, None, parameters)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.set_pipeline(pipeline)
    overlay = UBootCommandOverlay()
    pipeline.add_action(overlay)
    try:
        ip_addr = dispatcher_ip()
    except InfrastructureError as exc:
        raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
    parsed = []
    kernel_addr = job.device['parameters'][overlay.parameters['type']]['kernel']
    ramdisk_addr = job.device['parameters'][overlay.parameters['type']]['ramdisk']
    dtb_addr = job.device['parameters'][overlay.parameters['type']]['dtb']
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    dtb = parameters['actions']['deploy']['dtb']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        # the addresses need to be hexadecimal
        '{KERNEL_ADDR}': kernel_addr,
        '{DTB_ADDR}': dtb_addr,
        '{RAMDISK_ADDR}': ramdisk_addr,
        '{BOOTX}': "%s %s %s %s" % (
            overlay.parameters['type'], kernel_addr, ramdisk_addr, dtb_addr),
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{DTB}': dtb
    }
    params = device['actions']['boot']['methods']
    params['u-boot']['ramdisk']['commands'] = substitute(params['u-boot']['ramdisk']['commands'], substitution_dictionary)
    commands = params['u-boot']['ramdisk']['commands']
    self.assertIs(type(commands), list)
    self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'", commands)
    self.assertIn("setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'", commands)
    self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", commands)
    self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", commands)
    self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", commands)
    self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", commands)
    for line in params['u-boot']['ramdisk']['commands']:
        line = line.replace('{SERVER_IP}', ip_addr)
        # the addresses need to be hexadecimal
        line = line.replace('{KERNEL_ADDR}', kernel_addr)
        line = line.replace('{DTB_ADDR}', dtb_addr)
        line = line.replace('{RAMDISK_ADDR}', ramdisk_addr)
        line = line.replace('{BOOTX}', "%s %s %s %s" % (
            overlay.parameters['type'], kernel_addr, ramdisk_addr, dtb_addr))
        line = line.replace('{RAMDISK}', ramdisk)
        line = line.replace('{KERNEL}', kernel)
        line = line.replace('{DTB}', dtb)
        parsed.append(line)
    self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'", parsed)
    self.assertIn("setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'", parsed)
    self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", parsed)
    self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", parsed)
    self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", parsed)
    self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", parsed)
def test_overlay_action(self):  # pylint: disable=too-many-locals
    parameters = {
        'device_type': 'beaglebone-black',
        'job_name': 'uboot-pipeline',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'u-boot',
                'commands': 'ramdisk',
                'type': 'bootz',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
                'dtb': 'broken.dtb'
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/bbb-01.yaml'))
    job = Job(4212, parameters, None)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.pipeline = pipeline
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    ip_addr = dispatcher_ip(None)
    parsed = []
    kernel_addr = job.device['parameters'][overlay.parameters['type']]['kernel']
    ramdisk_addr = job.device['parameters'][overlay.parameters['type']]['ramdisk']
    dtb_addr = job.device['parameters'][overlay.parameters['type']]['dtb']
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    dtb = parameters['actions']['deploy']['dtb']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        # the addresses need to be hexadecimal
        '{KERNEL_ADDR}': kernel_addr,
        '{DTB_ADDR}': dtb_addr,
        '{RAMDISK_ADDR}': ramdisk_addr,
        '{BOOTX}': "%s %s %s %s" % (
            overlay.parameters['type'], kernel_addr, ramdisk_addr, dtb_addr),
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{DTB}': dtb
    }
    params = device['actions']['boot']['methods']
    params['u-boot']['ramdisk']['commands'] = substitute(
        params['u-boot']['ramdisk']['commands'], substitution_dictionary)
    commands = params['u-boot']['ramdisk']['commands']
    self.assertIs(type(commands), list)
    self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'", commands)
    self.assertIn("setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'", commands)
    self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", commands)
    self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", commands)
    self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", commands)
    self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", commands)
    for line in params['u-boot']['ramdisk']['commands']:
        line = line.replace('{SERVER_IP}', ip_addr)
        # the addresses need to be hexadecimal
        line = line.replace('{KERNEL_ADDR}', kernel_addr)
        line = line.replace('{DTB_ADDR}', dtb_addr)
        line = line.replace('{RAMDISK_ADDR}', ramdisk_addr)
        line = line.replace('{BOOTX}', "%s %s %s %s" % (
            overlay.parameters['type'], kernel_addr, ramdisk_addr, dtb_addr))
        line = line.replace('{RAMDISK}', ramdisk)
        line = line.replace('{KERNEL}', kernel)
        line = line.replace('{DTB}', dtb)
        parsed.append(line)
    self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'", parsed)
    self.assertIn("setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'", parsed)
    self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", parsed)
    self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", parsed)
    self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", parsed)
    self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", parsed)
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    for instance in Protocol.select_all(job.parameters):
        job.protocols.append(instance(job.parameters))
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            if type(action_data[name]) is dict:  # FIXME: commands are not fully implemented & may produce a list
                action_data[name]['default_action_timeout'] = self.context['default_action_duration']
                action_data[name]['default_test_timeout'] = self.context['default_test_duration']
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                # reset the context before adding a second deployment and again before third etc.
                if name == 'deploy' and counts[name] >= 2:
                    reset_context = ResetContext()
                    reset_context.section = name
                    pipeline.add_action(reset_context)
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in xrange(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    pipeline.add_action(FinalizeAction())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters)
                     for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            if type(action_data[name]) is dict:  # FIXME: commands are not fully implemented & may produce a list
                action_data[name]['default_action_timeout'] = self.context['default_action_duration']
                action_data[name]['default_test_timeout'] = self.context['default_test_duration']
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                # reset the context before adding a second deployment and again before third etc.
                if name == 'deploy' and counts[name] >= 2:
                    reset_context = ResetContext()
                    reset_context.section = name
                    pipeline.add_action(reset_context)
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in xrange(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    pipeline.add_action(FinalizeAction())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
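# The parse() variants above instantiate protocols in class-level order via
# sorted(level_tuple, key=...). A tiny worked example of that idiom with
# illustrative (class-name, level) pairs:

level_tuple = [('MultinodeProtocol', 2), ('VlandProtocol', 5), ('LxcProtocol', 0)]
ordered = [item[0] for item in sorted(level_tuple, key=lambda item: item[1])]
assert ordered == ['LxcProtocol', 'MultinodeProtocol', 'VlandProtocol']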
def parse(self, content, device, job_id, socket_addr, master_cert, slave_cert,
          output_dir=None, env_dut=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    self.context['default_connection_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, master_cert, slave_cert, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters, job_id)
                     for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # some special handling is needed to tell the overlay classes about the
    # presence or absence of a test action
    test_action = True
    test_list = [action for action in data['actions'] if 'test' in action]
    if not test_list:
        test_action = False
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            if isinstance(action_data[name], dict):  # FIXME: commands are not fully implemented & may produce a list
                action_data[name].update(self._map_context_defaults())
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                parse_action(action_data, name, device, pipeline, test_action)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in range(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline, test_action)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if isinstance(action_data[name], dict):
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif isinstance(action_data[name], list):
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                action.connection_timeout = Timeout(action.name, self.context['default_connection_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(self._map_context_defaults())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (job_c, data_c))
    return job
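# The compatibility guard at the end of parse() refuses jobs that declare a
# newer compatibility level than the dispatcher supports. A self-contained
# sketch of the same check (ValueError stands in for JobError here):


def check_compatibility_sketch(job_compatibility, data_compatibility):
    """Raise if the submitted job requires a newer dispatcher (sketch)."""
    job_c = int(job_compatibility)
    data_c = int(data_compatibility)
    if job_c < data_c:
        raise ValueError(
            'Dispatcher unable to meet job compatibility requirement. '
            '%d > %d' % (job_c, data_c))


# check_compatibility_sketch('2', '4') raises; ('4', '2') passes silently.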
def test_overlay_action(self):  # pylint: disable=too-many-locals
    parameters = {
        "device_type": "beaglebone-black",
        "job_name": "uboot-pipeline",
        "job_timeout": "15m",
        "action_timeout": "5m",
        "priority": "medium",
        "output_dir": mkdtemp(),
        "actions": {
            "boot": {"method": "u-boot", "commands": "ramdisk", "type": "bootz"},
            "deploy": {"ramdisk": "initrd.gz", "kernel": "zImage", "dtb": "broken.dtb"},
        },
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), "../devices/bbb-01.yaml"))
    job = Job(4212, None, parameters)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters["actions"]["boot"])
    job.set_pipeline(pipeline)
    overlay = UBootCommandOverlay()
    pipeline.add_action(overlay)
    try:
        ip_addr = dispatcher_ip()
    except InfrastructureError as exc:
        raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
    parsed = []
    kernel_addr = job.device["parameters"][overlay.parameters["type"]]["kernel"]
    ramdisk_addr = job.device["parameters"][overlay.parameters["type"]]["ramdisk"]
    dtb_addr = job.device["parameters"][overlay.parameters["type"]]["dtb"]
    kernel = parameters["actions"]["deploy"]["kernel"]
    ramdisk = parameters["actions"]["deploy"]["ramdisk"]
    dtb = parameters["actions"]["deploy"]["dtb"]
    substitution_dictionary = {
        "{SERVER_IP}": ip_addr,
        # the addresses need to be hexadecimal
        "{KERNEL_ADDR}": kernel_addr,
        "{DTB_ADDR}": dtb_addr,
        "{RAMDISK_ADDR}": ramdisk_addr,
        "{BOOTX}": "%s %s %s %s" % (overlay.parameters["type"], kernel_addr, ramdisk_addr, dtb_addr),
        "{RAMDISK}": ramdisk,
        "{KERNEL}": kernel,
        "{DTB}": dtb,
    }
    params = device["actions"]["boot"]["methods"]
    params["u-boot"]["ramdisk"]["commands"] = substitute(
        params["u-boot"]["ramdisk"]["commands"], substitution_dictionary
    )
    commands = params["u-boot"]["ramdisk"]["commands"]
    self.assertIs(type(commands), list)
    self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'", commands)
    self.assertIn("setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'", commands)
    self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", commands)
    self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", commands)
    self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", commands)
    self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", commands)
    for line in params["u-boot"]["ramdisk"]["commands"]:
        line = line.replace("{SERVER_IP}", ip_addr)
        # the addresses need to be hexadecimal
        line = line.replace("{KERNEL_ADDR}", kernel_addr)
        line = line.replace("{DTB_ADDR}", dtb_addr)
        line = line.replace("{RAMDISK_ADDR}", ramdisk_addr)
        line = line.replace(
            "{BOOTX}", "%s %s %s %s" % (overlay.parameters["type"], kernel_addr, ramdisk_addr, dtb_addr)
        )
        line = line.replace("{RAMDISK}", ramdisk)
        line = line.replace("{KERNEL}", kernel)
        line = line.replace("{DTB}", dtb)
        parsed.append(line)
    self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'", parsed)
    self.assertIn("setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'", parsed)
    self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", parsed)
    self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", parsed)
    self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", parsed)
    self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", parsed)
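# Worked example of the {BOOTX} substitution built above. Assuming bootz
# addresses of "0x82000000" (kernel), "0x83000000" (ramdisk) and
# "0x88000000" (dtb) -- illustrative values, not necessarily bbb-01.yaml's:
#
#   template:    "{BOOTX}"
#   substituted: "bootz 0x82000000 0x83000000 0x88000000"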
def parse(self, content, device, job_id, socket_addr, output_dir=None, env_dut=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    self.context['default_action_duration'] = Timeout.default_duration()
    self.context['default_test_duration'] = Timeout.default_duration()
    self.context['default_connection_duration'] = Timeout.default_duration()
    job = Job(job_id, socket_addr, data)
    counts = {}
    job.device = device
    job.parameters['output_dir'] = output_dir
    job.parameters['env_dut'] = env_dut
    job.parameters['target'] = device.target
    level_tuple = Protocol.select_all(job.parameters)
    # sort the list of protocol objects by the protocol class level.
    job.protocols = [item[0](job.parameters)
                     for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
    pipeline = Pipeline(job=job)
    self._timeouts(data, job)
    # FIXME: also read permissable overrides from device config and set from job data
    # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
    for action_data in data['actions']:
        action_data.pop('yaml_line', None)
        for name in action_data:
            if type(action_data[name]) is dict:  # FIXME: commands are not fully implemented & may produce a list
                action_data[name].update(self._map_context_defaults())
            counts.setdefault(name, 1)
            if name == 'deploy' or name == 'boot' or name == 'test':
                parse_action(action_data, name, device, pipeline)
            elif name == 'repeat':
                count = action_data[name]['count']  # first list entry must be the count dict
                repeats = action_data[name]['actions']
                for c_iter in xrange(count):
                    for repeating in repeats:  # block of YAML to repeat
                        for repeat_action in repeating:  # name of the action for this block
                            if repeat_action == 'yaml_line':
                                continue
                            repeating[repeat_action]['repeat-count'] = c_iter
                            parse_action(repeating, repeat_action, device, pipeline)
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.select(name)()
                action.job = job
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                action.timeout = Timeout(action.name, self.context['default_action_duration'])
                action.connection_timeout = Timeout(action.name, self.context['default_connection_duration'])
                pipeline.add_action(action)
            counts[name] += 1
    # there's always going to need to be a finalize_process action
    finalize = FinalizeAction()
    pipeline.add_action(finalize)
    finalize.populate(self._map_context_defaults())
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    logger = logging.getLogger('dispatcher')
    logger.warning("pipeline contains %r", pipeline)
    if 'compatibility' in data:
        try:
            job_c = int(job.compatibility)
            data_c = int(data['compatibility'])
        except ValueError as exc:
            raise JobError('invalid compatibility value: %s' % exc)
        if job_c < data_c:
            raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (job_c, data_c))
    return job
def parse(self, content, device, output_dir=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    job = Job(data)
    job.device = device
    job.parameters['output_dir'] = output_dir
    pipeline = Pipeline(job=job)
    for action_data in data['actions']:
        line = action_data.pop('yaml_line', None)
        for name in action_data:
            if name == "deploy":
                # allow the classmethod to check the parameters
                deploy = Deployment.select(device, action_data[name])(pipeline)
                # still need to pass the parameters to the instance
                deploy.action.parameters = action_data[name]
                if 'test' in data['actions']:
                    deploy.action.parameters = action_data['test']
                deploy.action.yaml_line = line
                device.deployment_data = deployment_data.get(deploy.action.parameters['os'])
                deploy.action.parameters = {'deployment_data': device.deployment_data}
            elif name == "boot":
                boot = Boot.select(device, action_data[name])(pipeline)
                boot.action.parameters = action_data[name]
                boot.action.yaml_line = line
            # elif name == "test":
            #     lavatest = LavaTest.select(device, action_data[name])(pipeline)
            #     lavatest.action.parameters = action_data[name]
            #     lavatest.action.yaml_line = line
            else:
                # May only end up being used for submit as other actions all need strategy method objects
                # select the specific action of this class for this job
                action = Action.find(name)()
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                pipeline.add_action(action)
                # uncomment for debug
                # print action.parameters
    # there's always going to need to be a finalize_process action
    pipeline.add_action(FinalizeAction())
    # the only parameters sent to the job are job parameters
    # like job_name, logging_level or target_group.
    data.pop('actions')
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
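# Deployment.select(device, parameters), Boot.select() and LavaTest.select()
# all follow the same strategy-selection pattern. A minimal, hypothetical
# sketch of that pattern, assuming each subclass provides an accepts()
# predicate (the real classmethod may differ in detail):


class StrategySketch(object):
    priority = 0

    @classmethod
    def accepts(cls, device, parameters):  # illustrative stub
        return False

    @classmethod
    def select(cls, device, parameters):
        willing = [c for c in cls.__subclasses__()
                   if c.accepts(device, parameters)]
        if not willing:
            raise NotImplementedError(
                "No strategy accepts the parameters for this device")
        # highest priority willing subclass wins
        return sorted(willing, key=lambda c: c.priority, reverse=True)[0]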