def create_custom_job(self, template, job_data):
    """Render *template* into a device and parse *job_data* into a Job.

    :param template: jinja2 device template name passed to create_device.
    :param job_data: job definition dict; its 'context' key (if any) is
        used as the device job context.
    :return: the parsed job with a DummyLogger attached.
    :raises ConfigurationError: when the device/job cannot be parsed.
    """
    job_ctx = job_data.get('context')
    # second element of the tuple (the device dict) is not needed here
    (data, _device_dict) = self.create_device(template, job_ctx)
    device = NewDevice(yaml.safe_load(data))
    if self.debug:
        print('####### Device configuration #######')
        print(data)
        print('#######')
    try:
        parser = JobParser()
        job = parser.parse(yaml.dump(job_data), device, 4999, None, "")
    except (ConfigurationError, TypeError) as exc:
        print('####### Parser exception ########')
        print(device)
        print('#######')
        # chain the original exception so the parser failure is preserved
        raise ConfigurationError("Invalid device: %s" % exc) from exc
    job.logger = DummyLogger()
    return job
def test_device_health_looping(self):
    """Devices in HEALTH_LOOPING must still have health checks scheduled."""
    # Make sure that get_health_check does return something
    Device.get_health_check = _minimal_valid_job
    self.assertNotEqual(self.device01.get_health_check(), None)
    self.assertNotEqual(self.device02.get_health_check(), None)
    self.assertNotEqual(self.device03.get_health_check(), None)
    self.device01.health = Device.HEALTH_LOOPING
    self.device01.save()
    self.device02.health = Device.HEALTH_LOOPING
    self.device02.save()
    self.device03.health = Device.HEALTH_LOOPING
    self.device03.save()
    available_devices = schedule_health_checks(DummyLogger())[0]
    # assertEqual: assertEquals is a deprecated unittest alias
    self.assertEqual(available_devices, {"dt-01": []})
    self._check_hc_scheduled(self.device01)
    # device02 presumably cannot be scheduled (see setUp) — verify fixture
    self._check_hc_not_scheduled(self.device02)
    self._check_hc_scheduled(self.device03)
def test_missing_handler(self):
    """An unknown test-definition 'from' handler must raise JobError."""
    device = NewDevice(
        os.path.join(os.path.dirname(__file__), '../devices/kvm01.yaml'))
    kvm_yaml = os.path.join(os.path.dirname(__file__),
                            'sample_jobs/kvm.yaml')
    parser = JobParser()
    with open(kvm_yaml) as sample_job_data:
        # safe_load: bare yaml.load without a Loader is deprecated/unsafe
        data = yaml.safe_load(sample_job_data)
    data['actions'][2]['test']['definitions'][0][
        'from'] = 'unusable-handler'
    try:
        job = parser.parse(yaml.dump(data), device, 4212, None, "")
        job.logger = DummyLogger()
    except JobError:
        pass
    except Exception as exc:  # pylint: disable=broad-except
        self.fail(exc)
    else:
        self.fail('JobError not raised')
def test_uimage_boot_type(self):
    """Without a deploy kernel type and with a 'bootm' boot type, no
    uboot-prepare-kernel action may appear in the tftp overlay."""
    # uimage in boot type
    del self.deploy_block['kernel']['type']
    self.boot_block['type'] = 'bootm'
    job = self.parser.parse(yaml.dump(self.base_data), self.device,
                            4212, None, "")
    job.logger = DummyLogger()
    job.validate()
    deploy_actions = [item for item in job.pipeline.actions
                      if item.name == 'tftp-deploy']
    deploy = deploy_actions[0]
    overlay_actions = [item for item in deploy.internal_pipeline.actions
                       if item.name == 'prepare-tftp-overlay']
    overlay = overlay_actions[0]
    overlay_names = [item.name
                     for item in overlay.internal_pipeline.actions]
    self.assertNotIn('uboot-prepare-kernel', overlay_names)
def test_device_health_wrong(self):
    """No health check may be scheduled for BAD/MAINTENANCE/RETIRED devices."""
    # Make sure that get_health_check does return something
    Device.get_health_check = _minimal_valid_job
    self.assertNotEqual(self.device01.get_health_check(), None)
    self.assertNotEqual(self.device02.get_health_check(), None)
    self.assertNotEqual(self.device03.get_health_check(), None)
    # HEALTH_(BAD|MAINTENANCE|RETIRED)
    for health in [Device.HEALTH_BAD, Device.HEALTH_MAINTENANCE,
                   Device.HEALTH_RETIRED]:
        self.device01.health = health
        self.device01.save()
        self.device02.health = health
        self.device02.save()
        self.device03.health = health
        self.device03.save()
        available_devices = schedule_health_checks(DummyLogger())[0]
        # assertEqual: assertEquals is a deprecated unittest alias
        self.assertEqual(available_devices, {"dt-01": []})
        self._check_hc_not_scheduled(self.device01)
        self._check_hc_not_scheduled(self.device02)
        self._check_hc_not_scheduled(self.device03)
def test_device_environment(self):
    """env_dut overrides passed to the parser must reach the
    export-device-env action, with values containing spaces quoted."""
    data = """
# YAML syntax.
overrides:
  DEBEMAIL: "*****@*****.**"
  DEBFULLNAME: "Neil Williams"
"""
    factory = Factory()
    job_parser = JobParser()
    (rendered, _) = factory.create_device('bbb-01.jinja2')
    # safe_load: bare yaml.load without a Loader is deprecated/unsafe
    device = NewDevice(yaml.safe_load(rendered))
    sample_job_file = os.path.join(os.path.dirname(__file__),
                                   'sample_jobs/uboot-ramdisk.yaml')
    with open(sample_job_file) as sample_job_data:
        job = job_parser.parse(sample_job_data, device, 4212, None, "",
                               env_dut=data)
    job.logger = DummyLogger()
    self.assertEqual(job.parameters['env_dut'], data)
    job.validate()
    boot_actions = [
        action.internal_pipeline.actions
        for action in job.pipeline.actions if action.name == 'uboot-action'
    ][0]
    retry = [
        action for action in boot_actions if action.name == 'uboot-retry'
    ][0]
    boot_env = [
        action for action in retry.internal_pipeline.actions
        if action.name == 'export-device-env'
    ][0]
    found = False
    for line in boot_env.env:
        if 'DEBFULLNAME' in line:
            found = True
            # assert that the string containing a space still contains that space and is quoted
            self.assertIn('\\\'Neil Williams\\\'', line)
    self.assertTrue(found)
def test_low_medium_high_with_hc(self):
    """With health checks enabled, the health check runs before any job;
    afterwards the highest priority submission is scheduled first."""
    # Enable health checks
    self.device_type01.health_denominator = DeviceType.HEALTH_PER_HOUR
    self.device_type01.health_frequency = 24
    self.device_type01.save()
    # Make sure that get_health_check does return something
    Device.get_health_check = _minimal_valid_job
    self.assertNotEqual(self.device01.get_health_check(), None)
    jobs = []
    # submission order: LOW, MEDIUM, HIGH, MEDIUM, LOW
    for p in [TestJob.LOW, TestJob.MEDIUM, TestJob.HIGH, TestJob.MEDIUM,
              TestJob.LOW]:
        j = TestJob.objects.create(requested_device_type=self.device_type01,
                                   user=self.user, submitter=self.user,
                                   is_public=True,
                                   definition=_minimal_valid_job(None),
                                   priority=p)
        jobs.append(j)
    # Check that a health check will be scheduled before any jobs
    log = DummyLogger()
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    # no submitted job may start while the health check holds the device
    self._check_job(jobs[0], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[1], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[2], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[3], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[4], TestJob.STATE_SUBMITTED)
    current_hc = self.device01.current_job()
    self.assertEqual(current_hc.state, TestJob.STATE_SCHEDULED)
    # finish the health check successfully so scheduling can proceed
    current_hc.go_state_finished(TestJob.HEALTH_COMPLETE)
    current_hc.save()
    # Check that the next job is the highest priority
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    self._check_job(jobs[0], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[1], TestJob.STATE_SUBMITTED)
    # jobs[2] was submitted with TestJob.HIGH
    self._check_job(jobs[2], TestJob.STATE_SCHEDULED, self.device01)
    self._check_job(jobs[3], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[4], TestJob.STATE_SUBMITTED)
def test_extra_options(self):
    """Extra qemu options from the device dictionary must be coerced to
    strings before reaching the qemu command line."""
    (rendered, _) = self.factory.create_device('kvm01.jinja2')
    device = NewDevice(yaml.safe_load(rendered))
    kvm_yaml = os.path.join(os.path.dirname(__file__),
                            'sample_jobs/kvm-inline.yaml')
    with open(kvm_yaml) as sample_job_data:
        job_data = yaml.safe_load(sample_job_data)
    device['actions']['boot']['methods']['qemu']['parameters'][
        'extra'] = yaml.safe_load("""
- -smp
- 1
- -global
- virtio-blk-device.scsi=off
- -device virtio-scsi-device,id=scsi
- --append "console=ttyAMA0 root=/dev/vda rw"
""")
    # YAML parses the bare 1 as an int; execution must see a str
    self.assertIsInstance(
        device['actions']['boot']['methods']['qemu']['parameters']['extra']
        [1], int)
    parser = JobParser()
    job = parser.parse(yaml.dump(job_data), device, 4212, None, "")
    job.logger = DummyLogger()
    job.validate()
    boot_image = [
        action for action in job.pipeline.actions
        if action.name == 'boot-image-retry'
    ][0]
    boot_qemu = [
        action for action in boot_image.internal_pipeline.actions
        if action.name == 'boot-qemu-image'
    ][0]
    qemu = [
        action for action in boot_qemu.internal_pipeline.actions
        if action.name == 'execute-qemu'
    ][0]
    self.assertIsInstance(qemu.sub_command, list)
    # plain loop instead of a side-effect list comprehension
    for item in qemu.sub_command:
        self.assertIsInstance(item, str)
    self.assertIn('virtio-blk-device.scsi=off', qemu.sub_command)
    self.assertIn('1', qemu.sub_command)
    self.assertNotIn(1, qemu.sub_command)
def test_multi_deploy(self):
    """Run a pipeline with several deploy actions and check that each
    DeployAction's data is reset between runs."""
    self.assertIsNotNone(self.parsed_data)
    job = Job(4212, self.parsed_data, None)
    job.timeout = Timeout("Job", Timeout.parse({'minutes': 2}))
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.logger = DummyLogger()
    job.pipeline = pipeline
    # NOTE(review): counts is accumulated but never read afterwards —
    # possibly leftover bookkeeping; confirm before relying on it
    counts = {}
    for action_data in self.parsed_data['actions']:
        for name in action_data:
            counts.setdefault(name, 1)
            parameters = action_data[name]
            test_deploy = TestMultiDeploy.TestDeploy(pipeline, parameters,
                                                     job)
            # a freshly constructed deploy action starts with empty data
            self.assertEqual(
                {},
                test_deploy.action.data
            )
            counts[name] += 1
    # check that only one action has the example set
    self.assertEqual(
        ['nowhere'],
        [detail['deploy']['example']
         for detail in self.parsed_data['actions']
         if 'example' in detail['deploy']]
    )
    self.assertEqual(
        ['faked', 'valid'],
        [detail['deploy']['parameters']
         for detail in self.parsed_data['actions']
         if 'parameters' in detail['deploy']]
    )
    self.assertIsInstance(pipeline.actions[0],
                          TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[1],
                          TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[2],
                          TestMultiDeploy.TestDeployAction)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    job.run()
    # after the run, earlier actions no longer hold their own parameters
    self.assertNotEqual(pipeline.actions[0].data,
                        {'fake-deploy': pipeline.actions[0].parameters})
    self.assertEqual(pipeline.actions[1].data,
                     {'fake-deploy': pipeline.actions[2].parameters})
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(pipeline.actions[2].data,
                     {'fake-deploy': pipeline.actions[2].parameters})
def create_kvm_job(self, filename):  # pylint: disable=no-self-use
    """
    Custom function to allow for extra exception handling.

    Returns the parsed job, or None when the sample deployment is not
    implemented yet (LAVAError from the parser).
    """
    rendered, device_dict = self.create_device('kvm01.jinja2')
    device = NewDevice(yaml.safe_load(rendered))
    if self.debug:
        print('####### Device configuration #######')
        print(rendered)
        print('#######')
    self.validate_data('hi6220-hikey-01', device_dict)
    sample_path = os.path.join(os.path.dirname(__file__), filename)
    parser = JobParser()
    try:
        with open(sample_path) as sample_job_data:
            job = parser.parse(sample_job_data, device, 4212, None, "")
        job.logger = DummyLogger()
    except LAVAError as exc:
        print(exc)
        # some deployments listed in basics.yaml are not implemented yet
        return None
    return job
def test_vland_overlay(self):
    """The vland overlay scripts must be deployed even when the job
    declares empty interface tag lists."""

    def first_named(actions, wanted):
        # first action in the pipeline list with the given name
        return [item for item in actions if item.name == wanted][0]

    with open(self.filename) as yaml_data:
        alpha_data = yaml.safe_load(yaml_data)
    # removed tags from original job to simulate job where any interface
    # tags will be acceptable
    for vlan_key, _ in alpha_data['protocols'][VlandProtocol.name].items():
        alpha_data['protocols'][VlandProtocol.name][vlan_key] = {
            'tags': []
        }
    self.assertEqual(alpha_data['protocols'][VlandProtocol.name],
                     {'vlan_one': {
                         'tags': []
                     }})
    parser = JobParser()
    job = parser.parse(yaml.dump(alpha_data), self.device, 4212, None, "")
    job.logger = DummyLogger()
    job.validate()
    tftp_deploy = first_named(job.pipeline.actions, 'tftp-deploy')
    prepare = first_named(tftp_deploy.internal_pipeline.actions,
                          'prepare-tftp-overlay')
    overlay = first_named(prepare.internal_pipeline.actions,
                          'lava-overlay')
    vland = first_named(overlay.internal_pipeline.actions,
                        'lava-vland-overlay')
    self.assertTrue(os.path.exists(vland.lava_vland_test_dir))
    vland_files = os.listdir(vland.lava_vland_test_dir)
    self.assertIn('lava-vland-names', vland_files)
    self.assertIn('lava-vland-tags', vland_files)
    self.assertIn('lava-vland-self', vland_files)
def test_secondary_media(self):
    """
    Test UBootSecondaryMedia validation

    Checks that the secondary (usb) boot namespace exposes the kernel,
    ramdisk, dtb and partition values through namespace data.
    """
    job_parser = JobParser()
    (rendered, _) = self.factory.create_device('cubie1.jinja2')
    cubie = NewDevice(yaml.safe_load(rendered))
    sample_job_file = os.path.join(os.path.dirname(__file__),
                                   'sample_jobs/cubietruck-removable.yaml')
    # context manager: the previous bare open() leaked the handle if
    # parse or validate raised
    with open(sample_job_file) as sample_job_data:
        job = job_parser.parse(sample_job_data, cubie, 4212, None, "")
    job.logger = DummyLogger()
    job.validate()
    uboot_action = [
        action for action in job.pipeline.actions
        if action.name == 'uboot-action'
        and action.parameters['namespace'] == 'boot2'
    ][0]
    u_boot_media = [
        action for action in uboot_action.internal_pipeline.actions
        if action.name == 'uboot-from-media'
        and action.parameters['namespace'] == 'boot2'
    ][0]
    self.assertIsInstance(u_boot_media, UBootSecondaryMedia)
    self.assertEqual([], u_boot_media.errors)
    self.assertEqual(u_boot_media.parameters['kernel'],
                     '/boot/vmlinuz-3.16.0-4-armmp-lpae')
    self.assertEqual(
        u_boot_media.parameters['kernel'],
        u_boot_media.get_namespace_data(
            action='download-action', label='file', key='kernel'))
    self.assertEqual(
        u_boot_media.parameters['ramdisk'],
        u_boot_media.get_namespace_data(
            action='compress-ramdisk', label='file', key='ramdisk'))
    self.assertEqual(
        u_boot_media.parameters['dtb'],
        u_boot_media.get_namespace_data(
            action='download-action', label='file', key='dtb'))
    # use the base class name so that uboot-from-media can pick up the value reliably.
    self.assertEqual(
        u_boot_media.parameters['root_uuid'],
        u_boot_media.get_namespace_data(
            action='bootloader-from-media', label='uuid', key='root'))
    device = u_boot_media.get_namespace_data(
        action='storage-deploy', label='u-boot', key='device')
    self.assertIsNotNone(device)
    part_reference = '%s:%s' % (
        job.device['parameters']['media']['usb'][device]['device_id'],
        u_boot_media.parameters['boot_part']
    )
    self.assertEqual(
        part_reference,
        u_boot_media.get_namespace_data(
            action=u_boot_media.name, label='uuid', key='boot_part'))
    self.assertEqual(part_reference, "0:1")
def shellcommand_dummy_logger_init(self, command, lava_timeout,
                                   logger=None, cwd=None):
    """Monkey-patched __init__ that ignores the caller's logger and
    always delegates to the saved original with a DummyLogger."""
    quiet_logger = DummyLogger()
    self.__old_init__(command, lava_timeout, quiet_logger, cwd)
def __init__(self, parameters):
    """Fake job with a fixed id (4212) and a silent logger."""
    # zero-argument super(): matches the style used elsewhere in this file
    super().__init__(4212, parameters, None)
    self.logger = DummyLogger()
def __init__(self, parameters):
    """Construct the fake job with a fixed id and a quiet logger."""
    super().__init__(4212, parameters, None)
    self.logger = DummyLogger()
def setUp(self):
    """Build the kvm-inline job fixture and a 30 second deadline."""
    super().setUp()
    self.job = Factory().create_kvm_job('sample_jobs/kvm-inline.yaml')
    self.job.logger = DummyLogger()
    # allow up to 30 seconds of wall time for the run
    self.max_end_time = time.time() + 30
def test_substitutions(self):
    """
    Test substitution of secondary media values into u-boot commands

    Unlike most u-boot calls, removable knows in advance all the values
    it needs to substitute into the boot commands for the secondary
    deployment as these are fixed by the device config and the image
    details from the job submission.
    """
    job_parser = JobParser()
    cubie = NewDevice(
        os.path.join(os.path.dirname(__file__), '../devices/cubie1.yaml'))
    sample_job_file = os.path.join(
        os.path.dirname(__file__), 'sample_jobs/cubietruck-removable.yaml')
    with open(sample_job_file) as sample_job_data:
        job = job_parser.parse(sample_job_data, cubie, 4212, None, "")
    job.logger = DummyLogger()
    job.validate()
    # second boot block in the job is the secondary (removable) boot
    boot_params = [
        methods for methods in job.parameters['actions']
        if 'boot' in methods.keys()
    ][1]['boot']
    self.assertIn('ramdisk', boot_params)
    self.assertIn('kernel', boot_params)
    self.assertIn('dtb', boot_params)
    self.assertIn('root_uuid', boot_params)
    self.assertIn('boot_part', boot_params)
    self.assertNotIn('type', boot_params)
    self.assertGreater(len(job.pipeline.actions), 1)
    self.assertIsNotNone(job.pipeline.actions[1].internal_pipeline)
    # [1]: the second uboot-action handles the secondary deployment
    u_boot_action = [
        action for action in job.pipeline.actions
        if action.name == 'uboot-action'
    ][1]
    overlay = [
        action for action in u_boot_action.internal_pipeline.actions
        if action.name == 'bootloader-overlay'
    ][0]
    self.assertIsNotNone(
        overlay.get_namespace_data(action='storage-deploy',
                                   label='u-boot',
                                   key='device'))
    methods = cubie['actions']['boot']['methods']
    self.assertIn('u-boot', methods)
    self.assertIn('usb', methods['u-boot'])
    self.assertIn('commands', methods['u-boot']['usb'])
    commands_list = methods['u-boot']['usb']['commands']
    device_id = u_boot_action.get_namespace_data(action='storage-deploy',
                                                 label='u-boot',
                                                 key='device')
    self.assertIsNotNone(device_id)
    kernel_type = u_boot_action.parameters['kernel_type']
    bootcommand = map_kernel_uboot(kernel_type,
                                   device_params=cubie.get(
                                       'parameters', None))
    # build the substitution map exactly as the overlay action would
    substitutions = {
        '{BOOTX}': "%s %s %s %s" % (
            bootcommand,
            cubie['parameters'][bootcommand]['kernel'],
            cubie['parameters'][bootcommand]['ramdisk'],
            cubie['parameters'][bootcommand]['dtb'],
        ),
        '{RAMDISK}': boot_params['ramdisk'],
        '{KERNEL}': boot_params['kernel'],
        '{DTB}': boot_params['dtb'],
        '{ROOT}': boot_params['root_uuid'],
        '{ROOT_PART}': "%s:%s" % (
            cubie['parameters']['media']['usb'][device_id]['device_id'],
            u_boot_action.parameters['boot_part'])
    }
    self.assertEqual('bootz 0x42000000 0x43300000 0x43000000',
                     substitutions['{BOOTX}'])
    self.assertEqual('/boot/initrd.img-3.16.0-4-armmp-lpae.u-boot',
                     substitutions['{RAMDISK}'])
    commands = substitute(commands_list, substitutions)
    # full expected command sequence after substitution
    self.assertEqual(commands, [
        'usb start',
        'usb info',
        'setenv autoload no',
        "setenv initrd_high '0xffffffff'",
        "setenv fdt_high '0xffffffff'",
        'setenv initrd_addr_r ${ramdisk_addr_r}',
        "setenv loadkernel 'load usb 0:1 ${kernel_addr_r} /boot/vmlinuz-3.16.0-4-armmp-lpae'",
        "setenv loadinitrd 'load usb 0:1 ${initrd_addr_r} /boot/initrd.img-3.16.0-4-armmp-lpae.u-boot; setenv initrd_size ${filesize}'",
        "setenv loadfdt 'load usb 0:1 ${fdt_addr_r} /boot/dtb-3.16.0-4-armmp-lpae'",
        "setenv bootargs 'console=ttyS0,115200n8 root=UUID=159d17cc-697c-4125-95a0-a3775e1deabe ip=dhcp'",
        "setenv bootcmd 'run loadkernel; run loadinitrd; run loadfdt; bootz 0x42000000 0x43300000 0x43000000'",
        'boot'
    ])
def test_low_medium_high_without_hc(self):
    """Without health checks, jobs on one device are scheduled strictly
    by priority (HIGH, MEDIUM, numeric 40, then LOW) in submission order."""
    # Disable health checks
    Device.get_health_check = lambda cls: None
    jobs = []
    # last entry uses a raw numeric priority (40) between MEDIUM and LOW
    for p in [TestJob.LOW, TestJob.MEDIUM, TestJob.HIGH, TestJob.MEDIUM,
              TestJob.LOW, 40]:
        j = TestJob.objects.create(requested_device_type=self.device_type01,
                                   user=self.user, submitter=self.user,
                                   is_public=True,
                                   definition=_minimal_valid_job(None),
                                   priority=p)
        jobs.append(j)
    log = DummyLogger()
    # round 1: the single HIGH job (jobs[2]) is scheduled first
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    self._check_job(jobs[0], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[1], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[2], TestJob.STATE_SCHEDULED, self.device01)
    self._check_job(jobs[3], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[4], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[5], TestJob.STATE_SUBMITTED)
    jobs[2].go_state_finished(TestJob.HEALTH_COMPLETE)
    jobs[2].save()
    self._check_job(jobs[2], TestJob.STATE_FINISHED, self.device01)
    # round 2: first MEDIUM job (jobs[1]) comes next
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    self._check_job(jobs[0], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[1], TestJob.STATE_SCHEDULED, self.device01)
    self._check_job(jobs[2], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[3], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[4], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[5], TestJob.STATE_SUBMITTED)
    jobs[1].go_state_finished(TestJob.HEALTH_COMPLETE)
    jobs[1].save()
    self._check_job(jobs[1], TestJob.STATE_FINISHED, self.device01)
    # round 3: second MEDIUM job (jobs[3])
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    self._check_job(jobs[0], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[1], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[2], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[3], TestJob.STATE_SCHEDULED, self.device01)
    self._check_job(jobs[4], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[5], TestJob.STATE_SUBMITTED)
    jobs[3].go_state_finished(TestJob.HEALTH_COMPLETE)
    jobs[3].save()
    self._check_job(jobs[3], TestJob.STATE_FINISHED, self.device01)
    # round 4: the numeric priority 40 job (jobs[5]) outranks LOW
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    self._check_job(jobs[0], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[1], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[2], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[3], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[4], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[5], TestJob.STATE_SCHEDULED, self.device01)
    jobs[5].go_state_finished(TestJob.HEALTH_COMPLETE)
    jobs[5].save()
    self._check_job(jobs[5], TestJob.STATE_FINISHED, self.device01)
    # round 5: first LOW job (jobs[0])
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    self._check_job(jobs[0], TestJob.STATE_SCHEDULED, self.device01)
    self._check_job(jobs[1], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[2], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[3], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[4], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[5], TestJob.STATE_FINISHED, self.device01)
    jobs[0].go_state_finished(TestJob.HEALTH_COMPLETE)
    jobs[0].save()
    self._check_job(jobs[0], TestJob.STATE_FINISHED, self.device01)
    # round 6: last LOW job (jobs[4])
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    self._check_job(jobs[0], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[1], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[2], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[3], TestJob.STATE_FINISHED, self.device01)
    self._check_job(jobs[4], TestJob.STATE_SCHEDULED, self.device01)
    self._check_job(jobs[5], TestJob.STATE_FINISHED, self.device01)
def setUp(self):
    """Create the qemu-nfs job fixture used by this test class."""
    super().setUp()
    self.job = Factory().create_job('kvm02.jinja2',
                                    'sample_jobs/qemu-nfs.yaml')
    self.job.logger = DummyLogger()
def test_job(self):
    """Parse the vland alpha job and check the vland protocol setup,
    timeout handling and interface parameters."""
    with open(self.filename) as yaml_data:
        alpha_data = yaml.safe_load(yaml_data)
    self.assertIn('protocols', alpha_data)
    self.assertIn(VlandProtocol.name, alpha_data['protocols'])
    with open(self.filename) as sample_job_data:
        parser = JobParser()
        job = parser.parse(sample_job_data, self.device, 4212, None, "")
    job.logger = DummyLogger()
    description_ref = self.pipeline_reference('bbb-group-vland-alpha.yaml',
                                              job=job)
    self.assertEqual(description_ref, job.pipeline.describe(False))
    job.validate()
    # the multinode protocol must be active alongside vland
    self.assertNotEqual([], [
        protocol.name for protocol in job.protocols
        if protocol.name == MultinodeProtocol.name
    ])
    # sample reply shape expected from the vland daemon
    ret = {
        "message": {
            "kvm01": {
                "vlan_name": "name",
                "vlan_tag": 6
            }
        },
        "response": "ack"
    }
    self.assertEqual(('name', 6), (
        ret['message']['kvm01']['vlan_name'],
        ret['message']['kvm01']['vlan_tag'],
    ))
    self.assertIn('protocols', job.parameters)
    self.assertIn(VlandProtocol.name, job.parameters['protocols'])
    self.assertIn(MultinodeProtocol.name, job.parameters['protocols'])
    vprotocol = [
        vprotocol for vprotocol in job.protocols
        if vprotocol.name == VlandProtocol.name
    ][0]
    self.assertTrue(vprotocol.valid)
    self.assertEqual(vprotocol.names, {'vlan_one': '4212vlanone'})
    # check_timeout: False for unrelated requests, JobError for
    # malformed or too-short timeouts, True for a valid deploy_vlans
    self.assertFalse(vprotocol.check_timeout(120, {'request': 'no call'}))
    self.assertRaises(JobError, vprotocol.check_timeout, 60,
                      'deploy_vlans')
    self.assertRaises(JobError, vprotocol.check_timeout, 60,
                      {'request': 'deploy_vlans'})
    self.assertTrue(
        vprotocol.check_timeout(120, {'request': 'deploy_vlans'}))
    for vlan_name in job.parameters['protocols'][VlandProtocol.name]:
        # 'yaml_line' is parser metadata, not a vlan entry
        if vlan_name == 'yaml_line':
            continue
        self.assertIn(vlan_name, vprotocol.params)
        self.assertIn('switch', vprotocol.params[vlan_name])
        self.assertIn('port', vprotocol.params[vlan_name])
        self.assertIn('iface', vprotocol.params[vlan_name])
    params = job.parameters['protocols'][vprotocol.name]
    names = []
    for key, _ in params.items():
        if key == 'yaml_line':
            continue
        names.append(",".join([key, vprotocol.params[key]['iface']]))
    # this device only has one interface with interface tags
    self.assertEqual(names, ['vlan_one,eth1'])
def test_lxc_without_lxctest(self):  # pylint: disable=too-many-locals
    """A job whose lxc deployment has no test action must still build a
    valid tftp namespace with its own test definitions."""
    lxc_yaml = os.path.join(os.path.dirname(__file__),
                            'sample_jobs/bbb-lxc-notest.yaml')
    with open(lxc_yaml) as sample_job_data:
        data = yaml.safe_load(sample_job_data)
    parser = JobParser()
    (rendered, _) = self.factory.create_device('bbb-01.jinja2')
    device = NewDevice(yaml.safe_load(rendered))
    job = parser.parse(yaml.dump(data), device, 4577, None, "")
    job.logger = DummyLogger()
    job.validate()
    lxc_deploy = [
        action for action in job.pipeline.actions
        if action.name == 'lxc-deploy'
    ][0]
    names = [
        action.name for action in lxc_deploy.internal_pipeline.actions
    ]
    # no test action in the lxc namespace, so no overlay preparation there
    self.assertNotIn('prepare-tftp-overlay', names)
    namespace1 = lxc_deploy.parameters.get('namespace')
    tftp_deploy = [
        action for action in job.pipeline.actions
        if action.name == 'tftp-deploy'
    ][0]
    prepare = [
        action for action in tftp_deploy.internal_pipeline.actions
        if action.name == 'prepare-tftp-overlay'
    ][0]
    overlay = [
        action for action in prepare.internal_pipeline.actions
        if action.name == 'lava-overlay'
    ][0]
    test_def = [
        action for action in overlay.internal_pipeline.actions
        if action.name == 'test-definition'
    ][0]
    namespace = test_def.parameters.get('namespace')
    self.assertIsNotNone(namespace)
    self.assertIsNotNone(namespace1)
    # lxc and tftp deployments live in separate namespaces
    self.assertNotEqual(namespace, namespace1)
    self.assertNotEqual(self.job.pipeline.describe(False),
                        job.pipeline.describe(False))
    test_actions = [
        action for action in job.parameters['actions'] if 'test' in action
    ]
    for action in test_actions:
        if 'namespace' in action['test']:
            if action['test']['namespace'] == namespace:
                self.assertEqual(action['test']['definitions'][0]['name'],
                                 'smoke-tests-bbb')
        else:
            # every test action must carry a namespace (the tftp one)
            self.fail("Found a test action not from the tftp boot")
    namespace_tests = [
        action['test']['definitions'] for action in test_actions
        if 'namespace' in action['test']
        and action['test']['namespace'] == namespace
    ]
    self.assertEqual(len(namespace_tests), 1)
    self.assertEqual(len(test_actions), 1)
    description_ref = self.pipeline_reference('bbb-lxc-notest.yaml',
                                              job=job)
    self.assertEqual(description_ref, job.pipeline.describe(False))
def test_lxc_with_device(self):  # pylint: disable=too-many-locals
    """A job with both lxc and device test actions must produce test
    definitions in each namespace; removing the device test action
    still yields a valid lxc-only job."""
    self.assertIsNotNone(self.job)
    # validate with two test actions, lxc and device
    self.job.validate()
    lxc_yaml = os.path.join(os.path.dirname(__file__),
                            'sample_jobs/bbb-lxc.yaml')
    with open(lxc_yaml) as sample_job_data:
        data = yaml.safe_load(sample_job_data)
    lxc_deploy = [
        action for action in self.job.pipeline.actions
        if action.name == 'lxc-deploy'
    ][0]
    overlay = [
        action for action in lxc_deploy.internal_pipeline.actions
        if action.name == 'lava-overlay'
    ][0]
    test_def = [
        action for action in overlay.internal_pipeline.actions
        if action.name == 'test-definition'
    ][0]
    self.assertIsNotNone(test_def.level, test_def.test_list)
    runner = [
        action for action in test_def.internal_pipeline.actions
        if action.name == 'test-runscript-overlay'
    ][0]
    self.assertIsNotNone(runner.testdef_levels)
    tftp_deploy = [
        action for action in self.job.pipeline.actions
        if action.name == 'tftp-deploy'
    ][0]
    prepare = [
        action for action in tftp_deploy.internal_pipeline.actions
        if action.name == 'prepare-tftp-overlay'
    ][0]
    overlay = [
        action for action in prepare.internal_pipeline.actions
        if action.name == 'lava-overlay'
    ][0]
    test_def = [
        action for action in overlay.internal_pipeline.actions
        if action.name == 'test-definition'
    ][0]
    namespace = test_def.parameters.get('namespace')
    self.assertIsNotNone(namespace)
    test_actions = [
        action for action in self.job.parameters['actions']
        if 'test' in action
    ]
    for action in test_actions:
        if 'namespace' in action['test']:
            if action['test']['namespace'] == namespace:
                self.assertEqual(action['test']['definitions'][0]['name'],
                                 'smoke-tests-bbb')
    namespace_tests = [
        action['test']['definitions'] for action in test_actions
        if 'namespace' in action['test']
        and action['test']['namespace'] == namespace
    ]
    self.assertEqual(len(namespace_tests), 1)
    self.assertEqual(len(test_actions), 2)
    self.assertEqual('smoke-tests-bbb', namespace_tests[0][0]['name'])
    self.assertEqual('smoke-tests-bbb', test_def.test_list[0][0]['name'])
    self.assertIsNotNone(test_def.level, test_def.test_list)
    runner = [
        action for action in test_def.internal_pipeline.actions
        if action.name == 'test-runscript-overlay'
    ][0]
    self.assertIsNotNone(runner.testdef_levels)
    # remove the second test action
    data['actions'].pop()
    test_actions = [
        action for action in data['actions'] if 'test' in action
    ]
    self.assertEqual(len(test_actions), 1)
    self.assertEqual(test_actions[0]['test']['namespace'], 'probe')
    parser = JobParser()
    (rendered, _) = self.factory.create_device('bbb-01.jinja2')
    device = NewDevice(yaml.safe_load(rendered))
    job = parser.parse(yaml.dump(data), device, 4577, None, "")
    job.logger = DummyLogger()
    job.validate()
    # FIX: inspect the freshly parsed job, not self.job — the original
    # code re-checked self.job here, leaving the new job untested
    lxc_deploy = [
        action for action in job.pipeline.actions
        if action.name == 'lxc-deploy'
    ][0]
    overlay = [
        action for action in lxc_deploy.internal_pipeline.actions
        if action.name == 'lava-overlay'
    ][0]
    test_def = [
        action for action in overlay.internal_pipeline.actions
        if action.name == 'test-definition'
    ][0]
    self.assertIsNotNone(test_def.level, test_def.test_list)
    runner = [
        action for action in test_def.internal_pipeline.actions
        if action.name == 'test-runscript-overlay'
    ][0]
    self.assertIsNotNone(runner.testdef_levels)