def test_health_frequency_hours(self):
    """Hourly health-check frequency: once the previous health check is
    older than health_frequency hours, a new one is scheduled ahead of
    queued user jobs."""
    self.device_type01.health_denominator = DeviceType.HEALTH_PER_HOUR
    self.device_type01.health_frequency = 24
    self.device_type01.save()
    # Patch the class so every device returns a valid health-check definition
    Device.get_health_check = _minimal_valid_job
    self.assertNotEqual(self.device01.get_health_check(), None)
    self.assertNotEqual(self.device02.get_health_check(), None)
    self.assertNotEqual(self.device03.get_health_check(), None)
    # Only device03 is available now
    self.device01.health = Device.HEALTH_BAD
    self.device01.save()
    self.device03.health = Device.HEALTH_GOOD
    self.device03.save()
    # Create a job that should be scheduled now
    j = TestJob.objects.create(requested_device_type=self.device_type01,
                               user=self.user, submitter=self.user,
                               is_public=True,
                               definition=_minimal_valid_job(None))
    schedule(DummyLogger())
    self.device01.refresh_from_db()
    j.refresh_from_db()
    self.assertEqual(j.state, TestJob.STATE_SCHEDULED)
    self.assertEqual(j.actual_device, self.device03)
    j.go_state_finished(TestJob.HEALTH_COMPLETE)
    j.save()
    # Create a job that should be scheduled after the health check
    j = TestJob.objects.create(requested_device_type=self.device_type01,
                               user=self.user, submitter=self.user,
                               is_public=True,
                               definition=_minimal_valid_job(None))
    self.device03.refresh_from_db()
    # Age the last health check past the 24h frequency so a new HC wins
    self.last_hc03.submit_time = timezone.now() - timedelta(hours=25)
    self.last_hc03.save()
    schedule(DummyLogger())
    self.device03.refresh_from_db()
    j.refresh_from_db()
    # The user job stays queued; the device is running its health check
    self.assertEqual(j.state, TestJob.STATE_SUBMITTED)
    current_hc = self.device03.current_job()
    self.assertTrue(current_hc.health_check)
    self.assertEqual(current_hc.state, TestJob.STATE_SCHEDULED)
def create_x86_job(self, filename, device):  # pylint: disable=no-self-use
    """Parse the sample job *filename* against *device*.

    Returns the parsed job with a dummy logger attached, or None when
    the job uses a deployment that is not implemented yet (LAVAError).
    """
    job_path = os.path.join(os.path.dirname(__file__), filename)
    try:
        with open(job_path) as job_stream:
            parsed_job = JobParser().parse(job_stream, device, 4212, None, "")
            parsed_job.logger = DummyLogger()
    except LAVAError as exc:
        print(exc)
        # some deployments listed in basics.yaml are not implemented yet
        return None
    return parsed_job
def test_disabled_hc(self):
    """With health checks disabled on the device type, no HC is scheduled
    and the healthy devices are reported directly as available."""
    # Make sure that get_health_check does return something
    Device.get_health_check = _minimal_valid_job
    self.assertNotEqual(self.device01.get_health_check(), None)
    self.assertNotEqual(self.device02.get_health_check(), None)
    self.assertNotEqual(self.device03.get_health_check(), None)
    self.device_type01.disable_health_check = True
    self.device_type01.save()
    available_devices = schedule_health_checks(DummyLogger())[0]
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(available_devices,
                     {"dt-01": ["device-01", "device-03"]})
def test_without_previous_hc_device_health_unknown(self):
    """Devices with no previous health check get one scheduled and are not
    reported available for user jobs."""
    # Make sure that get_health_check does return something
    Device.get_health_check = _minimal_valid_job
    self.assertNotEqual(self.device01.get_health_check(), None)
    self.assertNotEqual(self.device02.get_health_check(), None)
    self.assertNotEqual(self.device03.get_health_check(), None)
    available_devices = schedule_health_checks(DummyLogger())[0]
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(available_devices, {"dt-01": []})
    self._check_hc_scheduled(self.device01)
    self._check_hc_not_scheduled(self.device02)
    self._check_hc_scheduled(self.device03)
def test_health_visibility(self):
    """Public device type with health checks enabled: HCs are scheduled
    on device01 and device03 but not device02."""
    self._check_initial_state()
    self.device_type01.disable_health_check = False
    self.device_type01.owners_only = False
    self.device_type01.save()
    schedule_health_checks(DummyLogger())[0]
    self._check_hc_scheduled(self.device01)
    self._check_hc_not_scheduled(self.device02)
    self._check_hc_scheduled(self.device03)
def test_uimage(self):
    """A 'uimage' kernel type selects the bootm command and needs no
    mkimage conversion."""
    self.deploy_block['kernel']['type'] = 'uimage'
    job = self.parser.parse(yaml.dump(self.base_data), self.device,
                            4212, None, "")
    job.logger = DummyLogger()
    job.validate()

    def first_named(actions, wanted):
        # first pipeline action whose name matches *wanted*
        return [a for a in actions if a.name == wanted][0]

    deploy = first_named(job.pipeline.actions, 'tftp-deploy')
    overlay = first_named(deploy.internal_pipeline.actions,
                          'prepare-tftp-overlay')
    prepare = first_named(overlay.internal_pipeline.actions,
                          'prepare-kernel')
    uboot_prepare = first_named(prepare.internal_pipeline.actions,
                                'uboot-prepare-kernel')
    self.assertEqual('uimage', uboot_prepare.kernel_type)
    self.assertEqual('bootm', uboot_prepare.bootcommand)
    self.assertFalse(uboot_prepare.mkimage_conversion)
def test_prompt_from_job(self):  # pylint: disable=too-many-locals
    """
    Support setting the prompt after login via the job

    Loads a known YAML, adds a prompt to the dict and re-parses the job.
    Checks that the prompt is available in the expect_shell_connection
    action.
    """
    job = self.factory.create_job('x86-01.jinja2',
                                  'sample_jobs/ipxe-ramdisk.yaml')
    job.validate()
    bootloader = [action for action in job.pipeline.actions
                  if action.name == 'bootloader-action'][0]
    retry = [action for action in bootloader.internal_pipeline.actions
             if action.name == 'bootloader-retry'][0]
    expect = [action for action in retry.internal_pipeline.actions
              if action.name == 'expect-shell-connection'][0]
    # NOTE(review): 'check' is never used afterwards — possibly leftover
    check = expect.parameters
    (rendered, _) = self.factory.create_device('x86-01.jinja2')
    device = NewDevice(yaml.safe_load(rendered))
    extra_yaml = os.path.join(os.path.dirname(__file__),
                              'sample_jobs/ipxe.yaml')
    with open(extra_yaml) as data:
        sample_job_string = data.read()
    parser = JobParser()
    sample_job_data = yaml.safe_load(sample_job_string)
    # There must be exactly one boot action to modify
    boot = [item['boot'] for item in sample_job_data['actions']
            if 'boot' in item][0]
    self.assertIsNotNone(boot)
    # Re-serialise and re-parse the (possibly modified) job
    sample_job_string = yaml.dump(sample_job_data)
    job = parser.parse(sample_job_string, device, 4212, None, "")
    job.logger = DummyLogger()
    job.validate()
    bootloader = [action for action in job.pipeline.actions
                  if action.name == 'bootloader-action'][0]
    retry = [action for action in bootloader.internal_pipeline.actions
             if action.name == 'bootloader-retry'][0]
    # NOTE(review): the final 'expect' extraction has no assertion after
    # it — the check on the re-parsed job appears incomplete; confirm
    expect = [action for action in retry.internal_pipeline.actions
              if action.name == 'expect-shell-connection'][0]
def test_health_visibility_owners(self):
    """owners_only device type: a public health-check definition cannot be
    scheduled, so no device gets a health check."""
    self._check_initial_state()
    self.device_type01.disable_health_check = False
    self.device_type01.owners_only = True
    self.device_type01.save()
    schedule_health_checks(DummyLogger())[0]
    # no health checks can be scheduled for _minimal_valid_job
    self._check_hc_not_scheduled(self.device01)
    self._check_hc_not_scheduled(self.device02)
    self._check_hc_not_scheduled(self.device03)
def create_qemu_installer_job(self):
    """Parse the qemu-debian-installer sample job on a rendered kvm01
    device.

    Returns the parsed job, or None when the deployment is not
    implemented yet.
    """
    (rendered, _) = self.create_device('kvm01.jinja2')
    installer_device = NewDevice(yaml.safe_load(rendered))
    job_path = os.path.join(os.path.dirname(__file__),
                            'sample_jobs/qemu-debian-installer.yaml')
    try:
        with open(job_path) as job_stream:
            installer_job = JobParser().parse(job_stream, installer_device,
                                              4212, None, "")
            installer_job.logger = DummyLogger()
    except NotImplementedError:
        # some deployments listed in basics.yaml are not implemented yet
        return None
    return installer_job
def _check_valid_job(self, device, test_file): self.maxDiff = None # pylint: disable=invalid-name job_parser = JobParser() sample_job_file = os.path.join(os.path.dirname(__file__), 'sample_jobs/{}'.format(test_file)) with open(sample_job_file) as sample_job_data: job = job_parser.parse(sample_job_data, device, 4212, None, "") job.logger = DummyLogger() try: job.validate() except JobError: self.fail(job.pipeline.errors) description_ref = self.pipeline_reference(test_file, job=job) self.assertEqual(description_ref, job.pipeline.describe(False)) return job
def create_ssh_job(self, filename):  # pylint: disable=no-self-use
    """Parse *filename* against the ssh-host-01 device (job id 0)."""
    here = os.path.dirname(__file__)
    ssh_device = NewDevice(os.path.join(here, '../devices/ssh-host-01.yaml'))
    with open(os.path.join(here, filename)) as job_stream:
        ssh_job = JobParser().parse(job_stream, ssh_device, 0, None,
                                    dispatcher_config="")
        ssh_job.logger = DummyLogger()
    return ssh_job
def test_zimage_nobootz(self):
    """Without bootz support on the device, a zImage must be converted
    via mkimage and booted with bootm."""
    # drop bootz from the device for this part of the test
    del self.device['parameters']['bootz']
    self.deploy_block['kernel']['type'] = 'zimage'
    job = self.parser.parse(yaml.dump(self.base_data), self.device,
                            4212, None, "")
    job.logger = DummyLogger()
    job.validate()

    def first_named(actions, wanted):
        # first pipeline action whose name matches *wanted*
        return [a for a in actions if a.name == wanted][0]

    deploy = first_named(job.pipeline.actions, 'tftp-deploy')
    overlay = first_named(deploy.internal_pipeline.actions,
                          'prepare-tftp-overlay')
    prepare = first_named(overlay.internal_pipeline.actions,
                          'prepare-kernel')
    uboot_prepare = first_named(prepare.internal_pipeline.actions,
                                'uboot-prepare-kernel')
    self.assertEqual('zimage', uboot_prepare.kernel_type)
    self.assertEqual('bootm', uboot_prepare.bootcommand)
    self.assertTrue(uboot_prepare.mkimage_conversion)
def test_primary_media(self):
    """
    Test that definitions of secondary media do not block submissions
    using primary media
    """
    job_parser = JobParser()
    (rendered, _) = self.factory.create_device('bbb-01.jinja2')
    bbb = NewDevice(yaml.safe_load(rendered))
    sample_job_file = os.path.join(os.path.dirname(__file__),
                                   'sample_jobs/uboot-ramdisk.yaml')
    with open(sample_job_file) as sample_job_data:
        job = job_parser.parse(sample_job_data, bbb, 4212, None, "")
        job.logger = DummyLogger()
    job.validate()
    self.assertEqual(job.pipeline.errors, [])
    # the device declares usb media even though the job does not use it
    self.assertIn('usb', bbb['parameters']['media'].keys())
def test_juno_deployment(self):
    """Removable-media job on juno-uboot: two namespaced tftp deploys plus
    a storage deploy to the declared usb device."""
    factory = RemovableFactory()
    job = factory.create_job('sample_jobs/juno-uboot-removable.yaml',
                             '../devices/juno-uboot.yaml')
    job.logger = DummyLogger()
    job.validate()
    self.assertEqual(job.pipeline.errors, [])
    self.assertIn('usb', job.device['parameters']['media'].keys())
    # second deploy action in the submission is the removable-media one
    deploy_params = [methods for methods in job.parameters['actions']
                     if 'deploy' in methods.keys()][1]['deploy']
    self.assertIn('device', deploy_params)
    self.assertIn(deploy_params['device'],
                  job.device['parameters']['media']['usb'])
    self.assertIn(
        'uuid',
        job.device['parameters']['media']['usb'][deploy_params['device']])
    self.assertIn(
        'device_id',
        job.device['parameters']['media']['usb'][deploy_params['device']])
    self.assertNotIn(
        'boot_part',
        job.device['parameters']['media']['usb'][deploy_params['device']])
    tftp_deploys = [action for action in job.pipeline.actions
                    if action.name == 'tftp-deploy']
    self.assertEqual(len(tftp_deploys), 2)
    first_deploy = tftp_deploys[0]
    second_deploy = tftp_deploys[1]
    self.assertIsNotNone(first_deploy)
    self.assertIsNotNone(second_deploy)
    # each tftp deploy belongs to its own namespace
    self.assertEqual('openembedded', first_deploy.parameters['namespace'])
    self.assertEqual('android', second_deploy.parameters['namespace'])
    self.assertNotIn('deployment_data', first_deploy.parameters)
    self.assertNotIn('deployment_data', second_deploy.parameters)
    storage_deploy_action = [action for action in job.pipeline.actions
                             if action.name == 'storage-deploy'][0]
    download_action = [
        action for action in storage_deploy_action.internal_pipeline.actions
        if action.name == 'download-retry'][0]
    self.assertIsNotNone(download_action)
    self.assertEqual('android', storage_deploy_action.parameters['namespace'])
def create_qemu_installer_job(self):  # pylint: disable=no-self-use
    """Parse the qemu-debian-installer sample job on the static kvm01
    device file.

    Returns the parsed job, or None when the deployment is not
    implemented yet.
    """
    base_dir = os.path.dirname(__file__)
    device = NewDevice(os.path.join(base_dir, '../devices/kvm01.yaml'))
    sample = os.path.join(base_dir, 'sample_jobs/qemu-debian-installer.yaml')
    try:
        with open(sample) as stream:
            job = JobParser().parse(stream, device, 4212, None, "")
            job.logger = DummyLogger()
    except NotImplementedError:
        # some deployments listed in basics.yaml are not implemented yet
        return None
    return job
def test_health_visibility_owners_personal(self): self._check_initial_state() # repeat test_health_visibility_owners with suitable test job Device.get_health_check = self._minimal_personal_job self.device_type01.disable_health_check = False self.device_type01.owners_only = True self.device_type01.save() schedule_health_checks(DummyLogger())[0] # health checks can be scheduled for _minimal_personal_job self._check_hc_scheduled(self.device01) self._check_hc_not_scheduled(self.device02) self._check_hc_scheduled(self.device03)
def setUp(self):
    """Parse the qemu-nfs sample job for kvm03 and attach a dummy logger."""
    super(TestQemuNFS, self).setUp()
    device = NewDevice(
        os.path.join(os.path.dirname(__file__), '../devices/kvm03.yaml'))
    kvm_yaml = os.path.join(os.path.dirname(__file__),
                            'sample_jobs/qemu-nfs.yaml')
    parser = JobParser()
    try:
        with open(kvm_yaml) as sample_job_data:
            job = parser.parse(sample_job_data, device, 4212, None, "")
    except NotImplementedError as exc:
        print(exc)
        # some deployments listed in basics.yaml are not implemented yet
        # NOTE(review): returning from setUp leaves self.job unset, so the
        # tests would fail with AttributeError instead of being skipped
        return None
    self.job = job
    self.job.logger = DummyLogger()
def test_job_bad_tags(self):
    """A vland tag supported by no interface must make validation raise
    JobError."""
    with open(self.filename) as yaml_data:
        alpha_data = yaml.safe_load(yaml_data)
    for vlan_key, _ in alpha_data['protocols'][VlandProtocol.name].items():
        alpha_data['protocols'][VlandProtocol.name][vlan_key] = {
            'tags': ['spurious']
        }
    # replaced tags from original job to simulate job where an unsupported tag is specified
    self.assertEqual(alpha_data['protocols'][VlandProtocol.name],
                     {'vlan_one': {
                         'tags': ['spurious']
                     }})
    parser = JobParser()
    job = parser.parse(yaml.dump(alpha_data), self.device, 4212, None, "")
    job.logger = DummyLogger()
    self.assertRaises(JobError, job.validate)
def test_low_medium_high_with_hc(self):
    """With health checks enabled, the HC runs before any queued user job
    and the highest-priority job is scheduled right after it."""
    # Enable health checks
    self.device_type01.health_denominator = DeviceType.HEALTH_PER_HOUR
    self.device_type01.health_frequency = 24
    self.device_type01.save()
    Device.get_health_check = _minimal_valid_job
    self.assertNotEqual(self.device01.get_health_check(), None)
    jobs = []
    # queue five jobs with mixed priorities
    for p in [TestJob.LOW, TestJob.MEDIUM, TestJob.HIGH, TestJob.MEDIUM,
              TestJob.LOW]:
        j = TestJob.objects.create(requested_device_type=self.device_type01,
                                   user=self.user, submitter=self.user,
                                   is_public=True,
                                   definition=_minimal_valid_job(None),
                                   priority=p)
        jobs.append(j)
    # Check that an health check will be scheduled before any jobs
    log = DummyLogger()
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    self._check_job(jobs[0], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[1], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[2], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[3], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[4], TestJob.STATE_SUBMITTED)
    current_hc = self.device01.current_job()
    self.assertEqual(current_hc.state, TestJob.STATE_SCHEDULED)
    current_hc.go_state_finished(TestJob.HEALTH_COMPLETE)
    current_hc.save()
    # Check that the next job is the highest priority
    schedule(log)
    self.device01.refresh_from_db()
    self.assertEqual(self.device01.state, Device.STATE_RESERVED)
    self._check_job(jobs[0], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[1], TestJob.STATE_SUBMITTED)
    # jobs[2] is the HIGH-priority job
    self._check_job(jobs[2], TestJob.STATE_SCHEDULED, self.device01)
    self._check_job(jobs[3], TestJob.STATE_SUBMITTED)
    self._check_job(jobs[4], TestJob.STATE_SUBMITTED)
def test_no_devicedict(self):
    """A device without a valid device dictionary is forced to HEALTH_BAD
    during health-check scheduling and gets no job assigned."""
    Device.get_health_check = _minimal_valid_job
    self.device_type02.disable_health_check = False
    self.device_type02.display = True
    self.device_type02.owners_only = False
    self.device_type02.save()
    self.device04.state = Device.STATE_IDLE
    self.device04.health = Device.HEALTH_UNKNOWN
    self.device04.save()
    schedule_health_checks(DummyLogger())
    self.device04.refresh_from_db()
    self.assertFalse(self.device04.is_valid())
    self.assertEqual(self.device04.health, Device.HEALTH_BAD)
    self.assertIsNone(self.device04.current_job())
def test_multi_deploy(self):
    """Several deploy actions in one pipeline each get their own namespaced
    data; running the job must clear data left over from earlier deploys."""
    self.assertIsNotNone(self.parsed_data)
    job = Job(4212, self.parsed_data, None)
    job.timeout = Timeout("Job", Timeout.parse({'minutes': 2}))
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.logger = DummyLogger()
    job.pipeline = pipeline
    counts = {}
    # build one TestDeploy per deploy entry; each starts with empty data
    for action_data in self.parsed_data['actions']:
        for name in action_data:
            counts.setdefault(name, 1)
            parameters = action_data[name]
            test_deploy = TestMultiDeploy.TestDeploy(
                pipeline, parameters, job)
            self.assertEqual({}, test_deploy.action.data)
            counts[name] += 1
    # check that only one action has the example set
    self.assertEqual(['nowhere'], [
        detail['deploy']['example']
        for detail in self.parsed_data['actions']
        if 'example' in detail['deploy']
    ])
    self.assertEqual(['faked', 'valid'], [
        detail['deploy']['parameters']
        for detail in self.parsed_data['actions']
        if 'parameters' in detail['deploy']
    ])
    self.assertIsInstance(pipeline.actions[0],
                          TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[1],
                          TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[2],
                          TestMultiDeploy.TestDeployAction)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    job.run()
    # after run, action 0's data no longer matches its own parameters
    self.assertNotEqual(pipeline.actions[0].data,
                        {'fake-deploy': pipeline.actions[0].parameters})
    self.assertEqual(pipeline.actions[1].data,
                     {'fake-deploy': pipeline.actions[2].parameters})
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(pipeline.actions[2].data,
                     {'fake-deploy': pipeline.actions[2].parameters})
def create_custom_job(self, template, job_data):
    """Render *template* with the job's context and parse *job_data*
    against the resulting device.

    Raises ConfigurationError when the rendered device cannot be parsed.
    """
    context = job_data.get('context')
    (rendered, _device_dict) = self.create_device(template, context)
    custom_device = NewDevice(yaml.safe_load(rendered))
    if self.debug:
        print('####### Device configuration #######')
        print(rendered)
        print('#######')
    try:
        custom_job = JobParser().parse(yaml.dump(job_data), custom_device,
                                       4999, None, "")
    except (ConfigurationError, TypeError) as exc:
        print('####### Parser exception ########')
        print(custom_device)
        print('#######')
        raise ConfigurationError("Invalid device: %s" % exc)
    custom_job.logger = DummyLogger()
    return custom_job
def test_device_health_looping(self):
    """Devices in HEALTH_LOOPING keep receiving health checks but are not
    reported available for user jobs."""
    # Make sure that get_health_check does return something
    Device.get_health_check = _minimal_valid_job
    self.assertNotEqual(self.device01.get_health_check(), None)
    self.assertNotEqual(self.device02.get_health_check(), None)
    self.assertNotEqual(self.device03.get_health_check(), None)
    self.device01.health = Device.HEALTH_LOOPING
    self.device01.save()
    self.device02.health = Device.HEALTH_LOOPING
    self.device02.save()
    self.device03.health = Device.HEALTH_LOOPING
    self.device03.save()
    available_devices = schedule_health_checks(DummyLogger())[0]
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(available_devices, {"dt-01": []})
    self._check_hc_scheduled(self.device01)
    self._check_hc_not_scheduled(self.device02)
    self._check_hc_scheduled(self.device03)
def test_missing_handler(self):
    """An unknown test-definition handler ('from') must raise JobError."""
    device = NewDevice(
        os.path.join(os.path.dirname(__file__), '../devices/kvm01.yaml'))
    kvm_yaml = os.path.join(os.path.dirname(__file__),
                            'sample_jobs/kvm.yaml')
    parser = JobParser()
    with open(kvm_yaml) as sample_job_data:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # unsafe; the rest of this file already uses yaml.safe_load
        data = yaml.safe_load(sample_job_data)
    # corrupt the handler name of the first test definition
    data['actions'][2]['test']['definitions'][0]['from'] = 'unusable-handler'
    try:
        job = parser.parse(yaml.dump(data), device, 4212, None, "")
        job.logger = DummyLogger()
    except JobError:
        pass
    except Exception as exc:  # pylint: disable=broad-except
        self.fail(exc)
    else:
        self.fail('JobError not raised')
def test_uimage_boot_type(self):
    """When the kernel type comes from the boot block ('bootm') rather
    than the deploy block, no uboot-prepare-kernel action is inserted."""
    # uimage in boot type
    del self.deploy_block['kernel']['type']
    self.boot_block['type'] = 'bootm'
    job = self.parser.parse(yaml.dump(self.base_data), self.device,
                            4212, None, "")
    job.logger = DummyLogger()
    job.validate()
    tftp = next(a for a in job.pipeline.actions if a.name == 'tftp-deploy')
    overlay = next(a for a in tftp.internal_pipeline.actions
                   if a.name == 'prepare-tftp-overlay')
    overlay_names = [action.name
                     for action in overlay.internal_pipeline.actions]
    self.assertNotIn('uboot-prepare-kernel', overlay_names)
def test_device_health_wrong(self):
    """Devices in BAD, MAINTENANCE or RETIRED get no health checks and are
    never reported available."""
    # Make sure that get_health_check does return something
    Device.get_health_check = _minimal_valid_job
    self.assertNotEqual(self.device01.get_health_check(), None)
    self.assertNotEqual(self.device02.get_health_check(), None)
    self.assertNotEqual(self.device03.get_health_check(), None)
    # HEALTH_(BAD|MAINTENANCE|RETIRED)
    for health in [Device.HEALTH_BAD, Device.HEALTH_MAINTENANCE,
                   Device.HEALTH_RETIRED]:
        self.device01.health = health
        self.device01.save()
        self.device02.health = health
        self.device02.save()
        self.device03.health = health
        self.device03.save()
        available_devices = schedule_health_checks(DummyLogger())[0]
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12)
        self.assertEqual(available_devices, {"dt-01": []})
        self._check_hc_not_scheduled(self.device01)
        self._check_hc_not_scheduled(self.device02)
        self._check_hc_not_scheduled(self.device03)
def test_device_environment(self):
    """env_dut overrides from the submission reach the exported device
    environment, with space-containing values kept quoted."""
    data = """
# YAML syntax.
overrides:
  DEBEMAIL: "*****@*****.**"
  DEBFULLNAME: "Neil Williams"
"""
    factory = Factory()
    job_parser = JobParser()
    (rendered, _) = factory.create_device('bbb-01.jinja2')
    # safe_load: yaml.load without an explicit Loader is deprecated and
    # unsafe; the rest of this file already uses yaml.safe_load
    device = NewDevice(yaml.safe_load(rendered))
    sample_job_file = os.path.join(os.path.dirname(__file__),
                                   'sample_jobs/uboot-ramdisk.yaml')
    with open(sample_job_file) as sample_job_data:
        job = job_parser.parse(sample_job_data, device, 4212, None, "",
                               env_dut=data)
        job.logger = DummyLogger()
    self.assertEqual(job.parameters['env_dut'], data)
    job.validate()
    boot_actions = [action.internal_pipeline.actions
                    for action in job.pipeline.actions
                    if action.name == 'uboot-action'][0]
    retry = [action for action in boot_actions
             if action.name == 'uboot-retry'][0]
    boot_env = [action for action in retry.internal_pipeline.actions
                if action.name == 'export-device-env'][0]
    found = False
    for line in boot_env.env:
        if 'DEBFULLNAME' in line:
            found = True
            # assert that the string containing a space still contains that space and is quoted
            self.assertIn('\\\'Neil Williams\\\'', line)
    self.assertTrue(found)
def test_extra_options(self):
    """Extra qemu options from the device config are flattened into the
    final command line as strings (ints converted)."""
    (rendered, _) = self.factory.create_device('kvm01.jinja2')
    device = NewDevice(yaml.safe_load(rendered))
    kvm_yaml = os.path.join(os.path.dirname(__file__),
                            'sample_jobs/kvm-inline.yaml')
    with open(kvm_yaml) as sample_job_data:
        job_data = yaml.safe_load(sample_job_data)
    # inject an extra options list; note that '1' loads as an int
    device['actions']['boot']['methods']['qemu']['parameters'][
        'extra'] = yaml.safe_load("""
- -smp
- 1
- -global
- virtio-blk-device.scsi=off
- -device virtio-scsi-device,id=scsi
- --append "console=ttyAMA0 root=/dev/vda rw"
""")
    self.assertIsInstance(
        device['actions']['boot']['methods']['qemu']['parameters']['extra']
        [1], int)
    parser = JobParser()
    job = parser.parse(yaml.dump(job_data), device, 4212, None, "")
    job.logger = DummyLogger()
    job.validate()
    boot_image = [action for action in job.pipeline.actions
                  if action.name == 'boot-image-retry'][0]
    boot_qemu = [action for action in boot_image.internal_pipeline.actions
                 if action.name == 'boot-qemu-image'][0]
    qemu = [action for action in boot_qemu.internal_pipeline.actions
            if action.name == 'execute-qemu'][0]
    self.assertIsInstance(qemu.sub_command, list)
    # every element of the built command line must be a string
    [self.assertIsInstance(item, str) for item in qemu.sub_command]  # pylint: disable=expression-not-assigned
    self.assertIn('virtio-blk-device.scsi=off', qemu.sub_command)
    # the int 1 has been converted to the string '1'
    self.assertIn('1', qemu.sub_command)
    self.assertNotIn(1, qemu.sub_command)
def create_kvm_job(self, filename):  # pylint: disable=no-self-use
    """
    Custom function to allow for extra exception handling.

    Renders the kvm01 device template, validates its dictionary, then
    parses *filename* against it.  Returns None when the deployment is
    not implemented yet (LAVAError).
    """
    (data, device_dict) = self.create_device('kvm01.jinja2')
    device = NewDevice(yaml.safe_load(data))
    if self.debug:
        print('####### Device configuration #######')
        print(data)
        print('#######')
    # NOTE(review): validating against 'hi6220-hikey-01' in a kvm helper
    # looks inconsistent with the kvm01 template — confirm intended
    self.validate_data('hi6220-hikey-01', device_dict)
    kvm_yaml = os.path.join(os.path.dirname(__file__), filename)
    parser = JobParser()
    try:
        with open(kvm_yaml) as sample_job_data:
            job = parser.parse(sample_job_data, device, 4212, None, "")
            job.logger = DummyLogger()
    except LAVAError as exc:
        print(exc)
        # some deployments listed in basics.yaml are not implemented yet
        return None
    return job
def test_vland_overlay(self):
    """With empty vland tags the job validates and the lava-vland overlay
    scripts are written to the overlay directory."""
    with open(self.filename) as yaml_data:
        alpha_data = yaml.safe_load(yaml_data)
    for vlan_key, _ in alpha_data['protocols'][VlandProtocol.name].items():
        alpha_data['protocols'][VlandProtocol.name][vlan_key] = {
            'tags': []
        }
    # removed tags from original job to simulate job where any interface tags will be acceptable
    self.assertEqual(alpha_data['protocols'][VlandProtocol.name],
                     {'vlan_one': {
                         'tags': []
                     }})
    parser = JobParser()
    job = parser.parse(yaml.dump(alpha_data), self.device, 4212, None, "")
    job.logger = DummyLogger()
    job.validate()
    tftp_deploy = [action for action in job.pipeline.actions
                   if action.name == 'tftp-deploy'][0]
    prepare = [action for action in tftp_deploy.internal_pipeline.actions
               if action.name == 'prepare-tftp-overlay'][0]
    overlay = [action for action in prepare.internal_pipeline.actions
               if action.name == 'lava-overlay'][0]
    vland = [action for action in overlay.internal_pipeline.actions
             if action.name == 'lava-vland-overlay'][0]
    self.assertTrue(os.path.exists(vland.lava_vland_test_dir))
    vland_files = os.listdir(vland.lava_vland_test_dir)
    self.assertIn('lava-vland-names', vland_files)
    self.assertIn('lava-vland-tags', vland_files)
    self.assertIn('lava-vland-self', vland_files)